[llvm] 5403c59 - [PPC] Opaque pointer migration, part 2.

Kai Nacke via llvm-commits llvm-commits at lists.llvm.org
Tue Oct 11 10:24:37 PDT 2022


Author: Kai Nacke
Date: 2022-10-11T17:24:06Z
New Revision: 5403c59c608c08c8ecd4303763f08eb046eb5e4d

URL: https://github.com/llvm/llvm-project/commit/5403c59c608c08c8ecd4303763f08eb046eb5e4d
DIFF: https://github.com/llvm/llvm-project/commit/5403c59c608c08c8ecd4303763f08eb046eb5e4d.diff

LOG: [PPC] Opaque pointer migration, part 2.

The LIT test cases were migrated with the script provided by
Nikita Popov. Due to the size of the change it is split into
several parts.

Reviewed By: nemanja, nikic

Differential Revision: https://reviews.llvm.org/D135474

Added: 
    

Modified: 
    llvm/test/CodeGen/PowerPC/opt-builtins-ppc-xlcompat-cas.ll
    llvm/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll
    llvm/test/CodeGen/PowerPC/opt-li-add-to-addi.ll
    llvm/test/CodeGen/PowerPC/optcmp.ll
    llvm/test/CodeGen/PowerPC/or-addressing-mode.ll
    llvm/test/CodeGen/PowerPC/out-of-range-dform.ll
    llvm/test/CodeGen/PowerPC/overflow-intrinsic-optimizations.ll
    llvm/test/CodeGen/PowerPC/p10-constants.ll
    llvm/test/CodeGen/PowerPC/p10-setbc-ri.ll
    llvm/test/CodeGen/PowerPC/p10-setbc-rr.ll
    llvm/test/CodeGen/PowerPC/p10-setbcr-ri.ll
    llvm/test/CodeGen/PowerPC/p10-setbcr-rr.ll
    llvm/test/CodeGen/PowerPC/p10-setnbc-ri.ll
    llvm/test/CodeGen/PowerPC/p10-setnbc-rr.ll
    llvm/test/CodeGen/PowerPC/p10-setnbcr-ri.ll
    llvm/test/CodeGen/PowerPC/p10-setnbcr-rr.ll
    llvm/test/CodeGen/PowerPC/p10-spill-creq.ll
    llvm/test/CodeGen/PowerPC/p10-spill-crgt.ll
    llvm/test/CodeGen/PowerPC/p10-spill-crlt.ll
    llvm/test/CodeGen/PowerPC/p10-spill-crun.ll
    llvm/test/CodeGen/PowerPC/p8-isel-sched.ll
    llvm/test/CodeGen/PowerPC/p8-scalar_vector_conversions.ll
    llvm/test/CodeGen/PowerPC/p9-dform-load-alignment.ll
    llvm/test/CodeGen/PowerPC/p9-vbpermd.ll
    llvm/test/CodeGen/PowerPC/paired-vector-intrinsics.ll
    llvm/test/CodeGen/PowerPC/pcrel-block-address.ll
    llvm/test/CodeGen/PowerPC/pcrel-byte-loads.ll
    llvm/test/CodeGen/PowerPC/pcrel-call-linkage-leaf.ll
    llvm/test/CodeGen/PowerPC/pcrel-call-linkage-simple.ll
    llvm/test/CodeGen/PowerPC/pcrel-call-linkage-with-calls.ll
    llvm/test/CodeGen/PowerPC/pcrel-got-indirect.ll
    llvm/test/CodeGen/PowerPC/pcrel-indirect-call.ll
    llvm/test/CodeGen/PowerPC/pcrel-linkeropt-option.ll
    llvm/test/CodeGen/PowerPC/pcrel-linkeropt.ll
    llvm/test/CodeGen/PowerPC/pcrel-local-caller-toc.ll
    llvm/test/CodeGen/PowerPC/pcrel-relocation-plus-offset.ll
    llvm/test/CodeGen/PowerPC/pcrel-tail-calls.ll
    llvm/test/CodeGen/PowerPC/pcrel-tls-general-dynamic.ll
    llvm/test/CodeGen/PowerPC/pcrel-tls-initial-exec.ll
    llvm/test/CodeGen/PowerPC/pcrel-tls-local-dynamic.ll
    llvm/test/CodeGen/PowerPC/pcrel-tls-local-exec.ll
    llvm/test/CodeGen/PowerPC/pcrel-tls_get_addr_clobbers.ll
    llvm/test/CodeGen/PowerPC/pcrel.ll
    llvm/test/CodeGen/PowerPC/pcrel_ldst.ll
    llvm/test/CodeGen/PowerPC/peephole-align.ll
    llvm/test/CodeGen/PowerPC/pgo-ref-directive.ll
    llvm/test/CodeGen/PowerPC/pip-inner.ll
    llvm/test/CodeGen/PowerPC/popcount.ll
    llvm/test/CodeGen/PowerPC/post-ra-ec.ll
    llvm/test/CodeGen/PowerPC/pow_massv_075_025exp.ll
    llvm/test/CodeGen/PowerPC/power9-moves-and-splats.ll
    llvm/test/CodeGen/PowerPC/powf_massv_075_025exp.ll
    llvm/test/CodeGen/PowerPC/ppc-32bit-build-vector.ll
    llvm/test/CodeGen/PowerPC/ppc-32bit-shift.ll
    llvm/test/CodeGen/PowerPC/ppc-ctr-dead-code.ll
    llvm/test/CodeGen/PowerPC/ppc-disable-non-volatile-cr.ll
    llvm/test/CodeGen/PowerPC/ppc-empty-fs.ll
    llvm/test/CodeGen/PowerPC/ppc-label.ll
    llvm/test/CodeGen/PowerPC/ppc-label2.ll
    llvm/test/CodeGen/PowerPC/ppc-partword-atomic.ll
    llvm/test/CodeGen/PowerPC/ppc-prologue.ll
    llvm/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll
    llvm/test/CodeGen/PowerPC/ppc-vaarg-agg.ll
    llvm/test/CodeGen/PowerPC/ppc32-align-long-double-sf.ll
    llvm/test/CodeGen/PowerPC/ppc32-constant-BE-ppcf128.ll
    llvm/test/CodeGen/PowerPC/ppc32-i1-vaarg.ll
    llvm/test/CodeGen/PowerPC/ppc32-i64-to-float-conv.ll
    llvm/test/CodeGen/PowerPC/ppc32-lshrti3.ll
    llvm/test/CodeGen/PowerPC/ppc32-nest.ll
    llvm/test/CodeGen/PowerPC/ppc32-pic-large.ll
    llvm/test/CodeGen/PowerPC/ppc32-pic.ll
    llvm/test/CodeGen/PowerPC/ppc32-secure-plt-tls.ll
    llvm/test/CodeGen/PowerPC/ppc32-secure-plt-tls2.ll
    llvm/test/CodeGen/PowerPC/ppc32-skip-regs.ll
    llvm/test/CodeGen/PowerPC/ppc32-vacopy.ll
    llvm/test/CodeGen/PowerPC/ppc440-fp-basic.ll
    llvm/test/CodeGen/PowerPC/ppc64-P9-mod.ll
    llvm/test/CodeGen/PowerPC/ppc64-P9-setb.ll
    llvm/test/CodeGen/PowerPC/ppc64-abi-extend.ll
    llvm/test/CodeGen/PowerPC/ppc64-acc-regalloc-bugfix.ll
    llvm/test/CodeGen/PowerPC/ppc64-acc-regalloc.ll
    llvm/test/CodeGen/PowerPC/ppc64-align-long-double.ll
    llvm/test/CodeGen/PowerPC/ppc64-blnop.ll
    llvm/test/CodeGen/PowerPC/ppc64-byval-align.ll
    llvm/test/CodeGen/PowerPC/ppc64-byval-larger-struct.ll
    llvm/test/CodeGen/PowerPC/ppc64-byval-multi-store.ll
    llvm/test/CodeGen/PowerPC/ppc64-calls.ll
    llvm/test/CodeGen/PowerPC/ppc64-crash.ll
    llvm/test/CodeGen/PowerPC/ppc64-func-desc-hoist.ll
    llvm/test/CodeGen/PowerPC/ppc64-get-cache-line-size.ll
    llvm/test/CodeGen/PowerPC/ppc64-i128-abi.ll
    llvm/test/CodeGen/PowerPC/ppc64-icbt-pwr7.ll
    llvm/test/CodeGen/PowerPC/ppc64-icbt-pwr8.ll
    llvm/test/CodeGen/PowerPC/ppc64-nest.ll
    llvm/test/CodeGen/PowerPC/ppc64-nonfunc-calls.ll
    llvm/test/CodeGen/PowerPC/ppc64-pre-inc-no-extra-phi.ll
    llvm/test/CodeGen/PowerPC/ppc64-prefetch.ll
    llvm/test/CodeGen/PowerPC/ppc64-rop-protection-aix.ll
    llvm/test/CodeGen/PowerPC/ppc64-rop-protection.ll
    llvm/test/CodeGen/PowerPC/ppc64-sibcall-shrinkwrap.ll
    llvm/test/CodeGen/PowerPC/ppc64-sibcall.ll
    llvm/test/CodeGen/PowerPC/ppc64-smallarg.ll
    llvm/test/CodeGen/PowerPC/ppc64-toc.ll
    llvm/test/CodeGen/PowerPC/ppc64-vaarg-int.ll
    llvm/test/CodeGen/PowerPC/ppc64-varargs.ll
    llvm/test/CodeGen/PowerPC/ppc64-xxsplti32dx-pattern-check.ll
    llvm/test/CodeGen/PowerPC/ppc64le-aggregates.ll
    llvm/test/CodeGen/PowerPC/ppc64le-calls.ll
    llvm/test/CodeGen/PowerPC/ppc64le-crsave.ll
    llvm/test/CodeGen/PowerPC/ppc64le-localentry-large.ll
    llvm/test/CodeGen/PowerPC/ppc64le-localentry.ll
    llvm/test/CodeGen/PowerPC/ppc64le-smallarg.ll
    llvm/test/CodeGen/PowerPC/ppc_fp128-bcwriter.ll
    llvm/test/CodeGen/PowerPC/ppcf128-1.ll
    llvm/test/CodeGen/PowerPC/ppcf128-3.ll
    llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll
    llvm/test/CodeGen/PowerPC/ppcf128-endian.ll
    llvm/test/CodeGen/PowerPC/ppcf128sf.ll
    llvm/test/CodeGen/PowerPC/ppcsoftops.ll
    llvm/test/CodeGen/PowerPC/pr13891.ll
    llvm/test/CodeGen/PowerPC/pr15031.ll
    llvm/test/CodeGen/PowerPC/pr15359.ll
    llvm/test/CodeGen/PowerPC/pr15630.ll
    llvm/test/CodeGen/PowerPC/pr15632.ll
    llvm/test/CodeGen/PowerPC/pr16556-2.ll
    llvm/test/CodeGen/PowerPC/pr16556.ll
    llvm/test/CodeGen/PowerPC/pr17168.ll
    llvm/test/CodeGen/PowerPC/pr17354.ll
    llvm/test/CodeGen/PowerPC/pr18663-2.ll
    llvm/test/CodeGen/PowerPC/pr18663.ll
    llvm/test/CodeGen/PowerPC/pr20442.ll
    llvm/test/CodeGen/PowerPC/pr22711.ll
    llvm/test/CodeGen/PowerPC/pr24216.ll
    llvm/test/CodeGen/PowerPC/pr24546.ll
    llvm/test/CodeGen/PowerPC/pr24636.ll
    llvm/test/CodeGen/PowerPC/pr25157-peephole.ll
    llvm/test/CodeGen/PowerPC/pr25157.ll
    llvm/test/CodeGen/PowerPC/pr26378.ll
    llvm/test/CodeGen/PowerPC/pr26690.ll
    llvm/test/CodeGen/PowerPC/pr27078.ll
    llvm/test/CodeGen/PowerPC/pr27350.ll
    llvm/test/CodeGen/PowerPC/pr28130.ll
    llvm/test/CodeGen/PowerPC/pr28630.ll
    llvm/test/CodeGen/PowerPC/pr30451.ll
    llvm/test/CodeGen/PowerPC/pr30663.ll
    llvm/test/CodeGen/PowerPC/pr30715.ll
    llvm/test/CodeGen/PowerPC/pr31144.ll
    llvm/test/CodeGen/PowerPC/pr32063.ll
    llvm/test/CodeGen/PowerPC/pr32140.ll
    llvm/test/CodeGen/PowerPC/pr33547.ll
    llvm/test/CodeGen/PowerPC/pr35402.ll
    llvm/test/CodeGen/PowerPC/pr35688.ll
    llvm/test/CodeGen/PowerPC/pr36068.ll
    llvm/test/CodeGen/PowerPC/pr36292.ll
    llvm/test/CodeGen/PowerPC/pr38087.ll
    llvm/test/CodeGen/PowerPC/pr39478.ll
    llvm/test/CodeGen/PowerPC/pr39815.ll
    llvm/test/CodeGen/PowerPC/pr40922.ll
    llvm/test/CodeGen/PowerPC/pr41088.ll
    llvm/test/CodeGen/PowerPC/pr41177.ll
    llvm/test/CodeGen/PowerPC/pr42492.ll
    llvm/test/CodeGen/PowerPC/pr43527.ll
    llvm/test/CodeGen/PowerPC/pr43976.ll
    llvm/test/CodeGen/PowerPC/pr44183.ll
    llvm/test/CodeGen/PowerPC/pr45186.ll
    llvm/test/CodeGen/PowerPC/pr45297.ll
    llvm/test/CodeGen/PowerPC/pr45301.ll
    llvm/test/CodeGen/PowerPC/pr45432.ll
    llvm/test/CodeGen/PowerPC/pr45448.ll
    llvm/test/CodeGen/PowerPC/pr46759.ll
    llvm/test/CodeGen/PowerPC/pr46923.ll
    llvm/test/CodeGen/PowerPC/pr47707.ll
    llvm/test/CodeGen/PowerPC/pr47891.ll
    llvm/test/CodeGen/PowerPC/pr47916.ll
    llvm/test/CodeGen/PowerPC/pr48519.ll
    llvm/test/CodeGen/PowerPC/pr48527.ll
    llvm/test/CodeGen/PowerPC/pr49509.ll
    llvm/test/CodeGen/PowerPC/pr56469.ll
    llvm/test/CodeGen/PowerPC/pre-inc-disable.ll
    llvm/test/CodeGen/PowerPC/preinc-ld-sel-crash.ll
    llvm/test/CodeGen/PowerPC/preincprep-invoke.ll
    llvm/test/CodeGen/PowerPC/private.ll
    llvm/test/CodeGen/PowerPC/pwr7-gt-nop.ll
    llvm/test/CodeGen/PowerPC/quadint-return.ll
    llvm/test/CodeGen/PowerPC/read-set-flm.ll
    llvm/test/CodeGen/PowerPC/reduce_cr.ll
    llvm/test/CodeGen/PowerPC/reduce_scalarization.ll
    llvm/test/CodeGen/PowerPC/reduce_scalarization02.ll
    llvm/test/CodeGen/PowerPC/redundant-copy-after-tail-dup.ll
    llvm/test/CodeGen/PowerPC/reg-coalesce-simple.ll
    llvm/test/CodeGen/PowerPC/register-pressure-reduction.ll
    llvm/test/CodeGen/PowerPC/reloc-align.ll
    llvm/test/CodeGen/PowerPC/remat-imm.ll
    llvm/test/CodeGen/PowerPC/rematerializable-instruction-machine-licm.ll
    llvm/test/CodeGen/PowerPC/remove-redundant-load-imm.ll
    llvm/test/CodeGen/PowerPC/remove-redundant-toc-saves.ll
    llvm/test/CodeGen/PowerPC/resolvefi-basereg.ll
    llvm/test/CodeGen/PowerPC/resolvefi-disp.ll
    llvm/test/CodeGen/PowerPC/respect-rounding-mode.ll
    llvm/test/CodeGen/PowerPC/retaddr.ll
    llvm/test/CodeGen/PowerPC/retaddr2.ll
    llvm/test/CodeGen/PowerPC/retaddr_multi_levels.ll
    llvm/test/CodeGen/PowerPC/return-val-i128.ll
    llvm/test/CodeGen/PowerPC/rlwimi-and-or-bits.ll
    llvm/test/CodeGen/PowerPC/rlwimi-and.ll
    llvm/test/CodeGen/PowerPC/rlwimi-commute.ll
    llvm/test/CodeGen/PowerPC/rlwimi-dyn-and.ll
    llvm/test/CodeGen/PowerPC/rlwimi-keep-rsh.ll
    llvm/test/CodeGen/PowerPC/rm-zext.ll
    llvm/test/CodeGen/PowerPC/rs-undef-use.ll
    llvm/test/CodeGen/PowerPC/s000-alias-misched.ll
    llvm/test/CodeGen/PowerPC/sat-register-clobber.ll
    llvm/test/CodeGen/PowerPC/save-crbp-ppc32svr4.ll
    llvm/test/CodeGen/PowerPC/scalar-double-ldst.ll
    llvm/test/CodeGen/PowerPC/scalar-float-ldst.ll
    llvm/test/CodeGen/PowerPC/scalar-i16-ldst.ll
    llvm/test/CodeGen/PowerPC/scalar-i32-ldst.ll
    llvm/test/CodeGen/PowerPC/scalar-i64-ldst.ll
    llvm/test/CodeGen/PowerPC/scalar-i8-ldst.ll
    llvm/test/CodeGen/PowerPC/scalar_vector_test_1.ll
    llvm/test/CodeGen/PowerPC/scalar_vector_test_2.ll
    llvm/test/CodeGen/PowerPC/scalar_vector_test_3.ll
    llvm/test/CodeGen/PowerPC/scalar_vector_test_4.ll
    llvm/test/CodeGen/PowerPC/scalar_vector_test_5.ll
    llvm/test/CodeGen/PowerPC/scalars-in-altivec-regs.ll
    llvm/test/CodeGen/PowerPC/sched-addi.ll
    llvm/test/CodeGen/PowerPC/scheduling-mem-dependency.ll
    llvm/test/CodeGen/PowerPC/sdag-ppcf128.ll
    llvm/test/CodeGen/PowerPC/select-addrRegRegOnly.ll
    llvm/test/CodeGen/PowerPC/selectiondag-extload-computeknownbits.ll
    llvm/test/CodeGen/PowerPC/selectiondag-sextload.ll
    llvm/test/CodeGen/PowerPC/setcc-logic.ll
    llvm/test/CodeGen/PowerPC/setcc-to-sub.ll
    llvm/test/CodeGen/PowerPC/setcc_no_zext.ll
    llvm/test/CodeGen/PowerPC/setcclike-or-comb.ll
    llvm/test/CodeGen/PowerPC/sign-ext-atomics.ll
    llvm/test/CodeGen/PowerPC/simplifyConstCmpToISEL.ll
    llvm/test/CodeGen/PowerPC/sink-side-effect.ll
    llvm/test/CodeGen/PowerPC/sj-ctr-loop.ll
    llvm/test/CodeGen/PowerPC/sjlj.ll
    llvm/test/CodeGen/PowerPC/sjlj_no0x.ll
    llvm/test/CodeGen/PowerPC/small-arguments.ll
    llvm/test/CodeGen/PowerPC/sms-cpy-1.ll
    llvm/test/CodeGen/PowerPC/sms-grp-order.ll
    llvm/test/CodeGen/PowerPC/sms-iterator.ll
    llvm/test/CodeGen/PowerPC/sms-phi-2.ll
    llvm/test/CodeGen/PowerPC/sms-phi-5.ll
    llvm/test/CodeGen/PowerPC/sms-phi.ll
    llvm/test/CodeGen/PowerPC/sms-remark.ll
    llvm/test/CodeGen/PowerPC/sms-simple.ll
    llvm/test/CodeGen/PowerPC/spe-fastmath.ll
    llvm/test/CodeGen/PowerPC/spe-hwdouble.ll
    llvm/test/CodeGen/PowerPC/spe.ll
    llvm/test/CodeGen/PowerPC/spill-nor0.ll
    llvm/test/CodeGen/PowerPC/spill-vec-pair.ll
    llvm/test/CodeGen/PowerPC/spill_p9_setb.ll
    llvm/test/CodeGen/PowerPC/splat-bug.ll
    llvm/test/CodeGen/PowerPC/split-index-tc.ll
    llvm/test/CodeGen/PowerPC/splitstore-check-volatile.ll
    llvm/test/CodeGen/PowerPC/stack-clash-dynamic-alloca.ll
    llvm/test/CodeGen/PowerPC/stack-clash-prologue-nounwind.ll
    llvm/test/CodeGen/PowerPC/stack-clash-prologue.ll
    llvm/test/CodeGen/PowerPC/stack-guard-oob.ll
    llvm/test/CodeGen/PowerPC/stack-no-redzone.ll
    llvm/test/CodeGen/PowerPC/stack-protector.ll
    llvm/test/CodeGen/PowerPC/stack-realign.ll
    llvm/test/CodeGen/PowerPC/stack-restore-with-setjmp.ll
    llvm/test/CodeGen/PowerPC/stackmap-frame-setup.ll
    llvm/test/CodeGen/PowerPC/std-unal-fi.ll
    llvm/test/CodeGen/PowerPC/stdux-constuse.ll
    llvm/test/CodeGen/PowerPC/stfiwx-2.ll
    llvm/test/CodeGen/PowerPC/stfiwx.ll
    llvm/test/CodeGen/PowerPC/store-combine.ll
    llvm/test/CodeGen/PowerPC/store-constant.ll
    llvm/test/CodeGen/PowerPC/store-forward-be32.ll
    llvm/test/CodeGen/PowerPC/store-forward-be64.ll
    llvm/test/CodeGen/PowerPC/store-load-fwd.ll
    llvm/test/CodeGen/PowerPC/store-rightmost-vector-elt.ll
    llvm/test/CodeGen/PowerPC/store-update.ll
    llvm/test/CodeGen/PowerPC/store_fptoi.ll
    llvm/test/CodeGen/PowerPC/structsinmem.ll
    llvm/test/CodeGen/PowerPC/structsinregs.ll
    llvm/test/CodeGen/PowerPC/stwu-gta.ll
    llvm/test/CodeGen/PowerPC/stwu-sched.ll
    llvm/test/CodeGen/PowerPC/stwu8.ll
    llvm/test/CodeGen/PowerPC/stwux.ll
    llvm/test/CodeGen/PowerPC/subreg-postra-2.ll
    llvm/test/CodeGen/PowerPC/subreg-postra.ll
    llvm/test/CodeGen/PowerPC/svr4-redzone.ll
    llvm/test/CodeGen/PowerPC/swap-reduction.ll
    llvm/test/CodeGen/PowerPC/swaps-le-1.ll
    llvm/test/CodeGen/PowerPC/swaps-le-2.ll
    llvm/test/CodeGen/PowerPC/swaps-le-3.ll
    llvm/test/CodeGen/PowerPC/swaps-le-4.ll
    llvm/test/CodeGen/PowerPC/swaps-le-5.ll
    llvm/test/CodeGen/PowerPC/swaps-le-6.ll
    llvm/test/CodeGen/PowerPC/swaps-le-7.ll
    llvm/test/CodeGen/PowerPC/swaps-le-8.ll
    llvm/test/CodeGen/PowerPC/tail-dup-analyzable-fallthrough.ll
    llvm/test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll
    llvm/test/CodeGen/PowerPC/tail-dup-layout.ll
    llvm/test/CodeGen/PowerPC/tailcall-speculatable-callee.ll
    llvm/test/CodeGen/PowerPC/tailcall-string-rvo.ll
    llvm/test/CodeGen/PowerPC/test-and-cmp-folding.ll
    llvm/test/CodeGen/PowerPC/testComparesi32gtu.ll
    llvm/test/CodeGen/PowerPC/testComparesi32ltu.ll
    llvm/test/CodeGen/PowerPC/testComparesieqsc.ll
    llvm/test/CodeGen/PowerPC/testComparesieqsi.ll
    llvm/test/CodeGen/PowerPC/testComparesieqsll.ll
    llvm/test/CodeGen/PowerPC/testComparesieqss.ll
    llvm/test/CodeGen/PowerPC/testComparesiequc.ll
    llvm/test/CodeGen/PowerPC/testComparesiequi.ll
    llvm/test/CodeGen/PowerPC/testComparesiequll.ll
    llvm/test/CodeGen/PowerPC/testComparesiequs.ll
    llvm/test/CodeGen/PowerPC/testComparesigesc.ll
    llvm/test/CodeGen/PowerPC/testComparesigesi.ll
    llvm/test/CodeGen/PowerPC/testComparesigesll.ll
    llvm/test/CodeGen/PowerPC/testComparesigess.ll
    llvm/test/CodeGen/PowerPC/testComparesigeuc.ll
    llvm/test/CodeGen/PowerPC/testComparesigeui.ll
    llvm/test/CodeGen/PowerPC/testComparesigeull.ll
    llvm/test/CodeGen/PowerPC/testComparesigeus.ll
    llvm/test/CodeGen/PowerPC/testComparesigtsc.ll
    llvm/test/CodeGen/PowerPC/testComparesigtsi.ll
    llvm/test/CodeGen/PowerPC/testComparesigtsll.ll
    llvm/test/CodeGen/PowerPC/testComparesigtss.ll
    llvm/test/CodeGen/PowerPC/testComparesigtuc.ll
    llvm/test/CodeGen/PowerPC/testComparesigtui.ll
    llvm/test/CodeGen/PowerPC/testComparesigtus.ll
    llvm/test/CodeGen/PowerPC/testComparesilesc.ll
    llvm/test/CodeGen/PowerPC/testComparesilesi.ll
    llvm/test/CodeGen/PowerPC/testComparesilesll.ll
    llvm/test/CodeGen/PowerPC/testComparesiless.ll
    llvm/test/CodeGen/PowerPC/testComparesileuc.ll
    llvm/test/CodeGen/PowerPC/testComparesileui.ll
    llvm/test/CodeGen/PowerPC/testComparesileull.ll
    llvm/test/CodeGen/PowerPC/testComparesileus.ll
    llvm/test/CodeGen/PowerPC/testComparesiltsc.ll
    llvm/test/CodeGen/PowerPC/testComparesiltsi.ll
    llvm/test/CodeGen/PowerPC/testComparesiltsll.ll
    llvm/test/CodeGen/PowerPC/testComparesiltss.ll
    llvm/test/CodeGen/PowerPC/testComparesiltuc.ll
    llvm/test/CodeGen/PowerPC/testComparesiltui.ll
    llvm/test/CodeGen/PowerPC/testComparesiltus.ll
    llvm/test/CodeGen/PowerPC/testComparesinesc.ll
    llvm/test/CodeGen/PowerPC/testComparesinesi.ll
    llvm/test/CodeGen/PowerPC/testComparesinesll.ll
    llvm/test/CodeGen/PowerPC/testComparesiness.ll
    llvm/test/CodeGen/PowerPC/testComparesineuc.ll
    llvm/test/CodeGen/PowerPC/testComparesineui.ll
    llvm/test/CodeGen/PowerPC/testComparesineull.ll
    llvm/test/CodeGen/PowerPC/testComparesineus.ll
    llvm/test/CodeGen/PowerPC/testCompareslleqsc.ll
    llvm/test/CodeGen/PowerPC/testCompareslleqsi.ll
    llvm/test/CodeGen/PowerPC/testCompareslleqsll.ll
    llvm/test/CodeGen/PowerPC/testCompareslleqss.ll
    llvm/test/CodeGen/PowerPC/testComparesllequc.ll
    llvm/test/CodeGen/PowerPC/testComparesllequi.ll
    llvm/test/CodeGen/PowerPC/testComparesllequll.ll
    llvm/test/CodeGen/PowerPC/testComparesllequs.ll
    llvm/test/CodeGen/PowerPC/testComparesllgesc.ll
    llvm/test/CodeGen/PowerPC/testComparesllgesi.ll
    llvm/test/CodeGen/PowerPC/testComparesllgesll.ll
    llvm/test/CodeGen/PowerPC/testComparesllgess.ll
    llvm/test/CodeGen/PowerPC/testComparesllgeuc.ll
    llvm/test/CodeGen/PowerPC/testComparesllgeui.ll
    llvm/test/CodeGen/PowerPC/testComparesllgeull.ll
    llvm/test/CodeGen/PowerPC/testComparesllgeus.ll
    llvm/test/CodeGen/PowerPC/testComparesllgtsll.ll
    llvm/test/CodeGen/PowerPC/testComparesllgtuc.ll
    llvm/test/CodeGen/PowerPC/testComparesllgtui.ll
    llvm/test/CodeGen/PowerPC/testComparesllgtus.ll
    llvm/test/CodeGen/PowerPC/testCompareslllesc.ll
    llvm/test/CodeGen/PowerPC/testCompareslllesi.ll
    llvm/test/CodeGen/PowerPC/testCompareslllesll.ll
    llvm/test/CodeGen/PowerPC/testComparesllless.ll
    llvm/test/CodeGen/PowerPC/testComparesllleuc.ll
    llvm/test/CodeGen/PowerPC/testComparesllleui.ll
    llvm/test/CodeGen/PowerPC/testComparesllleull.ll
    llvm/test/CodeGen/PowerPC/testComparesllleus.ll
    llvm/test/CodeGen/PowerPC/testComparesllltsll.ll
    llvm/test/CodeGen/PowerPC/testComparesllltuc.ll
    llvm/test/CodeGen/PowerPC/testComparesllltui.ll
    llvm/test/CodeGen/PowerPC/testComparesllltus.ll
    llvm/test/CodeGen/PowerPC/testComparesllnesll.ll
    llvm/test/CodeGen/PowerPC/testComparesllneull.ll
    llvm/test/CodeGen/PowerPC/test_call_aix.ll
    llvm/test/CodeGen/PowerPC/test_func_desc.ll
    llvm/test/CodeGen/PowerPC/thread-pointer.ll
    llvm/test/CodeGen/PowerPC/tls-cse.ll
    llvm/test/CodeGen/PowerPC/tls-debug-aix.ll
    llvm/test/CodeGen/PowerPC/tls-pic.ll
    llvm/test/CodeGen/PowerPC/tls-pie-xform.ll
    llvm/test/CodeGen/PowerPC/tls-store2.ll
    llvm/test/CodeGen/PowerPC/tls.ll
    llvm/test/CodeGen/PowerPC/tls_get_addr_clobbers.ll
    llvm/test/CodeGen/PowerPC/tls_get_addr_stackframe.ll
    llvm/test/CodeGen/PowerPC/toc-data-const.ll
    llvm/test/CodeGen/PowerPC/toc-data.ll
    llvm/test/CodeGen/PowerPC/toc-float.ll
    llvm/test/CodeGen/PowerPC/tocSaveInPrologue.ll
    llvm/test/CodeGen/PowerPC/trampoline.ll
    llvm/test/CodeGen/PowerPC/trunc-srl-load.ll
    llvm/test/CodeGen/PowerPC/uint-to-ppcfp128-crash.ll
    llvm/test/CodeGen/PowerPC/unal-altivec-wint.ll
    llvm/test/CodeGen/PowerPC/unal-altivec.ll
    llvm/test/CodeGen/PowerPC/unal-altivec2.ll
    llvm/test/CodeGen/PowerPC/unal-vec-ldst.ll
    llvm/test/CodeGen/PowerPC/unal-vec-negarith.ll
    llvm/test/CodeGen/PowerPC/unal4-std.ll
    llvm/test/CodeGen/PowerPC/unaligned-addressing-mode.ll
    llvm/test/CodeGen/PowerPC/unaligned-dqform-ld.ll
    llvm/test/CodeGen/PowerPC/unaligned-floats.ll
    llvm/test/CodeGen/PowerPC/unaligned.ll
    llvm/test/CodeGen/PowerPC/uwtables.ll
    llvm/test/CodeGen/PowerPC/vaddsplat.ll
    llvm/test/CodeGen/PowerPC/varargs-struct-float.ll
    llvm/test/CodeGen/PowerPC/varargs.ll
    llvm/test/CodeGen/PowerPC/vcmp-fold.ll
    llvm/test/CodeGen/PowerPC/vec-abi-align.ll
    llvm/test/CodeGen/PowerPC/vec-bswap.ll
    llvm/test/CodeGen/PowerPC/vec-extract-itofp.ll
    llvm/test/CodeGen/PowerPC/vec-itofp.ll
    llvm/test/CodeGen/PowerPC/vec-trunc.ll
    llvm/test/CodeGen/PowerPC/vec_auto_constant.ll
    llvm/test/CodeGen/PowerPC/vec_br_cmp.ll
    llvm/test/CodeGen/PowerPC/vec_buildvector_loadstore.ll
    llvm/test/CodeGen/PowerPC/vec_constants.ll
    llvm/test/CodeGen/PowerPC/vec_conv.ll
    llvm/test/CodeGen/PowerPC/vec_conv_fp32_to_i16_elts.ll
    llvm/test/CodeGen/PowerPC/vec_conv_fp32_to_i64_elts.ll
    llvm/test/CodeGen/PowerPC/vec_conv_fp32_to_i8_elts.ll
    llvm/test/CodeGen/PowerPC/vec_conv_fp64_to_i16_elts.ll
    llvm/test/CodeGen/PowerPC/vec_conv_fp64_to_i32_elts.ll
    llvm/test/CodeGen/PowerPC/vec_conv_fp64_to_i8_elts.ll
    llvm/test/CodeGen/PowerPC/vec_conv_fp_to_i_4byte_elts.ll
    llvm/test/CodeGen/PowerPC/vec_conv_fp_to_i_8byte_elts.ll
    llvm/test/CodeGen/PowerPC/vec_conv_i16_to_fp32_elts.ll
    llvm/test/CodeGen/PowerPC/vec_conv_i16_to_fp64_elts.ll
    llvm/test/CodeGen/PowerPC/vec_conv_i32_to_fp64_elts.ll
    llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll
    llvm/test/CodeGen/PowerPC/vec_conv_i8_to_fp32_elts.ll
    llvm/test/CodeGen/PowerPC/vec_conv_i8_to_fp64_elts.ll
    llvm/test/CodeGen/PowerPC/vec_conv_i_to_fp_4byte_elts.ll
    llvm/test/CodeGen/PowerPC/vec_conv_i_to_fp_8byte_elts.ll
    llvm/test/CodeGen/PowerPC/vec_fneg.ll
    llvm/test/CodeGen/PowerPC/vec_insert_elt.ll
    llvm/test/CodeGen/PowerPC/vec_mergeow.ll
    llvm/test/CodeGen/PowerPC/vec_misaligned.ll
    llvm/test/CodeGen/PowerPC/vec_mul.ll
    llvm/test/CodeGen/PowerPC/vec_perf_shuffle.ll
    llvm/test/CodeGen/PowerPC/vec_shift.ll
    llvm/test/CodeGen/PowerPC/vec_shuffle.ll
    llvm/test/CodeGen/PowerPC/vec_shuffle_le.ll
    llvm/test/CodeGen/PowerPC/vec_shuffle_p8vector.ll
    llvm/test/CodeGen/PowerPC/vec_shuffle_p8vector_le.ll
    llvm/test/CodeGen/PowerPC/vec_splat.ll
    llvm/test/CodeGen/PowerPC/vec_splat_constant.ll
    llvm/test/CodeGen/PowerPC/vec_zero.ll
    llvm/test/CodeGen/PowerPC/vector-identity-shuffle.ll
    llvm/test/CodeGen/PowerPC/vector-ldst.ll
    llvm/test/CodeGen/PowerPC/vector-merge-store-fp-constants.ll
    llvm/test/CodeGen/PowerPC/vector.ll
    llvm/test/CodeGen/PowerPC/vrspill.ll
    llvm/test/CodeGen/PowerPC/vsel-prom.ll
    llvm/test/CodeGen/PowerPC/vsx-div.ll
    llvm/test/CodeGen/PowerPC/vsx-elementary-arith.ll
    llvm/test/CodeGen/PowerPC/vsx-fma-m.ll
    llvm/test/CodeGen/PowerPC/vsx-fma-mutate-trivial-copy.ll
    llvm/test/CodeGen/PowerPC/vsx-fma-mutate-undef.ll
    llvm/test/CodeGen/PowerPC/vsx-fma-sp.ll
    llvm/test/CodeGen/PowerPC/vsx-ldst-builtin-le.ll
    llvm/test/CodeGen/PowerPC/vsx-ldst.ll
    llvm/test/CodeGen/PowerPC/vsx-minmax.ll
    llvm/test/CodeGen/PowerPC/vsx-p8.ll
    llvm/test/CodeGen/PowerPC/vsx-p9.ll
    llvm/test/CodeGen/PowerPC/vsx-partword-int-loads-and-stores.ll
    llvm/test/CodeGen/PowerPC/vsx-recip-est.ll
    llvm/test/CodeGen/PowerPC/vsx-shuffle-le-load.ll
    llvm/test/CodeGen/PowerPC/vsx-shuffle-le-multiple-uses.ll
    llvm/test/CodeGen/PowerPC/vsx-spill-norwstore.ll
    llvm/test/CodeGen/PowerPC/vsx.ll
    llvm/test/CodeGen/PowerPC/vsx_builtins.ll
    llvm/test/CodeGen/PowerPC/vsx_insert_extract_le.ll
    llvm/test/CodeGen/PowerPC/vsx_scalar_ld_st.ll
    llvm/test/CodeGen/PowerPC/vsx_shuffle_le.ll
    llvm/test/CodeGen/PowerPC/vtable-reloc.ll
    llvm/test/CodeGen/PowerPC/weak_def_can_be_hidden.ll
    llvm/test/CodeGen/PowerPC/xray-ret-is-terminator.ll
    llvm/test/CodeGen/PowerPC/xvcmpeqdp-v2f64.ll
    llvm/test/CodeGen/PowerPC/zero-not-run.ll
    llvm/test/CodeGen/PowerPC/zext-and-cmp.ll
    llvm/test/CodeGen/PowerPC/zext-bitperm.ll
    llvm/test/CodeGen/PowerPC/zext-free.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/PowerPC/opt-builtins-ppc-xlcompat-cas.ll b/llvm/test/CodeGen/PowerPC/opt-builtins-ppc-xlcompat-cas.ll
index 6fa25bf36c3f..8b3a780701b3 100644
--- a/llvm/test/CodeGen/PowerPC/opt-builtins-ppc-xlcompat-cas.ll
+++ b/llvm/test/CodeGen/PowerPC/opt-builtins-ppc-xlcompat-cas.ll
@@ -4,35 +4,35 @@ define void @test_builtin_ppc_compare_and_swaplp(i64 %a, i64 %b, i64 %c) {
 ; CHECK-LABEL: @test_builtin_ppc_compare_and_swaplp(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
-; CHECK-NEXT:    store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
-; CHECK-NEXT:    [[TMP0:%.*]] = cmpxchg weak volatile i64* [[A_ADDR]], i64 [[B:%.*]], i64 [[C:%.*]] monotonic monotonic, align 8
+; CHECK-NEXT:    store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = cmpxchg weak volatile ptr [[A_ADDR]], i64 [[B:%.*]], i64 [[C:%.*]] monotonic monotonic, align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %a.addr = alloca i64, align 8
   %b.addr = alloca i64, align 8
   %c.addr = alloca i64, align 8
-  store i64 %a, i64* %a.addr, align 8
-  store i64 %b, i64* %b.addr, align 8
-  store i64 %c, i64* %c.addr, align 8
-  %0 = load i64, i64* %c.addr, align 8
-  %1 = load i64, i64* %b.addr, align 8
-  %2 = cmpxchg weak volatile i64* %a.addr, i64 %1, i64 %0 monotonic monotonic, align 8
+  store i64 %a, ptr %a.addr, align 8
+  store i64 %b, ptr %b.addr, align 8
+  store i64 %c, ptr %c.addr, align 8
+  %0 = load i64, ptr %c.addr, align 8
+  %1 = load i64, ptr %b.addr, align 8
+  %2 = cmpxchg weak volatile ptr %a.addr, i64 %1, i64 %0 monotonic monotonic, align 8
   %3 = extractvalue { i64, i1 } %2, 0
   %4 = extractvalue { i64, i1 } %2, 1
-  store i64 %3, i64* %b.addr, align 8
+  store i64 %3, ptr %b.addr, align 8
   ret void
 }
 
-define dso_local void @test_builtin_ppc_compare_and_swaplp_loop(i64* %a) {
+define dso_local void @test_builtin_ppc_compare_and_swaplp_loop(ptr %a) {
 ; CHECK-LABEL: @test_builtin_ppc_compare_and_swaplp_loop(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[CALL:%.*]] = tail call i64 bitcast (i64 (...)* @bar to i64 ()*)()
+; CHECK-NEXT:    [[CALL:%.*]] = tail call i64 @bar()
 ; CHECK-NEXT:    br label [[DO_BODY:%.*]]
 ; CHECK:       do.body:
 ; CHECK-NEXT:    [[X_0:%.*]] = phi i64 [ [[CALL]], [[ENTRY:%.*]] ], [ [[TMP1:%.*]], [[DO_BODY]] ]
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i64 [[X_0]], 1
-; CHECK-NEXT:    [[TMP0:%.*]] = cmpxchg weak volatile i64* [[A:%.*]], i64 [[X_0]], i64 [[ADD]] monotonic monotonic, align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = cmpxchg weak volatile ptr [[A:%.*]], i64 [[X_0]], i64 [[ADD]] monotonic monotonic, align 8
 ; CHECK-NEXT:    [[TMP1]] = extractvalue { i64, i1 } [[TMP0]], 0
 ; CHECK-NEXT:    [[TMP2:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1
 ; CHECK-NEXT:    br i1 [[TMP2]], label [[DO_BODY]], label [[DO_END:%.*]]
@@ -40,26 +40,26 @@ define dso_local void @test_builtin_ppc_compare_and_swaplp_loop(i64* %a) {
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %a.addr = alloca i64*, align 8
+  %a.addr = alloca ptr, align 8
   %x = alloca i64, align 8
-  store i64* %a, i64** %a.addr, align 8
-  %call = call i64 bitcast (i64 (...)* @bar to i64 ()*)()
-  store i64 %call, i64* %x, align 8
+  store ptr %a, ptr %a.addr, align 8
+  %call = call i64 @bar()
+  store i64 %call, ptr %x, align 8
   br label %do.body
 
 do.body:                                          ; preds = %do.cond, %entry
   br label %do.cond
 
 do.cond:                                          ; preds = %do.body
-  %0 = load i64*, i64** %a.addr, align 8
-  %1 = load i64, i64* %x, align 8
+  %0 = load ptr, ptr %a.addr, align 8
+  %1 = load i64, ptr %x, align 8
   %add = add nsw i64 %1, 1
-  %2 = load i64*, i64** %a.addr, align 8
-  %3 = load i64, i64* %x, align 8
-  %4 = cmpxchg weak volatile i64* %2, i64 %3, i64 %add monotonic monotonic, align 8
+  %2 = load ptr, ptr %a.addr, align 8
+  %3 = load i64, ptr %x, align 8
+  %4 = cmpxchg weak volatile ptr %2, i64 %3, i64 %add monotonic monotonic, align 8
   %5 = extractvalue { i64, i1 } %4, 0
   %6 = extractvalue { i64, i1 } %4, 1
-  store i64 %5, i64* %x, align 8
+  store i64 %5, ptr %x, align 8
   %tobool = icmp ne i1 %6, false
   br i1 %tobool, label %do.body, label %do.end
 

diff --git a/llvm/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll b/llvm/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll
index 406a3035a978..8a443c164b75 100644
--- a/llvm/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll
+++ b/llvm/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll
@@ -84,12 +84,12 @@ declare void @exit(i32 signext)
 ; In this case, we want to use OR_rec instead of OR + CMPLWI.
 
 ; CHECK-LABEL: fn5
-define zeroext i32 @fn5(i32* %p1, i32* %p2) {
+define zeroext i32 @fn5(ptr %p1, ptr %p2) {
 ; CHECK: OR_rec
 ; CHECK-NOT: CMP
 ; CHECK: BCC
-  %v1 = load i32, i32* %p1
-  %v2 = load i32, i32* %p2
+  %v1 = load i32, ptr %p1
+  %v2 = load i32, ptr %p2
   %1 = or i32 %v1, %v2
   %2 = icmp eq i32 %1, 0
   br i1 %2, label %foo, label %bar
@@ -105,7 +105,7 @@ bar:
 ; against a non-zero value.
 
 ; CHECK-LABEL: fn6
-define i8* @fn6(i8* readonly %p) {
+define ptr @fn6(ptr readonly %p) {
 ; CHECK: LBZU
 ; CHECK: EXTSB_rec
 ; CHECK-NOT: CMP
@@ -116,22 +116,22 @@ define i8* @fn6(i8* readonly %p) {
 ; CHECK: BCC
 
 entry:
-  %incdec.ptr = getelementptr inbounds i8, i8* %p, i64 -1
-  %0 = load i8, i8* %incdec.ptr
+  %incdec.ptr = getelementptr inbounds i8, ptr %p, i64 -1
+  %0 = load i8, ptr %incdec.ptr
   %cmp = icmp sgt i8 %0, -1
   br i1 %cmp, label %out, label %if.end
 
 if.end:
-  %incdec.ptr2 = getelementptr inbounds i8, i8* %p, i64 -2
-  %1 = load i8, i8* %incdec.ptr2
+  %incdec.ptr2 = getelementptr inbounds i8, ptr %p, i64 -2
+  %1 = load i8, ptr %incdec.ptr2
   %cmp4 = icmp sgt i8 %1, -1
   br i1 %cmp4, label %out, label %cleanup
 
 out:
-  %p.addr.0 = phi i8* [ %incdec.ptr, %entry ], [ %incdec.ptr2, %if.end ]
+  %p.addr.0 = phi ptr [ %incdec.ptr, %entry ], [ %incdec.ptr2, %if.end ]
   br label %cleanup
 
 cleanup:
-  %retval.0 = phi i8* [ %p.addr.0, %out ], [ null, %if.end ]
-  ret i8* %retval.0
+  %retval.0 = phi ptr [ %p.addr.0, %out ], [ null, %if.end ]
+  ret ptr %retval.0
 }

diff --git a/llvm/test/CodeGen/PowerPC/opt-li-add-to-addi.ll b/llvm/test/CodeGen/PowerPC/opt-li-add-to-addi.ll
index 67fd5d3e3085..f20a6197d5c9 100644
--- a/llvm/test/CodeGen/PowerPC/opt-li-add-to-addi.ll
+++ b/llvm/test/CodeGen/PowerPC/opt-li-add-to-addi.ll
@@ -13,7 +13,7 @@ entry:
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
-  tail call void bitcast (void (...)* @callv to void ()*)()
+  tail call void @callv()
   br label %if.end
 
 if.end:
@@ -37,7 +37,7 @@ entry:
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:
-  tail call void bitcast (void (...)* @callv to void ()*)()
+  tail call void @callv()
   br label %if.end4
 
 if.else:
@@ -45,7 +45,7 @@ if.else:
   br i1 %cmp1, label %if.then2, label %if.end4
 
 if.then2:
-  tail call void bitcast (void (...)* @callv to void ()*)()
+  tail call void @callv()
   br label %if.end4
 
 if.end4:

diff  --git a/llvm/test/CodeGen/PowerPC/optcmp.ll b/llvm/test/CodeGen/PowerPC/optcmp.ll
index 51ebbbd4fc66..bc265c646d47 100644
--- a/llvm/test/CodeGen/PowerPC/optcmp.ll
+++ b/llvm/test/CodeGen/PowerPC/optcmp.ll
@@ -5,7 +5,7 @@
 target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
-define signext i32 @foo(i32 signext %a, i32 signext %b, i32* nocapture %c) #0 {
+define signext i32 @foo(i32 signext %a, i32 signext %b, ptr nocapture %c) #0 {
 ; CHECK-LABEL: foo:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cmpw 3, 4
@@ -27,13 +27,13 @@ define signext i32 @foo(i32 signext %a, i32 signext %b, i32* nocapture %c) #0 {
 ; CHECK-NO-ISEL-NEXT:    blr
 entry:
   %sub = sub nsw i32 %a, %b
-  store i32 %sub, i32* %c, align 4
+  store i32 %sub, ptr %c, align 4
   %cmp = icmp sgt i32 %a, %b
   %cond = select i1 %cmp, i32 %a, i32 %b
   ret i32 %cond
 }
 
-define signext i32 @foo2(i32 signext %a, i32 signext %b, i32* nocapture %c) #0 {
+define signext i32 @foo2(i32 signext %a, i32 signext %b, ptr nocapture %c) #0 {
 ; CHECK-LABEL: foo2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    slw 4, 3, 4
@@ -57,13 +57,13 @@ define signext i32 @foo2(i32 signext %a, i32 signext %b, i32* nocapture %c) #0 {
 ; CHECK-NO-ISEL-NEXT:    blr
 entry:
   %shl = shl i32 %a, %b
-  store i32 %shl, i32* %c, align 4
+  store i32 %shl, ptr %c, align 4
   %cmp = icmp sgt i32 %shl, 0
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-define i64 @fool(i64 %a, i64 %b, i64* nocapture %c) #0 {
+define i64 @fool(i64 %a, i64 %b, ptr nocapture %c) #0 {
 ; CHECK-LABEL: fool:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sub. 6, 3, 4
@@ -83,13 +83,13 @@ define i64 @fool(i64 %a, i64 %b, i64* nocapture %c) #0 {
 ; CHECK-NO-ISEL-NEXT:    blr
 entry:
   %sub = sub nsw i64 %a, %b
-  store i64 %sub, i64* %c, align 8
+  store i64 %sub, ptr %c, align 8
   %cmp = icmp sgt i64 %a, %b
   %cond = select i1 %cmp, i64 %a, i64 %b
   ret i64 %cond
 }
 
-define i64 @foolb(i64 %a, i64 %b, i64* nocapture %c) #0 {
+define i64 @foolb(i64 %a, i64 %b, ptr nocapture %c) #0 {
 ; CHECK-LABEL: foolb:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sub. 6, 3, 4
@@ -109,13 +109,13 @@ define i64 @foolb(i64 %a, i64 %b, i64* nocapture %c) #0 {
 ; CHECK-NO-ISEL-NEXT:    blr
 entry:
   %sub = sub nsw i64 %a, %b
-  store i64 %sub, i64* %c, align 8
+  store i64 %sub, ptr %c, align 8
   %cmp = icmp sle i64 %a, %b
   %cond = select i1 %cmp, i64 %a, i64 %b
   ret i64 %cond
 }
 
-define i64 @foolc(i64 %a, i64 %b, i64* nocapture %c) #0 {
+define i64 @foolc(i64 %a, i64 %b, ptr nocapture %c) #0 {
 ; CHECK-LABEL: foolc:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sub. 6, 4, 3
@@ -135,13 +135,13 @@ define i64 @foolc(i64 %a, i64 %b, i64* nocapture %c) #0 {
 ; CHECK-NO-ISEL-NEXT:    blr
 entry:
   %sub = sub nsw i64 %b, %a
-  store i64 %sub, i64* %c, align 8
+  store i64 %sub, ptr %c, align 8
   %cmp = icmp sgt i64 %a, %b
   %cond = select i1 %cmp, i64 %a, i64 %b
   ret i64 %cond
 }
 
-define i64 @foold(i64 %a, i64 %b, i64* nocapture %c) #0 {
+define i64 @foold(i64 %a, i64 %b, ptr nocapture %c) #0 {
 ; CHECK-LABEL: foold:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sub. 6, 4, 3
@@ -161,13 +161,13 @@ define i64 @foold(i64 %a, i64 %b, i64* nocapture %c) #0 {
 ; CHECK-NO-ISEL-NEXT:    blr
 entry:
   %sub = sub nsw i64 %b, %a
-  store i64 %sub, i64* %c, align 8
+  store i64 %sub, ptr %c, align 8
   %cmp = icmp slt i64 %a, %b
   %cond = select i1 %cmp, i64 %a, i64 %b
   ret i64 %cond
 }
 
-define i64 @foold2(i64 %a, i64 %b, i64* nocapture %c) #0 {
+define i64 @foold2(i64 %a, i64 %b, ptr nocapture %c) #0 {
 ; CHECK-LABEL: foold2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sub. 6, 3, 4
@@ -187,13 +187,13 @@ define i64 @foold2(i64 %a, i64 %b, i64* nocapture %c) #0 {
 ; CHECK-NO-ISEL-NEXT:    blr
 entry:
   %sub = sub nsw i64 %a, %b
-  store i64 %sub, i64* %c, align 8
+  store i64 %sub, ptr %c, align 8
   %cmp = icmp slt i64 %a, %b
   %cond = select i1 %cmp, i64 %a, i64 %b
   ret i64 %cond
 }
 
-define i64 @foo2l(i64 %a, i64 %b, i64* nocapture %c) #0 {
+define i64 @foo2l(i64 %a, i64 %b, ptr nocapture %c) #0 {
 ; CHECK-LABEL: foo2l:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sld 4, 3, 4
@@ -213,13 +213,13 @@ define i64 @foo2l(i64 %a, i64 %b, i64* nocapture %c) #0 {
 ; CHECK-NO-ISEL-NEXT:    blr
 entry:
   %shl = shl i64 %a, %b
-  store i64 %shl, i64* %c, align 8
+  store i64 %shl, ptr %c, align 8
   %cmp = icmp sgt i64 %shl, 0
   %conv1 = zext i1 %cmp to i64
   ret i64 %conv1
 }
 
-define double @food(double %a, double %b, double* nocapture %c) #0 {
+define double @food(double %a, double %b, ptr nocapture %c) #0 {
 ; CHECK-LABEL: food:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fsub 0, 1, 2
@@ -241,13 +241,13 @@ define double @food(double %a, double %b, double* nocapture %c) #0 {
 ; CHECK-NO-ISEL-NEXT:    blr
 entry:
   %sub = fsub double %a, %b
-  store double %sub, double* %c, align 8
+  store double %sub, ptr %c, align 8
   %cmp = fcmp ogt double %a, %b
   %cond = select i1 %cmp, double %a, double %b
   ret double %cond
 }
 
-define float @foof(float %a, float %b, float* nocapture %c) #0 {
+define float @foof(float %a, float %b, ptr nocapture %c) #0 {
 ; CHECK-LABEL: foof:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fsubs 0, 1, 2
@@ -269,7 +269,7 @@ define float @foof(float %a, float %b, float* nocapture %c) #0 {
 ; CHECK-NO-ISEL-NEXT:    blr
 entry:
   %sub = fsub float %a, %b
-  store float %sub, float* %c, align 4
+  store float %sub, ptr %c, align 4
   %cmp = fcmp ogt float %a, %b
   %cond = select i1 %cmp, float %a, float %b
   ret float %cond
@@ -277,7 +277,7 @@ entry:
 
 declare i64 @llvm.ctpop.i64(i64);
 
-define signext i64 @fooct(i64 signext %a, i64 signext %b, i64* nocapture %c) #0 {
+define signext i64 @fooct(i64 signext %a, i64 signext %b, ptr nocapture %c) #0 {
 ; CHECK-LABEL: fooct:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lis 6, 21845
@@ -346,7 +346,7 @@ define signext i64 @fooct(i64 signext %a, i64 signext %b, i64* nocapture %c) #0
 entry:
   %sub = sub nsw i64 %a, %b
   %subc = call i64 @llvm.ctpop.i64(i64 %sub)
-  store i64 %subc, i64* %c, align 4
+  store i64 %subc, ptr %c, align 4
   %cmp = icmp sgt i64 %subc, 0
   %cond = select i1 %cmp, i64 %a, i64 %b
   ret i64 %cond

diff  --git a/llvm/test/CodeGen/PowerPC/or-addressing-mode.ll b/llvm/test/CodeGen/PowerPC/or-addressing-mode.ll
index cb9a4c63e7e5..53d9a02edb6a 100644
--- a/llvm/test/CodeGen/PowerPC/or-addressing-mode.ll
+++ b/llvm/test/CodeGen/PowerPC/or-addressing-mode.ll
@@ -1,22 +1,22 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu | not grep ori
 ; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu | not grep rlwimi
 
-define i32 @test1(i8* %P) {
-        %tmp.2.i = ptrtoint i8* %P to i32               ; <i32> [#uses=2]
+define i32 @test1(ptr %P) {
+        %tmp.2.i = ptrtoint ptr %P to i32               ; <i32> [#uses=2]
         %tmp.4.i = and i32 %tmp.2.i, -65536             ; <i32> [#uses=1]
         %tmp.10.i = lshr i32 %tmp.2.i, 5                ; <i32> [#uses=1]
         %tmp.11.i = and i32 %tmp.10.i, 2040             ; <i32> [#uses=1]
         %tmp.13.i = or i32 %tmp.11.i, %tmp.4.i          ; <i32> [#uses=1]
-        %tmp.14.i = inttoptr i32 %tmp.13.i to i32*              ; <i32*> [#uses=1]
-        %tmp.3 = load i32, i32* %tmp.14.i            ; <i32> [#uses=1]
+        %tmp.14.i = inttoptr i32 %tmp.13.i to ptr              ; <ptr> [#uses=1]
+        %tmp.3 = load i32, ptr %tmp.14.i            ; <i32> [#uses=1]
         ret i32 %tmp.3
 }
 
 define i32 @test2(i32 %P) {
         %tmp.2 = shl i32 %P, 4          ; <i32> [#uses=1]
         %tmp.3 = or i32 %tmp.2, 2               ; <i32> [#uses=1]
-        %tmp.4 = inttoptr i32 %tmp.3 to i32*            ; <i32*> [#uses=1]
-        %tmp.5 = load i32, i32* %tmp.4               ; <i32> [#uses=1]
+        %tmp.4 = inttoptr i32 %tmp.3 to ptr            ; <ptr> [#uses=1]
+        %tmp.5 = load i32, ptr %tmp.4               ; <i32> [#uses=1]
         ret i32 %tmp.5
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/out-of-range-dform.ll b/llvm/test/CodeGen/PowerPC/out-of-range-dform.ll
index 13b68a18ac79..5168944a2d85 100644
--- a/llvm/test/CodeGen/PowerPC/out-of-range-dform.ll
+++ b/llvm/test/CodeGen/PowerPC/out-of-range-dform.ll
@@ -5,7 +5,7 @@
 
 @_ZL3num = external dso_local unnamed_addr global float, align 4
 
-define dso_local void @main() local_unnamed_addr personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define dso_local void @main() local_unnamed_addr personality ptr @__gxx_personality_v0 {
 ; CHECK-P9-LABEL: main:
 ; CHECK-P9:       # %bb.0: # %bb
 ; CHECK-P9-NEXT:    mflr r0
@@ -34,8 +34,7 @@ define dso_local void @main() local_unnamed_addr personality i8* bitcast (i32 (.
 ; CHECK-P9-NEXT:    bne+ cr0, .LBB0_1
 ; CHECK-P9-NEXT:  .LBB0_2: # %bb16
 bb:
-  %i = tail call noalias dereferenceable_or_null(6451600) i8* @malloc()
-  %i1 = bitcast i8* %i to float*
+  %i = tail call noalias dereferenceable_or_null(6451600) ptr @malloc()
   br label %bb2
 
 bb2:                                              ; preds = %bb5, %bb
@@ -46,15 +45,15 @@ bb2:                                              ; preds = %bb5, %bb
 bb5:                                              ; preds = %bb2
   %i6 = mul nuw nsw i64 %i3, 1270
   %i7 = add nuw nsw i64 %i6, 0
-  %i8 = getelementptr inbounds float, float* %i1, i64 %i7
-  store float undef, float* %i8, align 4
+  %i8 = getelementptr inbounds float, ptr %i, i64 %i7
+  store float undef, ptr %i8, align 4
   %i9 = add nuw nsw i64 %i3, 3
-  %i10 = load float, float* @_ZL3num, align 4
+  %i10 = load float, ptr @_ZL3num, align 4
   %i11 = fmul float %i10, 0x3E00000000000000
   %i12 = mul nuw nsw i64 %i9, 1270
   %i13 = add nuw nsw i64 %i12, 0
-  %i14 = getelementptr inbounds float, float* %i1, i64 %i13
-  store float %i11, float* %i14, align 4
+  %i14 = getelementptr inbounds float, ptr %i, i64 %i13
+  store float %i11, ptr %i14, align 4
   %i15 = add nuw nsw i64 %i3, 5
   br label %bb2
 
@@ -64,4 +63,4 @@ bb16:                                             ; preds = %bb2
 
 declare i32 @__gxx_personality_v0(...)
 
-declare i8* @malloc() local_unnamed_addr
+declare ptr @malloc() local_unnamed_addr

diff  --git a/llvm/test/CodeGen/PowerPC/overflow-intrinsic-optimizations.ll b/llvm/test/CodeGen/PowerPC/overflow-intrinsic-optimizations.ll
index 353dac3d2c95..ca149dcf9a60 100644
--- a/llvm/test/CodeGen/PowerPC/overflow-intrinsic-optimizations.ll
+++ b/llvm/test/CodeGen/PowerPC/overflow-intrinsic-optimizations.ll
@@ -1,7 +1,7 @@
 ; RUN: llc %s -mtriple=powerpc -o - | FileCheck %s
 ; RUN: llc %s -mtriple=powerpc64 -o - | FileCheck %s
 
-define i1 @no__mulodi4(i32 %a, i64 %b, i32* %c) {
+define i1 @no__mulodi4(i32 %a, i64 %b, ptr %c) {
 ; CHECK-LABEL: no__mulodi4
 ; CHECK-NOT: bl __mulodi4
 ; CHECK-NOT: bl __multi3
@@ -14,7 +14,7 @@ entry:
   %5 = sext i32 %4 to i64
   %6 = icmp ne i64 %3, %5
   %7 = or i1 %2, %6
-  store i32 %4, i32* %c, align 4
+  store i32 %4, ptr %c, align 4
   ret i1 %7
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/p10-constants.ll b/llvm/test/CodeGen/PowerPC/p10-constants.ll
index bd70b7bce0f6..77472afd9c3d 100644
--- a/llvm/test/CodeGen/PowerPC/p10-constants.ll
+++ b/llvm/test/CodeGen/PowerPC/p10-constants.ll
@@ -361,7 +361,7 @@ while.body.i:                                     ; preds = %sw.epilog.i, %while
   %b.1.i = phi i32 [ %b.2.i, %sw.epilog.i ], [ 0, %while.body ]
   %c.1.i = phi i32 [ %c.2.i, %sw.epilog.i ], [ 291, %while.body ]
   %d.1.i = phi i32 [ %d.2.i, %sw.epilog.i ], [ 1179648, %while.body ]
-  %0 = load i8, i8* null, align 1
+  %0 = load i8, ptr null, align 1
   %cmp1.i = icmp eq i8 %0, 1
   br i1 %cmp1.i, label %if.then.i, label %if.else.i
 

diff  --git a/llvm/test/CodeGen/PowerPC/p10-setbc-ri.ll b/llvm/test/CodeGen/PowerPC/p10-setbc-ri.ll
index 76fd43ba50e9..64cfcc9c7b5b 100644
--- a/llvm/test/CodeGen/PowerPC/p10-setbc-ri.ll
+++ b/llvm/test/CodeGen/PowerPC/p10-setbc-ri.ll
@@ -137,7 +137,7 @@ define dso_local void @setbc9(i8 %a) {
 entry:
   %cmp = icmp slt i8 %a, 1
   %conv1 = zext i1 %cmp to i8
-  store i8 %conv1, i8* @globalVal, align 1
+  store i8 %conv1, ptr @globalVal, align 1
   ret void
 }
 
@@ -159,7 +159,7 @@ define dso_local void @setbc10(i32 %a) {
 entry:
   %cmp = icmp slt i32 %a, 1
   %conv1 = zext i1 %cmp to i32
-  store i32 %conv1, i32* @globalVal2, align 4
+  store i32 %conv1, ptr @globalVal2, align 4
   ret void
 }
 
@@ -181,7 +181,7 @@ define dso_local void @setbc11(i64 %a) {
 entry:
   %cmp = icmp slt i64 %a, 1
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -205,7 +205,7 @@ define dso_local void @setbc12(i16 %a) {
 entry:
   %cmp = icmp slt i16 %a, 1
   %conv1 = zext i1 %cmp to i16
-  store i16 %conv1, i16* @globalVal4, align 2
+  store i16 %conv1, ptr @globalVal4, align 2
   ret void
 }
 
@@ -329,7 +329,7 @@ define dso_local void @setbc21(i8 %a) {
 entry:
   %cmp = icmp sgt i8 %a, 1
   %conv1 = zext i1 %cmp to i8
-  store i8 %conv1, i8* @globalVal, align 1
+  store i8 %conv1, ptr @globalVal, align 1
   ret void
 }
 
@@ -351,7 +351,7 @@ define dso_local void @setbc22(i32 %a) {
 entry:
   %cmp = icmp sgt i32 %a, 1
   %conv1 = zext i1 %cmp to i32
-  store i32 %conv1, i32* @globalVal2, align 4
+  store i32 %conv1, ptr @globalVal2, align 4
   ret void
 }
 
@@ -373,7 +373,7 @@ define dso_local void @setbc23(i64 %a) {
 entry:
   %cmp = icmp sgt i64 %a, 1
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -397,7 +397,7 @@ define dso_local void @setbc24(i16 %a) {
 entry:
   %cmp = icmp sgt i16 %a, 1
   %conv1 = zext i1 %cmp to i16
-  store i16 %conv1, i16* @globalVal4, align 2
+  store i16 %conv1, ptr @globalVal4, align 2
   ret void
 }
 
@@ -521,7 +521,7 @@ define dso_local void @setbc33(i8 %a) {
 entry:
   %cmp = icmp eq i8 %a, 1
   %conv1 = zext i1 %cmp to i8
-  store i8 %conv1, i8* @globalVal, align 1
+  store i8 %conv1, ptr @globalVal, align 1
   ret void
 }
 
@@ -543,7 +543,7 @@ define dso_local void @setbc34(i32 %a) {
 entry:
   %cmp = icmp eq i32 %a, 1
   %conv1 = zext i1 %cmp to i32
-  store i32 %conv1, i32* @globalVal2, align 4
+  store i32 %conv1, ptr @globalVal2, align 4
   ret void
 }
 
@@ -565,7 +565,7 @@ define dso_local void @setbc35(i64 %a) {
 entry:
   %cmp = icmp eq i64 %a, 1
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -589,7 +589,7 @@ define dso_local void @setbc36(i16 %a) {
 entry:
   %cmp = icmp eq i16 %a, 1
   %conv1 = zext i1 %cmp to i16
-  store i16 %conv1, i16* @globalVal4, align 2
+  store i16 %conv1, ptr @globalVal4, align 2
   ret void
 }
 
@@ -635,6 +635,6 @@ define dso_local void @setbc39(i64 %a) {
 entry:
   %cmp = icmp ugt i64 %a, 1
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/p10-setbc-rr.ll b/llvm/test/CodeGen/PowerPC/p10-setbc-rr.ll
index 5ec4012db74b..d4133a3a8937 100644
--- a/llvm/test/CodeGen/PowerPC/p10-setbc-rr.ll
+++ b/llvm/test/CodeGen/PowerPC/p10-setbc-rr.ll
@@ -83,7 +83,7 @@ define dso_local void @setbc5(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp eq i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -105,7 +105,7 @@ define dso_local void @setbc6(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp eq i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @globalVal2, align 4
+  store i32 %conv, ptr @globalVal2, align 4
   ret void
 }
 
@@ -152,7 +152,7 @@ define dso_local void @setbc9(i64 %a, i64 %b) {
 entry:
   %cmp = icmp eq i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -188,7 +188,7 @@ define dso_local void @setbc11(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp eq i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -224,7 +224,7 @@ define dso_local void @setbc13(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp eq i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -260,7 +260,7 @@ define dso_local void @setbc15(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp eq i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @globalVal2, align 4
+  store i32 %conv, ptr @globalVal2, align 4
   ret void
 }
 
@@ -296,7 +296,7 @@ define dso_local void @setbc17(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp eq i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -332,7 +332,7 @@ define dso_local void @setbc19(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp sgt i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -355,7 +355,7 @@ define dso_local void @setbc20(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp sgt i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @globalVal2, align 4
+  store i32 %conv, ptr @globalVal2, align 4
   ret void
 }
 
@@ -391,7 +391,7 @@ define dso_local void @setbc22(i64 %a, i64 %b) {
 entry:
   %cmp = icmp sgt i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -427,7 +427,7 @@ define dso_local void @setbc24(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp sgt i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -463,7 +463,7 @@ define dso_local void @setbc26(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp slt i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -486,7 +486,7 @@ define dso_local void @setbc27(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp slt i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @globalVal2, align 4
+  store i32 %conv, ptr @globalVal2, align 4
   ret void
 }
 
@@ -535,7 +535,7 @@ define dso_local void @setbc30(i64 %a, i64 %b) {
 entry:
   %cmp = icmp slt i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -571,7 +571,7 @@ define dso_local void @setbc32(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp slt i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -607,7 +607,7 @@ define dso_local void @setbc34(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp eq i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -643,7 +643,7 @@ define dso_local void @setbc36(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp eq i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @globalVal2, align 4
+  store i32 %conv, ptr @globalVal2, align 4
   ret void
 }
 
@@ -666,7 +666,7 @@ define dso_local void @setbc37(i64 %a, i64 %b) {
 entry:
   %cmp = icmp eq i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -702,7 +702,7 @@ define dso_local void @setbc39(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp eq i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -738,7 +738,7 @@ define dso_local void @setbc41(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp eq i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -774,7 +774,7 @@ define dso_local void @setbc43(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp eq i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @globalVal2, align 4
+  store i32 %conv, ptr @globalVal2, align 4
   ret void
 }
 
@@ -810,7 +810,7 @@ define dso_local void @setbc45(i64 %a, i64 %b) {
 entry:
   %cmp = icmp eq i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -846,7 +846,7 @@ define dso_local void @setbc47(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp eq i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -882,7 +882,7 @@ define dso_local void @setbc49(i64 %a, i64 %b) {
 entry:
   %cmp = icmp sgt i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -918,6 +918,6 @@ define dso_local void @setnbc51(i64 %a, i64 %b) {
 entry:
   %cmp = icmp slt i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/p10-setbcr-ri.ll b/llvm/test/CodeGen/PowerPC/p10-setbcr-ri.ll
index 17ac933b2c2b..b28edbea587f 100644
--- a/llvm/test/CodeGen/PowerPC/p10-setbcr-ri.ll
+++ b/llvm/test/CodeGen/PowerPC/p10-setbcr-ri.ll
@@ -137,7 +137,7 @@ define dso_local void @setbcr9(i8 %a) {
 entry:
   %cmp = icmp ne i8 %a, 1
   %conv1 = zext i1 %cmp to i8
-  store i8 %conv1, i8* @globalVal, align 1
+  store i8 %conv1, ptr @globalVal, align 1
   ret void
 }
 
@@ -159,7 +159,7 @@ define dso_local void @setbcr10(i32 %a) {
 entry:
   %cmp = icmp ne i32 %a, 1
   %conv1 = zext i1 %cmp to i32
-  store i32 %conv1, i32* @globalVal2, align 4
+  store i32 %conv1, ptr @globalVal2, align 4
   ret void
 }
 
@@ -181,7 +181,7 @@ define dso_local void @setbcr11(i64 %a) {
 entry:
   %cmp = icmp ne i64 %a, 1
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -205,7 +205,7 @@ define dso_local void @setbcr12(i16 %a) {
 entry:
   %cmp = icmp ne i16 %a, 1
   %conv1 = zext i1 %cmp to i16
-  store i16 %conv1, i16* @globalVal4, align 2
+  store i16 %conv1, ptr @globalVal4, align 2
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/p10-setbcr-rr.ll b/llvm/test/CodeGen/PowerPC/p10-setbcr-rr.ll
index 2cc7091fae18..136c2a9351e4 100644
--- a/llvm/test/CodeGen/PowerPC/p10-setbcr-rr.ll
+++ b/llvm/test/CodeGen/PowerPC/p10-setbcr-rr.ll
@@ -88,7 +88,7 @@ define dso_local void @setbcr5(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp sge i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -111,7 +111,7 @@ define dso_local void @setbcr6(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp sge i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @globalVal2, align 4
+  store i32 %conv, ptr @globalVal2, align 4
   ret void
 }
 
@@ -160,7 +160,7 @@ define dso_local void @setbcr9(i64 %a, i64 %b) {
 entry:
   %cmp = icmp sge i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -196,7 +196,7 @@ define dso_local void @setbcr11(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp sge i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -232,7 +232,7 @@ define dso_local void @setbcr13(i64 %a, i64 %b) {
 entry:
   %cmp = icmp uge i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3
+  store i64 %conv1, ptr @globalVal3
   ret void
 }
 
@@ -268,7 +268,7 @@ define dso_local void @setbcr15(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp sle i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -304,7 +304,7 @@ define dso_local void @setbcr17(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp sle i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @globalVal2, align 4
+  store i32 %conv, ptr @globalVal2, align 4
   ret void
 }
 
@@ -340,7 +340,7 @@ define dso_local void @setbcr19(i64 %a, i64 %b) {
 entry:
   %cmp = icmp sle i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -376,7 +376,7 @@ define dso_local void @setbcr21(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp sle i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -412,7 +412,7 @@ define dso_local void @setbcr23(i64 %a, i64 %b) {
 entry:
   %cmp = icmp ule i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3
+  store i64 %conv1, ptr @globalVal3
   ret void
 }
 
@@ -446,7 +446,7 @@ define dso_local void @setbcr25(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp ne i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -468,7 +468,7 @@ define dso_local void @setbcr26(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp ne i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @globalVal2, align 4
+  store i32 %conv, ptr @globalVal2, align 4
   ret void
 }
 
@@ -502,7 +502,7 @@ define dso_local void @setbcr28(i64 %a, i64 %b) {
 entry:
   %cmp = icmp ne i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -536,7 +536,7 @@ define dso_local void @setbcr30(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp ne i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -570,7 +570,7 @@ define dso_local void @setbcr32(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp ne i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -604,7 +604,7 @@ define dso_local void @setbcr34(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp ne i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @globalVal2, align 4
+  store i32 %conv, ptr @globalVal2, align 4
   ret void
 }
 
@@ -638,7 +638,7 @@ define dso_local void @setbcr36(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp ne i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -672,7 +672,7 @@ define dso_local void @setbcr38(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp sge i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -706,7 +706,7 @@ define dso_local void @setbcr40(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp sge i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @globalVal2, align 4
+  store i32 %conv, ptr @globalVal2, align 4
   ret void
 }
 
@@ -740,7 +740,7 @@ define dso_local void @setbcr42(i64 %a, i64 %b) {
 entry:
   %cmp = icmp sge i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -774,7 +774,7 @@ define dso_local void @setbcr44(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp sge i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -810,7 +810,7 @@ define dso_local void @setbcr46(i64 %a, i64 %b) {
 entry:
   %cmp = icmp uge i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3
+  store i64 %conv1, ptr @globalVal3
   ret void
 }
 
@@ -844,7 +844,7 @@ define dso_local void @setbcr48(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp sle i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -878,7 +878,7 @@ define dso_local void @setbcr50(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp sle i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @globalVal2, align 4
+  store i32 %conv, ptr @globalVal2, align 4
   ret void
 }
 
@@ -914,7 +914,7 @@ define dso_local void @setbcr52(i64 %a, i64 %b) {
 entry:
   %cmp = icmp sle i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -948,7 +948,7 @@ define dso_local void @setbcr54(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp sle i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -984,7 +984,7 @@ define dso_local void @setbcr56(i64 %a, i64 %b) {
 entry:
   %cmp = icmp ule i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3
+  store i64 %conv1, ptr @globalVal3
   ret void
 }
 
@@ -1018,7 +1018,7 @@ define dso_local void @setbcr58(i64 %a, i64 %b) {
 entry:
   %cmp = icmp ne i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -1052,6 +1052,6 @@ define dso_local void @setbcr60(i64 %a, i64 %b) {
 entry:
   %cmp = icmp ne i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/p10-setnbc-ri.ll b/llvm/test/CodeGen/PowerPC/p10-setnbc-ri.ll
index c6b5fc3daafe..04cbf971df00 100644
--- a/llvm/test/CodeGen/PowerPC/p10-setnbc-ri.ll
+++ b/llvm/test/CodeGen/PowerPC/p10-setnbc-ri.ll
@@ -137,7 +137,7 @@ define dso_local void @setnbc9(i8 %a) {
 entry:
   %cmp = icmp slt i8 %a, 1
   %conv1 = sext i1 %cmp to i8
-  store i8 %conv1, i8* @globalVal, align 1
+  store i8 %conv1, ptr @globalVal, align 1
   ret void
 }
 
@@ -159,7 +159,7 @@ define dso_local void @setnbc10(i32 %a) {
 entry:
   %cmp = icmp slt i32 %a, 1
   %conv1 = sext i1 %cmp to i32
-  store i32 %conv1, i32* @globalVal2, align 4
+  store i32 %conv1, ptr @globalVal2, align 4
   ret void
 }
 
@@ -181,7 +181,7 @@ define dso_local void @setnbc11(i64 %a) {
 entry:
   %cmp = icmp slt i64 %a, 1
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -205,7 +205,7 @@ define dso_local void @setnbc12(i16 %a) {
 entry:
   %cmp = icmp slt i16 %a, 1
   %conv1 = sext i1 %cmp to i16
-  store i16 %conv1, i16* @globalVal4, align 2
+  store i16 %conv1, ptr @globalVal4, align 2
   ret void
 }
 
@@ -329,7 +329,7 @@ define dso_local void @setnbc21(i8 %a) {
 entry:
   %cmp = icmp sgt i8 %a, 1
   %conv1 = sext i1 %cmp to i8
-  store i8 %conv1, i8* @globalVal, align 1
+  store i8 %conv1, ptr @globalVal, align 1
   ret void
 }
 
@@ -351,7 +351,7 @@ define dso_local void @setnbc22(i32 %a) {
 entry:
   %cmp = icmp sgt i32 %a, 1
   %conv1 = sext i1 %cmp to i32
-  store i32 %conv1, i32* @globalVal2, align 4
+  store i32 %conv1, ptr @globalVal2, align 4
   ret void
 }
 
@@ -373,7 +373,7 @@ define dso_local void @setnbc23(i64 %a) {
 entry:
   %cmp = icmp sgt i64 %a, 1
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -397,7 +397,7 @@ define dso_local void @setnbc24(i16 %a) {
 entry:
   %cmp = icmp sgt i16 %a, 1
   %conv1 = sext i1 %cmp to i16
-  store i16 %conv1, i16* @globalVal4, align 2
+  store i16 %conv1, ptr @globalVal4, align 2
   ret void
 }
 
@@ -521,7 +521,7 @@ define dso_local void @setnbc33(i8 %a) {
 entry:
   %cmp = icmp eq i8 %a, 1
   %conv1 = sext i1 %cmp to i8
-  store i8 %conv1, i8* @globalVal, align 1
+  store i8 %conv1, ptr @globalVal, align 1
   ret void
 }
 
@@ -543,7 +543,7 @@ define dso_local void @setnbc34(i32 %a) {
 entry:
   %cmp = icmp eq i32 %a, 1
   %conv1 = sext i1 %cmp to i32
-  store i32 %conv1, i32* @globalVal2, align 4
+  store i32 %conv1, ptr @globalVal2, align 4
   ret void
 }
 
@@ -565,7 +565,7 @@ define dso_local void @setnbc35(i64 %a) {
 entry:
   %cmp = icmp eq i64 %a, 1
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -589,7 +589,7 @@ define dso_local void @setnbc36(i16 %a) {
 entry:
   %cmp = icmp eq i16 %a, 1
   %conv1 = sext i1 %cmp to i16
-  store i16 %conv1, i16* @globalVal4, align 2
+  store i16 %conv1, ptr @globalVal4, align 2
   ret void
 }
 
@@ -635,7 +635,7 @@ define dso_local void @setnbc39(i64 %a) {
 entry:
   %cmp = icmp ugt i64 %a, 1
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -825,7 +825,7 @@ define dso_local void @setnbc54(i8 %a) {
 entry:
   %cmp = icmp sgt i8 %a, 0
   %conv1 = sext i1 %cmp to i8
-  store i8 %conv1, i8* @globalVal, align 1
+  store i8 %conv1, ptr @globalVal, align 1
   ret void
 }
 
@@ -847,7 +847,7 @@ define dso_local void @setnbc55(i32 %a) {
 entry:
   %cmp = icmp sgt i32 %a, 0
   %conv1 = sext i1 %cmp to i32
-  store i32 %conv1, i32* @globalVal2, align 4
+  store i32 %conv1, ptr @globalVal2, align 4
   ret void
 }
 
@@ -869,7 +869,7 @@ define dso_local void @setnbc56(i64 %a) {
 entry:
   %cmp = icmp sgt i64 %a, 0
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -891,7 +891,7 @@ define dso_local void @setnbc57(i16 %a) {
 entry:
   %cmp = icmp sgt i16 %a, 0
   %conv1 = sext i1 %cmp to i16
-  store i16 %conv1, i16* @globalVal4, align 2
+  store i16 %conv1, ptr @globalVal4, align 2
   ret void
 }
 
@@ -1009,7 +1009,7 @@ define dso_local void @setnbc66(i8 %a) {
 entry:
   %cmp = icmp eq i8 %a, 0
   %conv1 = sext i1 %cmp to i8
-  store i8 %conv1, i8* @globalVal, align 1
+  store i8 %conv1, ptr @globalVal, align 1
   ret void
 }
 
@@ -1031,7 +1031,7 @@ define dso_local void @setnbc67(i32 %a) {
 entry:
   %cmp = icmp eq i32 %a, 0
   %conv1 = sext i1 %cmp to i32
-  store i32 %conv1, i32* @globalVal2, align 4
+  store i32 %conv1, ptr @globalVal2, align 4
   ret void
 }
 
@@ -1053,7 +1053,7 @@ define dso_local void @setnbc68(i64 %a) {
 entry:
   %cmp = icmp eq i64 %a, 0
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -1075,7 +1075,7 @@ define dso_local void @setnbc69(i16 %a) {
 entry:
   %cmp = icmp eq i16 %a, 0
   %conv1 = sext i1 %cmp to i16
-  store i16 %conv1, i16* @globalVal4, align 2
+  store i16 %conv1, ptr @globalVal4, align 2
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/p10-setnbc-rr.ll b/llvm/test/CodeGen/PowerPC/p10-setnbc-rr.ll
index 5fd286ebdcaf..405a3851bffc 100644
--- a/llvm/test/CodeGen/PowerPC/p10-setnbc-rr.ll
+++ b/llvm/test/CodeGen/PowerPC/p10-setnbc-rr.ll
@@ -84,7 +84,7 @@ define dso_local void @setnbc5(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp eq i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -106,7 +106,7 @@ define dso_local void @setnbc6(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp eq i32 %a, %b
   %conv = sext i1 %cmp to i32
-  store i32 %conv, i32* @globalVal2, align 4
+  store i32 %conv, ptr @globalVal2, align 4
   ret void
 }
 
@@ -152,7 +152,7 @@ define dso_local void @setnbc9(i64 %a, i64 %b) {
 entry:
   %cmp = icmp eq i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -186,7 +186,7 @@ define dso_local void @setnbc11(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp eq i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -220,7 +220,7 @@ define dso_local void @setnbc13(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp eq i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -254,7 +254,7 @@ define dso_local void @setnbc15(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp eq i32 %a, %b
   %conv = sext i1 %cmp to i32
-  store i32 %conv, i32* @globalVal2, align 4
+  store i32 %conv, ptr @globalVal2, align 4
   ret void
 }
 
@@ -288,7 +288,7 @@ define dso_local void @setnbc17(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp eq i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -322,7 +322,7 @@ define dso_local void @setnbc19(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp sgt i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -344,7 +344,7 @@ define dso_local void @setnbc20(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp sgt i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @globalVal2, align 4
+  store i32 %sub, ptr @globalVal2, align 4
   ret void
 }
 
@@ -378,7 +378,7 @@ define dso_local void @setnbc22(i64 %a, i64 %b) {
 entry:
   %cmp = icmp sgt i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -412,7 +412,7 @@ define dso_local void @setnbc24(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp sgt i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -446,7 +446,7 @@ define dso_local void @setnbc26(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp ugt i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -480,7 +480,7 @@ define dso_local void @setnbc28(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp ugt i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @globalVal2, align 4
+  store i32 %sub, ptr @globalVal2, align 4
   ret void
 }
 
@@ -514,7 +514,7 @@ define dso_local void @setnbc30(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp ugt i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -548,7 +548,7 @@ define dso_local void @setnbc32(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp slt i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -570,7 +570,7 @@ define dso_local void @setnbc33(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp slt i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @globalVal2, align 4
+  store i32 %sub, ptr @globalVal2, align 4
   ret void
 }
 
@@ -616,7 +616,7 @@ define dso_local void @setnbc36(i64 %a, i64 %b) {
 entry:
   %cmp = icmp slt i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -650,7 +650,7 @@ define dso_local void @setnbc38(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp slt i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -684,7 +684,7 @@ define dso_local void @setnbc40(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp ult i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -718,7 +718,7 @@ define dso_local void @setnbc42(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp ult i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @globalVal2, align 4
+  store i32 %sub, ptr @globalVal2, align 4
   ret void
 }
 
@@ -752,7 +752,7 @@ define dso_local void @setnbc44(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp ult i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -786,7 +786,7 @@ define dso_local void @setnbc46(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp eq i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -820,7 +820,7 @@ define dso_local void @setnbc48(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp eq i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @globalVal2, align 4
+  store i32 %sub, ptr @globalVal2, align 4
   ret void
 }
 
@@ -842,7 +842,7 @@ define dso_local void @setnbc49(i64 %a, i64 %b) {
 entry:
   %cmp = icmp eq i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -876,7 +876,7 @@ define dso_local void @setnbc51(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp eq i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -910,7 +910,7 @@ define dso_local void @setnbc53(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp eq i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -944,7 +944,7 @@ define dso_local void @setnbc55(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp eq i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @globalVal2, align 4
+  store i32 %sub, ptr @globalVal2, align 4
   ret void
 }
 
@@ -978,7 +978,7 @@ define dso_local void @setnbc57(i64 %a, i64 %b) {
 entry:
   %cmp = icmp eq i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -1012,7 +1012,7 @@ define dso_local void @setnbc59(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp eq i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -1046,7 +1046,7 @@ define dso_local void @setnbc61(i64 %a, i64 %b) {
 entry:
   %cmp = icmp sgt i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -1080,7 +1080,7 @@ define dso_local void @setnbc63(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp ugt i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -1114,7 +1114,7 @@ define dso_local void @setnbc65(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp ugt i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @globalVal2, align 4
+  store i32 %sub, ptr @globalVal2, align 4
   ret void
 }
 
@@ -1148,7 +1148,7 @@ define dso_local void @setnbc67(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp ugt i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -1182,7 +1182,7 @@ define dso_local void @setnbc69(i64 %a, i64 %b) {
 entry:
   %cmp = icmp slt i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -1216,7 +1216,7 @@ define dso_local void @setnbc71(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp ult i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -1250,7 +1250,7 @@ define dso_local void @setnbc73(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp ult i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @globalVal2, align 4
+  store i32 %sub, ptr @globalVal2, align 4
   ret void
 }
 
@@ -1284,7 +1284,7 @@ define dso_local void @setnbc75(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp ult i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/p10-setnbcr-ri.ll b/llvm/test/CodeGen/PowerPC/p10-setnbcr-ri.ll
index 68525ca16306..8c058cb245be 100644
--- a/llvm/test/CodeGen/PowerPC/p10-setnbcr-ri.ll
+++ b/llvm/test/CodeGen/PowerPC/p10-setnbcr-ri.ll
@@ -131,7 +131,7 @@ define dso_local void @setnbcr9(i8 %a) {
 entry:
   %cmp = icmp uge i8 %a, 1
   %conv1 = sext i1 %cmp to i8
-  store i8 %conv1, i8* @globalVal, align 1
+  store i8 %conv1, ptr @globalVal, align 1
   ret void
 }
 
@@ -153,7 +153,7 @@ define dso_local void @setnbcr10(i32 %a) {
 entry:
   %cmp = icmp uge i32 %a, 1
   %conv1 = sext i1 %cmp to i32
-  store i32 %conv1, i32* @globalVal2, align 4
+  store i32 %conv1, ptr @globalVal2, align 4
   ret void
 }
 
@@ -175,7 +175,7 @@ define dso_local void @setnbcr11(i64 %a) {
 entry:
   %cmp = icmp uge i64 %a, 1
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -197,7 +197,7 @@ define dso_local void @setnbcr12(i16 %a) {
 entry:
   %cmp = icmp uge i16 %a, 1
   %conv1 = sext i1 %cmp to i16
-  store i16 %conv1, i16* @globalVal4, align 2
+  store i16 %conv1, ptr @globalVal4, align 2
   ret void
 }
 
@@ -321,7 +321,7 @@ define dso_local void @setnbcr21(i8 %a) {
 entry:
   %cmp = icmp ne i8 %a, 1
   %conv1 = sext i1 %cmp to i8
-  store i8 %conv1, i8* @globalVal, align 1
+  store i8 %conv1, ptr @globalVal, align 1
   ret void
 }
 
@@ -343,7 +343,7 @@ define dso_local void @setnbcr22(i32 %a) {
 entry:
   %cmp = icmp ne i32 %a, 1
   %conv1 = sext i1 %cmp to i32
-  store i32 %conv1, i32* @globalVal2, align 4
+  store i32 %conv1, ptr @globalVal2, align 4
   ret void
 }
 
@@ -365,7 +365,7 @@ define dso_local void @setnbcr23(i64 %a) {
 entry:
   %cmp = icmp ne i64 %a, 1
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -389,7 +389,7 @@ define dso_local void @setnbcr24(i16 %a) {
 entry:
   %cmp = icmp ne i16 %a, 1
   %conv1 = sext i1 %cmp to i16
-  store i16 %conv1, i16* @globalVal4, align 2
+  store i16 %conv1, ptr @globalVal4, align 2
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/p10-setnbcr-rr.ll b/llvm/test/CodeGen/PowerPC/p10-setnbcr-rr.ll
index d648f522e402..af0086c42f04 100644
--- a/llvm/test/CodeGen/PowerPC/p10-setnbcr-rr.ll
+++ b/llvm/test/CodeGen/PowerPC/p10-setnbcr-rr.ll
@@ -83,7 +83,7 @@ define dso_local void @setnbcr5(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp sge i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -105,7 +105,7 @@ define dso_local void @setnbcr6(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp sge i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @globalVal2, align 4
+  store i32 %sub, ptr @globalVal2, align 4
   ret void
 }
 
@@ -151,7 +151,7 @@ define dso_local void @setnbcr9(i64 %a, i64 %b) {
 entry:
   %cmp = icmp sge i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -185,7 +185,7 @@ define dso_local void @setnbcr11(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp sge i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -219,7 +219,7 @@ define dso_local void @setnbcr13(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp uge i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal
+  store i8 %conv3, ptr @globalVal
   ret void
 }
 
@@ -253,7 +253,7 @@ define dso_local void @setnbcr15(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp uge i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @globalVal2
+  store i32 %sub, ptr @globalVal2
   ret void
 }
 
@@ -287,7 +287,7 @@ define dso_local void @setnbcr17(i64 %a, i64 %b) {
 entry:
   %cmp = icmp uge i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3
+  store i64 %conv1, ptr @globalVal3
   ret void
 }
 
@@ -321,7 +321,7 @@ define dso_local void @setnbcr19(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp uge i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4
+  store i16 %conv3, ptr @globalVal4
   ret void
 }
 
@@ -355,7 +355,7 @@ define dso_local void @setnbcr21(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp sle i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -389,7 +389,7 @@ define dso_local void @setnbcr23(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp sle i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @globalVal2, align 4
+  store i32 %sub, ptr @globalVal2, align 4
   ret void
 }
 
@@ -423,7 +423,7 @@ define dso_local void @setnbcr25(i64 %a, i64 %b) {
 entry:
   %cmp = icmp sle i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -457,7 +457,7 @@ define dso_local void @setnbcr27(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp sle i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -491,7 +491,7 @@ define dso_local void @setnbcr29(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp ule i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal
+  store i8 %conv3, ptr @globalVal
   ret void
 }
 
@@ -525,7 +525,7 @@ define dso_local void @setnbcr31(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp ule i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @globalVal2
+  store i32 %sub, ptr @globalVal2
   ret void
 }
 
@@ -559,7 +559,7 @@ define dso_local void @setnbcr33(i64 %a, i64 %b) {
 entry:
   %cmp = icmp ule i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3
+  store i64 %conv1, ptr @globalVal3
   ret void
 }
 
@@ -593,7 +593,7 @@ define dso_local void @setnbcr35(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp ule i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4
+  store i16 %conv3, ptr @globalVal4
   ret void
 }
 
@@ -627,7 +627,7 @@ define dso_local void @setnbcr37(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp ne i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -649,7 +649,7 @@ define dso_local void @setnbcr38(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp ne i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @globalVal2, align 4
+  store i32 %sub, ptr @globalVal2, align 4
   ret void
 }
 
@@ -683,7 +683,7 @@ define dso_local void @setnbcr40(i64 %a, i64 %b) {
 entry:
   %cmp = icmp ne i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -717,7 +717,7 @@ define dso_local void @setnbcr42(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp ne i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -751,7 +751,7 @@ define dso_local void @sernbcr44(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp ne i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -785,7 +785,7 @@ define dso_local void @setnbcr46(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp ne i32 %a, %b
   %conv = sext i1 %cmp to i32
-  store i32 %conv, i32* @globalVal2, align 4
+  store i32 %conv, ptr @globalVal2, align 4
   ret void
 }
 
@@ -819,7 +819,7 @@ define dso_local void @setnbcr48(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp ne i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -853,7 +853,7 @@ define dso_local void @setnbcr50(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp sge i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -887,7 +887,7 @@ define dso_local void @setnbcr52(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp sge i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @globalVal2, align 4
+  store i32 %sub, ptr @globalVal2, align 4
   ret void
 }
 
@@ -921,7 +921,7 @@ define dso_local void @setnbcr54(i64 %a, i64 %b) {
 entry:
   %cmp = icmp sge i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -955,7 +955,7 @@ define dso_local void @setnbcr56(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp sge i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -989,7 +989,7 @@ define dso_local void @setnbcr58(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp uge i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal
+  store i8 %conv3, ptr @globalVal
   ret void
 }
 
@@ -1023,7 +1023,7 @@ define dso_local void @setnbcr60(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp uge i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @globalVal2
+  store i32 %sub, ptr @globalVal2
   ret void
 }
 
@@ -1057,7 +1057,7 @@ define dso_local void @setnbcr62(i64 %a, i64 %b) {
 entry:
   %cmp = icmp uge i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3
+  store i64 %conv1, ptr @globalVal3
   ret void
 }
 
@@ -1091,7 +1091,7 @@ define dso_local void @setnbcr64(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp uge i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4
+  store i16 %conv3, ptr @globalVal4
   ret void
 }
 
@@ -1125,7 +1125,7 @@ define dso_local void @setnbcr66(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp sle i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal, align 1
+  store i8 %conv3, ptr @globalVal, align 1
   ret void
 }
 
@@ -1159,7 +1159,7 @@ define dso_local void @setnbcr68(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp sle i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @globalVal2, align 4
+  store i32 %sub, ptr @globalVal2, align 4
   ret void
 }
 
@@ -1193,7 +1193,7 @@ define dso_local void @setnbcr70(i64 %a, i64 %b) {
 entry:
   %cmp = icmp sle i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 
@@ -1227,7 +1227,7 @@ define dso_local void @setnbcr72(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp sle i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4, align 2
+  store i16 %conv3, ptr @globalVal4, align 2
   ret void
 }
 
@@ -1261,7 +1261,7 @@ define dso_local void @setnbcr74(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp ule i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @globalVal
+  store i8 %conv3, ptr @globalVal
   ret void
 }
 
@@ -1295,7 +1295,7 @@ define dso_local void @setnbcr76(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp ule i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @globalVal2
+  store i32 %sub, ptr @globalVal2
   ret void
 }
 
@@ -1329,7 +1329,7 @@ define dso_local void @setnbcr78(i64 %a, i64 %b) {
 entry:
   %cmp = icmp ule i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3
+  store i64 %conv1, ptr @globalVal3
   ret void
 }
 
@@ -1363,7 +1363,7 @@ define dso_local void @setnbcr80(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp ule i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @globalVal4
+  store i16 %conv3, ptr @globalVal4
   ret void
 }
 
@@ -1409,7 +1409,7 @@ define dso_local void @setnbcr83(i64 %a, i64 %b) {
 entry:
   %cmp = icmp ne i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @globalVal3, align 8
+  store i64 %conv1, ptr @globalVal3, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/p10-spill-creq.ll b/llvm/test/CodeGen/PowerPC/p10-spill-creq.ll
index e3d54cc38dd8..fc5ef88e3379 100644
--- a/llvm/test/CodeGen/PowerPC/p10-spill-creq.ll
+++ b/llvm/test/CodeGen/PowerPC/p10-spill-creq.ll
@@ -15,13 +15,13 @@
 ; bit of any CR field is spilled. We need to test the spilling of a CR bit
 ; other than the LT bit. Hence this test case is rather complex.
 
-%0 = type { i32, %1*, %0*, [1 x i8], i8*, i8*, i8*, i8*, i64, i32, [20 x i8] }
-%1 = type { %1*, %0*, i32 }
-%2 = type { [200 x i8], [200 x i8], %3*, %3*, %4*, %4*, %4*, %4*, %4*, i64 }
-%3 = type { i64, i32, %3*, %3*, %3*, %3*, %4*, %4*, %4*, %4*, i64, i32, i32 }
-%4 = type { i32, i64, %3*, %3*, i16, %4*, %4*, i64, i64 }
+%0 = type { i32, ptr, ptr, [1 x i8], ptr, ptr, ptr, ptr, i64, i32, [20 x i8] }
+%1 = type { ptr, ptr, i32 }
+%2 = type { [200 x i8], [200 x i8], ptr, ptr, ptr, ptr, ptr, ptr, ptr, i64 }
+%3 = type { i64, i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i64, i32, i32 }
+%4 = type { i32, i64, ptr, ptr, i16, ptr, ptr, i64, i64 }
 
-define dso_local double @P10_Spill_CR_EQ(%2* %arg) local_unnamed_addr #0 {
+define dso_local double @P10_Spill_CR_EQ(ptr %arg) local_unnamed_addr #0 {
 ; CHECK-LABEL: P10_Spill_CR_EQ:
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    mfcr r12
@@ -187,10 +187,10 @@ define dso_local double @P10_Spill_CR_EQ(%2* %arg) local_unnamed_addr #0 {
 ; CHECK-NEXT:    xsadddp f1, f0, f1
 ; CHECK-NEXT:    blr
 bb:
-  %tmp = getelementptr inbounds %4, %4* null, i64 undef, i32 7
-  %tmp1 = load i64, i64* undef, align 8
-  %tmp2 = load i64, i64* null, align 8
-  %tmp3 = load i64, i64* %tmp, align 8
+  %tmp = getelementptr inbounds %4, ptr null, i64 undef, i32 7
+  %tmp1 = load i64, ptr undef, align 8
+  %tmp2 = load i64, ptr null, align 8
+  %tmp3 = load i64, ptr %tmp, align 8
   %tmp4 = icmp eq i64 %tmp1, 0
   %tmp5 = icmp eq i64 %tmp2, 0
   %tmp6 = icmp eq i64 %tmp3, 0
@@ -200,7 +200,7 @@ bb:
   br i1 %tmp4, label %bb12, label %bb10
 
 bb10:                                             ; preds = %bb
-  %tmp11 = load i32, i32* undef, align 8
+  %tmp11 = load i32, ptr undef, align 8
   br label %bb12
 
 bb12:                                             ; preds = %bb10, %bb
@@ -208,7 +208,7 @@ bb12:                                             ; preds = %bb10, %bb
   br i1 %tmp5, label %bb16, label %bb14
 
 bb14:                                             ; preds = %bb12
-  %tmp15 = load i32, i32* undef, align 8
+  %tmp15 = load i32, ptr undef, align 8
   br label %bb16
 
 bb16:                                             ; preds = %bb14, %bb12
@@ -216,7 +216,7 @@ bb16:                                             ; preds = %bb14, %bb12
   br i1 %tmp6, label %bb20, label %bb18
 
 bb18:                                             ; preds = %bb16
-  %tmp19 = load i32, i32* undef, align 8
+  %tmp19 = load i32, ptr undef, align 8
   br label %bb20
 
 bb20:                                             ; preds = %bb18, %bb16
@@ -236,7 +236,7 @@ bb20:                                             ; preds = %bb18, %bb16
   br i1 %tmp31, label %bb34, label %bb36
 
 bb34:                                             ; preds = %bb20
-  %tmp35 = load i64, i64* undef, align 8
+  %tmp35 = load i64, ptr undef, align 8
   br label %bb36
 
 bb36:                                             ; preds = %bb34, %bb20
@@ -244,7 +244,7 @@ bb36:                                             ; preds = %bb34, %bb20
   br i1 %tmp33, label %bb38, label %bb40
 
 bb38:                                             ; preds = %bb36
-  %tmp39 = load i64, i64* undef, align 8
+  %tmp39 = load i64, ptr undef, align 8
   br label %bb40
 
 bb40:                                             ; preds = %bb38, %bb36
@@ -258,15 +258,15 @@ bb40:                                             ; preds = %bb38, %bb36
   br i1 %tmp47, label %bb48, label %bb50
 
 bb48:                                             ; preds = %bb40
-  %tmp49 = load %3*, %3** undef, align 8
+  %tmp49 = load ptr, ptr undef, align 8
   br label %bb50
 
 bb50:                                             ; preds = %bb48, %bb40
-  %tmp51 = phi %3* [ undef, %bb40 ], [ %tmp49, %bb48 ]
+  %tmp51 = phi ptr [ undef, %bb40 ], [ %tmp49, %bb48 ]
   br i1 %tmp45, label %bb52, label %bb54
 
 bb52:                                             ; preds = %bb50
-  %tmp53 = load i32, i32* undef, align 8
+  %tmp53 = load i32, ptr undef, align 8
   br label %bb54
 
 bb54:                                             ; preds = %bb52, %bb50
@@ -274,13 +274,13 @@ bb54:                                             ; preds = %bb52, %bb50
   br i1 %tmp46, label %bb56, label %bb58
 
 bb56:                                             ; preds = %bb54
-  %tmp57 = load i32, i32* undef, align 8
+  %tmp57 = load i32, ptr undef, align 8
   br label %bb58
 
 bb58:                                             ; preds = %bb56, %bb54
   %tmp59 = phi i32 [ undef, %bb54 ], [ %tmp57, %bb56 ]
-  %tmp60 = getelementptr inbounds %3, %3* %tmp51, i64 0, i32 12
-  %tmp61 = load i32, i32* %tmp60, align 8
+  %tmp60 = getelementptr inbounds %3, ptr %tmp51, i64 0, i32 12
+  %tmp61 = load i32, ptr %tmp60, align 8
   %tmp62 = icmp slt i32 %tmp55, 1
   %tmp63 = icmp slt i32 %tmp59, 1
   %tmp64 = icmp slt i32 %tmp61, 1
@@ -290,12 +290,12 @@ bb58:                                             ; preds = %bb56, %bb54
   br i1 %tmp65, label %bb68, label %bb70
 
 bb68:                                             ; preds = %bb58
-  %tmp69 = load i64, i64* undef, align 8
+  %tmp69 = load i64, ptr undef, align 8
   br label %bb70
 
 bb70:                                             ; preds = %bb68, %bb58
   %tmp71 = phi i64 [ undef, %bb58 ], [ %tmp69, %bb68 ]
-  %tmp72 = load i64, i64* undef, align 8
+  %tmp72 = load i64, ptr undef, align 8
   %tmp73 = xor i1 %tmp25, true
   %tmp74 = xor i1 %tmp26, true
   %tmp75 = xor i1 %tmp27, true

diff  --git a/llvm/test/CodeGen/PowerPC/p10-spill-crgt.ll b/llvm/test/CodeGen/PowerPC/p10-spill-crgt.ll
index 4c4d9c9a046d..80712411e94b 100644
--- a/llvm/test/CodeGen/PowerPC/p10-spill-crgt.ll
+++ b/llvm/test/CodeGen/PowerPC/p10-spill-crgt.ll
@@ -401,7 +401,7 @@ define dso_local fastcc void @P10_Spill_CR_GT() unnamed_addr {
 ; CHECK-BE-NEXT:    bc 4, 4*cr4+eq, .LBB0_34
 ; CHECK-BE-NEXT:    b .LBB0_35
 bb:
-  %tmp = load i32, i32* undef, align 8
+  %tmp = load i32, ptr undef, align 8
   %tmp1 = and i32 %tmp, 16
   %tmp2 = icmp ne i32 %tmp1, 0
   %tmp3 = and i32 %tmp, 32
@@ -411,7 +411,7 @@ bb:
 bb5:                                              ; preds = %bb63, %bb
   %tmp6 = phi i32 [ 0, %bb ], [ %tmp64, %bb63 ]
   %tmp7 = phi i1 [ %tmp4, %bb ], [ undef, %bb63 ]
-  %tmp8 = load i32, i32* undef, align 8
+  %tmp8 = load i32, ptr undef, align 8
   br i1 %tmp2, label %bb9, label %bb10
 
 bb9:                                              ; preds = %bb5
@@ -528,16 +528,16 @@ bb32:                                             ; preds = %bb40, %bb29
   br i1 %tmp7, label %bb33, label %bb36
 
 bb33:                                             ; preds = %bb32
-  %tmp34 = getelementptr inbounds i8, i8* null, i64 -1
-  %tmp35 = select i1 %tmp12, i8* %tmp34, i8* null
-  store i8 0, i8* %tmp35, align 1
+  %tmp34 = getelementptr inbounds i8, ptr null, i64 -1
+  %tmp35 = select i1 %tmp12, ptr %tmp34, ptr null
+  store i8 0, ptr %tmp35, align 1
   br label %bb36
 
 bb36:                                             ; preds = %bb33, %bb32
   br i1 %tmp30, label %bb37, label %bb38
 
 bb37:                                             ; preds = %bb36
-  store i16 undef, i16* null, align 2
+  store i16 undef, ptr null, align 2
   br label %bb38
 
 bb38:                                             ; preds = %bb37, %bb36
@@ -558,9 +558,9 @@ bb42:                                             ; preds = %bb42, %bb41
 
 bb43:                                             ; preds = %bb10, %bb10
   call void @call_1()
-  %tmp44 = getelementptr inbounds i8, i8* null, i64 -1
-  %tmp45 = select i1 %tmp12, i8* %tmp44, i8* null
-  store i8 0, i8* %tmp45, align 1
+  %tmp44 = getelementptr inbounds i8, ptr null, i64 -1
+  %tmp45 = select i1 %tmp12, ptr %tmp44, ptr null
+  store i8 0, ptr %tmp45, align 1
   br label %bb63
 
 bb46:                                             ; preds = %bb46, %bb10

diff  --git a/llvm/test/CodeGen/PowerPC/p10-spill-crlt.ll b/llvm/test/CodeGen/PowerPC/p10-spill-crlt.ll
index 6c6b26bc24e4..5c1c1d90966f 100644
--- a/llvm/test/CodeGen/PowerPC/p10-spill-crlt.ll
+++ b/llvm/test/CodeGen/PowerPC/p10-spill-crlt.ll
@@ -16,10 +16,10 @@
 ; other than the LT bit. Hence this test case is rather complex.
 
 %0 = type { %1 }
-%1 = type { %0*, %0*, %0*, i32 }
+%1 = type { ptr, ptr, ptr, i32 }
 
 @call_1 = external dso_local unnamed_addr global i32, align 4
-declare %0* @call_2() local_unnamed_addr
+declare ptr @call_2() local_unnamed_addr
 declare i32 @call_3() local_unnamed_addr
 declare void @call_4() local_unnamed_addr
 
@@ -156,8 +156,8 @@ define dso_local void @P10_Spill_CR_LT() local_unnamed_addr {
 ; CHECK-BE-NEXT:  .LBB0_13: # %bb3
 ; CHECK-BE-NEXT:  .LBB0_14: # %bb2
 bb:
-  %tmp = tail call %0* @call_2()
-  %tmp1 = icmp ne %0* %tmp, null
+  %tmp = tail call ptr @call_2()
+  %tmp1 = icmp ne ptr %tmp, null
   switch i32 undef, label %bb4 [
     i32 3, label %bb2
     i32 2, label %bb3
@@ -170,13 +170,13 @@ bb3:                                              ; preds = %bb
   unreachable
 
 bb4:                                              ; preds = %bb
-  %tmp5 = load i64, i64* undef, align 8
+  %tmp5 = load i64, ptr undef, align 8
   %tmp6 = trunc i64 %tmp5 to i32
   %tmp7 = add i32 0, %tmp6
   %tmp8 = icmp sgt i32 %tmp7, 0
   %tmp9 = icmp eq i8 0, 0
   %tmp10 = zext i1 %tmp9 to i32
-  %tmp11 = icmp eq %0* %tmp, null
+  %tmp11 = icmp eq ptr %tmp, null
   br label %bb12
 
 bb12:                                             ; preds = %bb38, %bb4
@@ -200,7 +200,7 @@ bb23:                                             ; preds = %bb18
   br label %bb24
 
 bb24:                                             ; preds = %bb23
-  %tmp25 = load i32, i32* @call_1, align 4
+  %tmp25 = load i32, ptr @call_1, align 4
   %tmp26 = icmp eq i32 %tmp25, 0
   br i1 %tmp26, label %bb30, label %bb27
 

diff  --git a/llvm/test/CodeGen/PowerPC/p10-spill-crun.ll b/llvm/test/CodeGen/PowerPC/p10-spill-crun.ll
index 64570379ea0c..4ca2dc5dbe04 100644
--- a/llvm/test/CodeGen/PowerPC/p10-spill-crun.ll
+++ b/llvm/test/CodeGen/PowerPC/p10-spill-crun.ll
@@ -17,23 +17,23 @@
 
 %0 = type { i32, [768 x i8], [768 x i8], [1024 x i8], [768 x i8], [768 x i8], [768 x i8], [768 x i8], [768 x i8], [1024 x i8], [1024 x i8], i32, i16, i16, i16, i16, i16, i16, i32, i32, i32, i16, i16, i32, i32, i32, i32, i32, i32, i32, i16, i16, i16, i16, [64 x i8], i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i32, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i8, i8, i8, i8, i16, i16, i16, i16, i16, i16, float, float, i32, i16, i16, float, i16, i16, i16, i16}
 %1 = type opaque
-%2 = type { i8* }
-%3 = type { %3*, %3*, %4* (i8*)*, %2, i32, %2, %2*, i8*, double*, float*, i8*, i8*, %4* }
-%4 = type { %4*, %4*, %4*, i32, i32, i32, i32, i32, i8*, [3 x float], i8, [64 x i8] }
+%2 = type { ptr }
+%3 = type { ptr, ptr, ptr, %2, i32, %2, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
+%4 = type { ptr, ptr, ptr, i32, i32, i32, i32, i32, ptr, [3 x float], i8, [64 x i8] }
 
 @global_1 = external dso_local unnamed_addr constant [1 x i8], align 1
 @global_2 = external local_unnamed_addr global %0, align 8
-@global_3 = external local_unnamed_addr global i8* (i64, i8*)*, align 8
+@global_3 = external local_unnamed_addr global ptr, align 8
 @global_4 = external dso_local unnamed_addr constant [14 x i8], align 1
 
-declare i8 @call_1(%1*) local_unnamed_addr
-declare i32 @call_2(%2*, %1*) local_unnamed_addr
-declare i32 @call_3(%2*, %1*) local_unnamed_addr
-declare %3* @call_4(%4*, i32, i32, i32, i32, i32, i16, i16, %2*, %1*, i32, float, float, float, float, i8*) local_unnamed_addr
-declare i32 @call_5(i8*) local_unnamed_addr
-declare i8 @call_6(%1*, i32) local_unnamed_addr
+declare i8 @call_1(ptr) local_unnamed_addr
+declare i32 @call_2(ptr, ptr) local_unnamed_addr
+declare i32 @call_3(ptr, ptr) local_unnamed_addr
+declare ptr @call_4(ptr, i32, i32, i32, i32, i32, i16, i16, ptr, ptr, i32, float, float, float, float, ptr) local_unnamed_addr
+declare i32 @call_5(ptr) local_unnamed_addr
+declare i8 @call_6(ptr, i32) local_unnamed_addr
 
-define dso_local void @P10_Spill_CR_UN(%2* %arg, %1* %arg1, i32 %arg2) local_unnamed_addr {
+define dso_local void @P10_Spill_CR_UN(ptr %arg, ptr %arg1, i32 %arg2) local_unnamed_addr {
 ; CHECK-LABEL: P10_Spill_CR_UN:
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    mfcr r12
@@ -340,9 +340,9 @@ define dso_local void @P10_Spill_CR_UN(%2* %arg, %1* %arg1, i32 %arg2) local_unn
 ; CHECK-BE-NEXT:  .LBB0_19: # %bb55
 bb:
   %tmp = alloca [3 x i8], align 1
-  %tmp3 = tail call zeroext i8 @call_1(%1* %arg1)
+  %tmp3 = tail call zeroext i8 @call_1(ptr %arg1)
   %tmp4 = icmp ne i8 %tmp3, 0
-  %tmp5 = tail call signext i32 @call_2(%2* %arg, %1* %arg1)
+  %tmp5 = tail call signext i32 @call_2(ptr %arg, ptr %arg1)
   %tmp6 = and i32 %arg2, 16
   %tmp7 = icmp ne i32 %tmp6, 0
   br label %bb8
@@ -351,7 +351,7 @@ bb8:                                              ; preds = %bb
   br i1 undef, label %bb9, label %bb11
 
 bb9:                                              ; preds = %bb8
-  %tmp10 = call signext i32 @call_3(%2* %arg, %1* %arg1)
+  %tmp10 = call signext i32 @call_3(ptr %arg, ptr %arg1)
   br label %bb12
 
 bb11:                                             ; preds = %bb8
@@ -369,18 +369,16 @@ bb16:                                             ; preds = %bb12
   br i1 %tmp18, label %bb37, label %bb19
 
 bb19:                                             ; preds = %bb16
-  %tmp20 = getelementptr inbounds [3 x i8], [3 x i8]* %tmp, i64 0, i64 0
-  %tmp21 = load i8* (i64, i8*)*, i8* (i64, i8*)** @global_3, align 8
-  %tmp22 = call i8* %tmp21(i64 undef, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @global_4, i64 0, i64 0))
-  %tmp23 = bitcast i8* %tmp22 to i32*
-  %tmp24 = icmp eq i32* %tmp23, null
+  %tmp21 = load ptr, ptr @global_3, align 8
+  %tmp22 = call ptr %tmp21(i64 undef, ptr @global_4)
+  %tmp24 = icmp eq ptr %tmp22, null
   %tmp25 = icmp eq i32 %tmp13, 0
   %tmp26 = zext i32 %tmp5 to i64
   br label %bb27
 
 bb27:                                             ; preds = %bb34, %bb19
-  %tmp28 = call zeroext i8 @call_6(%1* %arg1, i32 signext undef)
-  store i8 %tmp28, i8* %tmp20, align 1
+  %tmp28 = call zeroext i8 @call_6(ptr %arg1, i32 signext undef)
+  store i8 %tmp28, ptr %tmp, align 1
   br label %bb29
 
 bb29:                                             ; preds = %bb27
@@ -406,8 +404,8 @@ bb36:                                             ; preds = %bb34
   br label %bb54
 
 bb37:                                             ; preds = %bb16
-  %tmp38 = load i32, i32* undef, align 8
-  %tmp39 = select i1 %tmp7, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @global_1, i64 0, i64 0), i8* null
+  %tmp38 = load i32, ptr undef, align 8
+  %tmp39 = select i1 %tmp7, ptr @global_1, ptr null
   %tmp40 = icmp ne i32 %tmp38, 0
   switch i32 undef, label %bb41 [
     i32 1, label %bb42
@@ -418,8 +416,8 @@ bb41:                                             ; preds = %bb37
   br label %bb50
 
 bb42:                                             ; preds = %bb37, %bb37
-  %tmp43 = call signext i32 @call_5(i8* %tmp39)
-  %tmp44 = load i16, i16* getelementptr inbounds (%0, %0* @global_2, i64 0, i32 81), align 4
+  %tmp43 = call signext i32 @call_5(ptr %tmp39)
+  %tmp44 = load i16, ptr getelementptr inbounds (%0, ptr @global_2, i64 0, i32 81), align 4
   %tmp45 = sitofp i16 %tmp44 to float
   %tmp46 = select i1 %tmp40, float 1.750000e+00, float 1.500000e+00
   %tmp47 = fmul fast float %tmp46, %tmp45
@@ -430,7 +428,7 @@ bb42:                                             ; preds = %bb37, %bb37
 bb50:                                             ; preds = %bb42, %bb41
   %tmp51 = phi i32 [ %tmp49, %bb42 ], [ undef, %bb41 ]
   %tmp52 = trunc i32 %tmp51 to i16
-  %tmp53 = call %3* @call_4(%4* nonnull undef, i32 signext 1024, i32 signext 0, i32 signext %tmp38, i32 signext 0, i32 signext 0, i16 signext %tmp52, i16 signext undef, %2* %arg, %1* %arg1, i32 signext -1, float 0.000000e+00, float undef, float -1.000000e+00, float -1.000000e+00, i8* null)
+  %tmp53 = call ptr @call_4(ptr nonnull undef, i32 signext 1024, i32 signext 0, i32 signext %tmp38, i32 signext 0, i32 signext 0, i16 signext %tmp52, i16 signext undef, ptr %arg, ptr %arg1, i32 signext -1, float 0.000000e+00, float undef, float -1.000000e+00, float -1.000000e+00, ptr null)
   br label %bb54
 
 bb54:                                             ; preds = %bb50, %bb36

diff  --git a/llvm/test/CodeGen/PowerPC/p8-isel-sched.ll b/llvm/test/CodeGen/PowerPC/p8-isel-sched.ll
index 1fc3ef2ed235..b7452bd385fa 100644
--- a/llvm/test/CodeGen/PowerPC/p8-isel-sched.ll
+++ b/llvm/test/CodeGen/PowerPC/p8-isel-sched.ll
@@ -4,21 +4,21 @@ target datalayout = "E-m:e-i64:64-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
 ; Function Attrs: nounwind
-define void @foo(i32* nocapture %r1, i32* nocapture %r2, i32* nocapture %r3, i32* nocapture %r4, i32 signext %a, i32 signext %b, i32 signext %c, i32 signext %d) #0 {
+define void @foo(ptr nocapture %r1, ptr nocapture %r2, ptr nocapture %r3, ptr nocapture %r4, i32 signext %a, i32 signext %b, i32 signext %c, i32 signext %d) #0 {
 entry:
   %tobool = icmp ne i32 %a, 0
   %cond = select i1 %tobool, i32 %b, i32 %c
-  store i32 %cond, i32* %r1, align 4
+  store i32 %cond, ptr %r1, align 4
   %cond5 = select i1 %tobool, i32 %b, i32 %d
-  store i32 %cond5, i32* %r2, align 4
+  store i32 %cond5, ptr %r2, align 4
   %add = add nsw i32 %b, 1
   %sub = add nsw i32 %d, -2
   %cond10 = select i1 %tobool, i32 %add, i32 %sub
-  store i32 %cond10, i32* %r3, align 4
+  store i32 %cond10, ptr %r3, align 4
   %add13 = add nsw i32 %b, 3
   %sub15 = add nsw i32 %d, -5
   %cond17 = select i1 %tobool, i32 %add13, i32 %sub15
-  store i32 %cond17, i32* %r4, align 4
+  store i32 %cond17, ptr %r4, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/p8-scalar_vector_conversions.ll b/llvm/test/CodeGen/PowerPC/p8-scalar_vector_conversions.ll
index edacc81287ae..29252e68fdf1 100644
--- a/llvm/test/CodeGen/PowerPC/p8-scalar_vector_conversions.ll
+++ b/llvm/test/CodeGen/PowerPC/p8-scalar_vector_conversions.ll
@@ -174,7 +174,7 @@ define <2 x double> @buildd() {
 ; CHECK-AIX-NEXT:    lxvdsx 34, 0, 3
 ; CHECK-AIX-NEXT:    blr
 entry:
-  %0 = load double, double* @d, align 8
+  %0 = load double, ptr @d, align 8
   %splat.splatinsert = insertelement <2 x double> undef, double %0, i32 0
   %splat.splat = shufflevector <2 x double> %splat.splatinsert, <2 x double> undef, <2 x i32> zeroinitializer
   ret <2 x double> %splat.splat

diff  --git a/llvm/test/CodeGen/PowerPC/p9-dform-load-alignment.ll b/llvm/test/CodeGen/PowerPC/p9-dform-load-alignment.ll
index 059125380f39..c9e6f1564a6c 100644
--- a/llvm/test/CodeGen/PowerPC/p9-dform-load-alignment.ll
+++ b/llvm/test/CodeGen/PowerPC/p9-dform-load-alignment.ll
@@ -12,8 +12,8 @@ define dso_local void @AlignDSForm() local_unnamed_addr {
 ; CHECK-NEXT:    ld r3, 0(r3)
 ; CHECK-NEXT:    std r3, 0(r3)
 entry:
-  %0 = load <4 x i16>, <4 x i16>* bitcast ([4 x i16]* @best8x8mode to <4 x i16>*), align 2
-  store <4 x i16> %0, <4 x i16>* undef, align 4
+  %0 = load <4 x i16>, ptr @best8x8mode, align 2
+  store <4 x i16> %0, ptr undef, align 4
   unreachable
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/p9-vbpermd.ll b/llvm/test/CodeGen/PowerPC/p9-vbpermd.ll
index 01fa563898d2..c8d655d0ce2d 100644
--- a/llvm/test/CodeGen/PowerPC/p9-vbpermd.ll
+++ b/llvm/test/CodeGen/PowerPC/p9-vbpermd.ll
@@ -23,10 +23,10 @@ define void @test1() {
 ; CHECK-NEXT:    stxv 34, 0(3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @vull, align 16
-  %1 = load <16 x i8>, <16 x i8>* @vuc, align 16
+  %0 = load <2 x i64>, ptr @vull, align 16
+  %1 = load <16 x i8>, ptr @vuc, align 16
   %2 = call <2 x i64> @llvm.ppc.altivec.vbpermd(<2 x i64> %0, <16 x i8> %1)
-  store <2 x i64> %2, <2 x i64>* @res_vull, align 16
+  store <2 x i64> %2, ptr @res_vull, align 16
   ret void
 }
 declare <2 x i64> @llvm.ppc.altivec.vbpermd(<2 x i64>, <16 x i8>)

diff  --git a/llvm/test/CodeGen/PowerPC/paired-vector-intrinsics.ll b/llvm/test/CodeGen/PowerPC/paired-vector-intrinsics.ll
index 89b0ad1c0cf8..1dcef38f634f 100644
--- a/llvm/test/CodeGen/PowerPC/paired-vector-intrinsics.ll
+++ b/llvm/test/CodeGen/PowerPC/paired-vector-intrinsics.ll
@@ -17,7 +17,7 @@
 
 ; assemble_pair
 declare <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8>, <16 x i8>)
-define void @ass_pair(<256 x i1>* %ptr, <16 x i8> %vc) {
+define void @ass_pair(ptr %ptr, <16 x i8> %vc) {
 ; CHECK-LABEL: ass_pair:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmr v3, v2
@@ -47,13 +47,13 @@ define void @ass_pair(<256 x i1>* %ptr, <16 x i8> %vc) {
 ; CHECK-BE-NOMMA-NEXT:    blr
 entry:
   %0 = tail call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> %vc, <16 x i8> %vc)
-  store <256 x i1> %0, <256 x i1>* %ptr, align 32
+  store <256 x i1> %0, ptr %ptr, align 32
   ret void
 }
 
 ; disassemble_pair
 declare { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1>)
-define void @disass_pair(<256 x i1>* %ptr1, <16 x i8>* %ptr2, <16 x i8>* %ptr3) {
+define void @disass_pair(ptr %ptr1, ptr %ptr2, ptr %ptr3) {
 ; CHECK-LABEL: disass_pair:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxv v3, 0(r3)
@@ -86,16 +86,16 @@ define void @disass_pair(<256 x i1>* %ptr1, <16 x i8>* %ptr2, <16 x i8>* %ptr3)
 ; CHECK-BE-NOMMA-NEXT:    stxv v3, 0(r5)
 ; CHECK-BE-NOMMA-NEXT:    blr
 entry:
-  %0 = load <256 x i1>, <256 x i1>* %ptr1, align 32
+  %0 = load <256 x i1>, ptr %ptr1, align 32
   %1 = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> %0)
   %2 = extractvalue { <16 x i8>, <16 x i8> } %1, 0
   %3 = extractvalue { <16 x i8>, <16 x i8> } %1, 1
-  store <16 x i8> %2, <16 x i8>* %ptr2, align 16
-  store <16 x i8> %3, <16 x i8>* %ptr3, align 16
+  store <16 x i8> %2, ptr %ptr2, align 16
+  store <16 x i8> %3, ptr %ptr3, align 16
   ret void
 }
 
-define void @test_ldst_1(<256 x i1>* %vpp, <256 x i1>* %vp2) {
+define void @test_ldst_1(ptr %vpp, ptr %vp2) {
 ; CHECK-LABEL: test_ldst_1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvp vsp34, 0(r3)
@@ -120,17 +120,15 @@ define void @test_ldst_1(<256 x i1>* %vpp, <256 x i1>* %vp2) {
 ; CHECK-BE-NOMMA-NEXT:    stxvp vsp34, 0(r4)
 ; CHECK-BE-NOMMA-NEXT:    blr
 entry:
-  %0 = bitcast <256 x i1>* %vpp to i8*
-  %1 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %0)
-  %2 = bitcast <256 x i1>* %vp2 to i8*
-  tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %1, i8* %2)
+  %0 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %vpp)
+  tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %0, ptr %vp2)
   ret void
 }
 
-declare <256 x i1> @llvm.ppc.vsx.lxvp(i8*)
-declare void @llvm.ppc.vsx.stxvp(<256 x i1>, i8*)
+declare <256 x i1> @llvm.ppc.vsx.lxvp(ptr)
+declare void @llvm.ppc.vsx.stxvp(<256 x i1>, ptr)
 
-define void @test_ldst_2(<256 x i1>* %vpp, i64 %offset, <256 x i1>* %vp2)  {
+define void @test_ldst_2(ptr %vpp, i64 %offset, ptr %vp2)  {
 ; CHECK-LABEL: test_ldst_2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvpx vsp34, r3, r4
@@ -155,16 +153,14 @@ define void @test_ldst_2(<256 x i1>* %vpp, i64 %offset, <256 x i1>* %vp2)  {
 ; CHECK-BE-NOMMA-NEXT:    stxvpx vsp34, r5, r4
 ; CHECK-BE-NOMMA-NEXT:    blr
 entry:
-  %0 = bitcast <256 x i1>* %vpp to i8*
-  %1 = getelementptr i8, i8* %0, i64 %offset
-  %2 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %1)
-  %3 = bitcast <256 x i1>* %vp2 to i8*
-  %4 = getelementptr i8, i8* %3, i64 %offset
-  tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %2, i8* %4)
+  %0 = getelementptr i8, ptr %vpp, i64 %offset
+  %1 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %0)
+  %2 = getelementptr i8, ptr %vp2, i64 %offset
+  tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %1, ptr %2)
   ret void
 }
 
-define void @test_ldst_3(<256 x i1>* %vpp, <256 x i1>* %vp2)  {
+define void @test_ldst_3(ptr %vpp, ptr %vp2)  {
 ; CHECK-LABEL: test_ldst_3:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    plxvp vsp34, 18(r3), 0
@@ -189,16 +185,14 @@ define void @test_ldst_3(<256 x i1>* %vpp, <256 x i1>* %vp2)  {
 ; CHECK-BE-NOMMA-NEXT:    pstxvp vsp34, 18(r4), 0
 ; CHECK-BE-NOMMA-NEXT:    blr
 entry:
-  %0 = bitcast <256 x i1>* %vpp to i8*
-  %1 = getelementptr i8, i8* %0, i64 18
-  %2 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %1)
-  %3 = bitcast <256 x i1>* %vp2 to i8*
-  %4 = getelementptr i8, i8* %3, i64 18
-  tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %2, i8* %4)
+  %0 = getelementptr i8, ptr %vpp, i64 18
+  %1 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %0)
+  %2 = getelementptr i8, ptr %vp2, i64 18
+  tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %1, ptr %2)
   ret void
 }
 
-define void @test_ldst_4(<256 x i1>* %vpp, <256 x i1>* %vp2)  {
+define void @test_ldst_4(ptr %vpp, ptr %vp2)  {
 ; CHECK-LABEL: test_ldst_4:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    plxvp vsp34, 1(r3), 0
@@ -223,16 +217,14 @@ define void @test_ldst_4(<256 x i1>* %vpp, <256 x i1>* %vp2)  {
 ; CHECK-BE-NOMMA-NEXT:    pstxvp vsp34, 1(r4), 0
 ; CHECK-BE-NOMMA-NEXT:    blr
 entry:
-  %0 = bitcast <256 x i1>* %vpp to i8*
-  %1 = getelementptr i8, i8* %0, i64 1
-  %2 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %1)
-  %3 = bitcast <256 x i1>* %vp2 to i8*
-  %4 = getelementptr i8, i8* %3, i64 1
-  tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %2, i8* %4)
+  %0 = getelementptr i8, ptr %vpp, i64 1
+  %1 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %0)
+  %2 = getelementptr i8, ptr %vp2, i64 1
+  tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %1, ptr %2)
   ret void
 }
 
-define void @test_ldst_5(<256 x i1>* %vpp, <256 x i1>* %vp2)  {
+define void @test_ldst_5(ptr %vpp, ptr %vp2)  {
 ; CHECK-LABEL: test_ldst_5:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    plxvp vsp34, 42(r3), 0
@@ -257,16 +249,14 @@ define void @test_ldst_5(<256 x i1>* %vpp, <256 x i1>* %vp2)  {
 ; CHECK-BE-NOMMA-NEXT:    pstxvp vsp34, 42(r4), 0
 ; CHECK-BE-NOMMA-NEXT:    blr
 entry:
-  %0 = bitcast <256 x i1>* %vpp to i8*
-  %1 = getelementptr i8, i8* %0, i64 42
-  %2 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %1)
-  %3 = bitcast <256 x i1>* %vp2 to i8*
-  %4 = getelementptr i8, i8* %3, i64 42
-  tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %2, i8* %4)
+  %0 = getelementptr i8, ptr %vpp, i64 42
+  %1 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %0)
+  %2 = getelementptr i8, ptr %vp2, i64 42
+  tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %1, ptr %2)
   ret void
 }
 
-define void @test_ldst_6(<256 x i1>* %vpp, <256 x i1>* %vp2)  {
+define void @test_ldst_6(ptr %vpp, ptr %vp2)  {
 ; CHECK-LABEL: test_ldst_6:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvp vsp34, 4096(r3)
@@ -291,16 +281,14 @@ define void @test_ldst_6(<256 x i1>* %vpp, <256 x i1>* %vp2)  {
 ; CHECK-BE-NOMMA-NEXT:    stxvp vsp34, 4096(r4)
 ; CHECK-BE-NOMMA-NEXT:    blr
 entry:
-  %0 = getelementptr <256 x i1>, <256 x i1>* %vpp, i64 128
-  %1 = bitcast <256 x i1>* %0 to i8*
-  %2 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %1)
-  %3 = getelementptr <256 x i1>, <256 x i1>* %vp2, i64 128
-  %4 = bitcast <256 x i1>* %3 to i8*
-  tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %2, i8* %4)
+  %0 = getelementptr <256 x i1>, ptr %vpp, i64 128
+  %1 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %0)
+  %2 = getelementptr <256 x i1>, ptr %vp2, i64 128
+  tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %1, ptr %2)
   ret void
 }
 
-define void @test_ldst_7(<256 x i1>* %vpp, <256 x i1>* %vp2)  {
+define void @test_ldst_7(ptr %vpp, ptr %vp2)  {
 ; FIXME: A prefixed load (plxvp) is expected here as the offset in this
 ; test case is a constant that fits within 34-bits.
 ; CHECK-LABEL: test_ldst_7:
@@ -327,11 +315,9 @@ define void @test_ldst_7(<256 x i1>* %vpp, <256 x i1>* %vp2)  {
 ; CHECK-BE-NOMMA-NEXT:    pstxvp vsp34, 32799(r4), 0
 ; CHECK-BE-NOMMA-NEXT:    blr
 entry:
-  %0 = bitcast <256 x i1>* %vpp to i8*
-  %1 = getelementptr i8, i8* %0, i64 32799
-  %2 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %1)
-  %3 = bitcast <256 x i1>* %vp2 to i8*
-  %4 = getelementptr i8, i8* %3, i64 32799
-  tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %2, i8* %4)
+  %0 = getelementptr i8, ptr %vpp, i64 32799
+  %1 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %0)
+  %2 = getelementptr i8, ptr %vp2, i64 32799
+  tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %1, ptr %2)
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/pcrel-block-address.ll b/llvm/test/CodeGen/PowerPC/pcrel-block-address.ll
index 76eea940aeef..bf27562fabc9 100644
--- a/llvm/test/CodeGen/PowerPC/pcrel-block-address.ll
+++ b/llvm/test/CodeGen/PowerPC/pcrel-block-address.ll
@@ -8,11 +8,11 @@ define dso_local void @blockaddress() {
 ; CHECK:       paddi r3, 0, .Ltmp0@PCREL, 1
 ; CHECK:       bl helper@notoc
 entry:
-  tail call void @helper(i8* blockaddress(@blockaddress, %label))
+  tail call void @helper(ptr blockaddress(@blockaddress, %label))
   br label %label
 
 label:                                            ; preds = %entry
   ret void
 }
 
-declare void @helper(i8*)
+declare void @helper(ptr)

diff  --git a/llvm/test/CodeGen/PowerPC/pcrel-byte-loads.ll b/llvm/test/CodeGen/PowerPC/pcrel-byte-loads.ll
index 3082c04942d8..9d84d83572ab 100644
--- a/llvm/test/CodeGen/PowerPC/pcrel-byte-loads.ll
+++ b/llvm/test/CodeGen/PowerPC/pcrel-byte-loads.ll
@@ -21,7 +21,7 @@ define i1 @i64_ExtLoad_i1() {
 ; CHECK-BE-NEXT:    lbz r3, GlobLd1@toc@l(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i1, i1* getelementptr inbounds ([20 x i1], [20 x i1]* @GlobLd1, i64 0, i64 0), align 1
+  %0 = load i1, ptr @GlobLd1, align 1
   ret i1 %0
 }
 
@@ -37,7 +37,7 @@ define zeroext i1 @i64_ZextLoad_i1() {
 ; CHECK-BE-NEXT:    lbz r3, GlobLd1@toc@l(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i1, i1* getelementptr inbounds ([20 x i1], [20 x i1]* @GlobLd1, i64 0, i64 0), align 1
+  %0 = load i1, ptr @GlobLd1, align 1
   ret i1 %0
 }
 
@@ -58,15 +58,15 @@ define void @i32_ZextLoad_i1() {
 ; CHECK-BE-NEXT:    stb r3, GlobSt1@toc@l(r4)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i1, i1* getelementptr inbounds ([20 x i1], [20 x i1]* @GlobLd1, i64 0, i64 0), align 1
-  store i1 %0, i1* getelementptr inbounds ([20 x i1], [20 x i1]* @GlobSt1, i64 0, i64 0), align 1
+  %0 = load i1, ptr @GlobLd1, align 1
+  store i1 %0, ptr @GlobSt1, align 1
   ret void
 }
 
 %1 = type { i64 }
 @Glob1 = external dso_local global %1, align 8
 @Glob2 = external dso_local unnamed_addr constant [11 x i8], align 1
-declare i32 @Decl(%1*, i8*) local_unnamed_addr #0
+declare i32 @Decl(ptr, ptr) local_unnamed_addr #0
 
 define dso_local i1 @i32_ExtLoad_i1() local_unnamed_addr #0 {
 ; CHECK-LE-LABEL: i32_ExtLoad_i1:
@@ -113,9 +113,9 @@ define dso_local i1 @i32_ExtLoad_i1() local_unnamed_addr #0 {
 ; CHECK-BE-NEXT:    mtlr r0
 ; CHECK-BE-NEXT:    blr
 bb:
-  %i = call signext i32 @Decl(%1* nonnull dereferenceable(32) @Glob1, i8* getelementptr inbounds ([11 x i8], [11 x i8]* @Glob2, i64 0, i64 0)) #1
+  %i = call signext i32 @Decl(ptr nonnull dereferenceable(32) @Glob1, ptr @Glob2) #1
   %i1 = icmp eq i32 %i, 0
-  %i2 = load i1, i1* getelementptr inbounds ([20 x i1], [20 x i1]* @GlobLd1, i64 0, i64 0), align 1
+  %i2 = load i1, ptr @GlobLd1, align 1
   %i3 = select i1 %i1, i1 false, i1 %i2
   ret i1 %i3
 }

diff  --git a/llvm/test/CodeGen/PowerPC/pcrel-call-linkage-leaf.ll b/llvm/test/CodeGen/PowerPC/pcrel-call-linkage-leaf.ll
index c65a202e2108..541b2c46dd39 100644
--- a/llvm/test/CodeGen/PowerPC/pcrel-call-linkage-leaf.ll
+++ b/llvm/test/CodeGen/PowerPC/pcrel-call-linkage-leaf.ll
@@ -56,7 +56,7 @@ define dso_local signext i32 @AsmClobberX2WithTOC(i32 signext %a, i32 signext %b
 entry:
   %add = add nsw i32 %b, %a
   tail call void asm sideeffect "li 2, 0", "~{r2}"()
-  %0 = load i32, i32* @global_int, align 4
+  %0 = load i32, ptr @global_int, align 4
   %add1 = add nsw i32 %add, %0
   ret i32 %add1
 }
@@ -162,7 +162,7 @@ define dso_local signext i32 @UsesX2AsTOC() local_unnamed_addr {
 ; CHECK-LARGE:     add r2, r2, r12
 ; CHECK-ALL:       # %bb.0: # %entry
 entry:
-  %0 = load i32, i32* @global_int, align 4
+  %0 = load i32, ptr @global_int, align 4
   ret i32 %0
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/pcrel-call-linkage-simple.ll b/llvm/test/CodeGen/PowerPC/pcrel-call-linkage-simple.ll
index 013ae7345956..1e499fc3f612 100644
--- a/llvm/test/CodeGen/PowerPC/pcrel-call-linkage-simple.ll
+++ b/llvm/test/CodeGen/PowerPC/pcrel-call-linkage-simple.ll
@@ -20,7 +20,7 @@
 ; CHECK-O-NEXT: R_PPC64_REL24_NOTOC callee
 define dso_local signext i32 @caller() local_unnamed_addr {
 entry:
-  %call = tail call signext i32 bitcast (i32 (...)* @callee to i32 ()*)()
+  %call = tail call signext i32 @callee()
   ret i32 %call
 }
 
@@ -34,13 +34,13 @@ declare signext i32 @callee(...) local_unnamed_addr
 ; CHECK-O-LABEL: ExternalSymbol
 ; CHECK-O: b
 ; CHECK-O-NEXT: R_PPC64_REL24_NOTOC memcpy
-define dso_local void @ExternalSymbol(i8* nocapture %out, i8* nocapture readonly %in, i64 %num) local_unnamed_addr {
+define dso_local void @ExternalSymbol(ptr nocapture %out, ptr nocapture readonly %in, i64 %num) local_unnamed_addr {
 entry:
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %out, i8* align 1 %in, i64 %num, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr align 1 %out, ptr align 1 %in, i64 %num, i1 false)
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg)
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
 
 
 ; CHECK-S-LABEL: callerNoTail
@@ -60,8 +60,8 @@ declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noa
 ; CHECK-O:      blr
 define dso_local signext i32 @callerNoTail() local_unnamed_addr {
 entry:
-  %call1 = tail call signext i32 bitcast (i32 (...)* @callee to i32 ()*)()
-  %call2 = tail call signext i32 bitcast (i32 (...)* @callee to i32 ()*)()
+  %call1 = tail call signext i32 @callee()
+  %call2 = tail call signext i32 @callee()
   %add = add i32 %call1, %call2
   ret i32 %add
 }

diff  --git a/llvm/test/CodeGen/PowerPC/pcrel-call-linkage-with-calls.ll b/llvm/test/CodeGen/PowerPC/pcrel-call-linkage-with-calls.ll
index 32b8a7486a7e..9fcbe875df2a 100644
--- a/llvm/test/CodeGen/PowerPC/pcrel-call-linkage-with-calls.ll
+++ b/llvm/test/CodeGen/PowerPC/pcrel-call-linkage-with-calls.ll
@@ -10,7 +10,7 @@
 
 @globalVar = common dso_local local_unnamed_addr global i32 0, align 4
 @externGlobalVar = external local_unnamed_addr global i32, align 4
-@indirectCall = common dso_local local_unnamed_addr global i32 (i32)* null, align 8
+@indirectCall = common dso_local local_unnamed_addr global ptr null, align 8
 
 ; This funcion needs to remain as noinline.
 ; The compiler needs to know this function is local but must be forced to call
@@ -49,7 +49,7 @@ define dso_local signext i32 @DirectCallLocal1(i32 signext %a, i32 signext %b) l
 entry:
   %add = add nsw i32 %b, %a
   %call = tail call signext i32 @localCall(i32 signext %add)
-  %0 = load i32, i32* @globalVar, align 4
+  %0 = load i32, ptr @globalVar, align 4
   %mul = mul nsw i32 %0, %call
   ret i32 %mul
 }
@@ -79,7 +79,7 @@ define dso_local signext i32 @DirectCallLocal2(i32 signext %a, i32 signext %b) l
 entry:
   %add = add nsw i32 %b, %a
   %call = tail call signext i32 @localCall(i32 signext %add)
-  %0 = load i32, i32* @externGlobalVar, align 4
+  %0 = load i32, ptr @externGlobalVar, align 4
   %mul = mul nsw i32 %0, %call
   ret i32 %mul
 }
@@ -132,7 +132,7 @@ define dso_local signext i32 @DirectCallExtern1(i32 signext %a, i32 signext %b)
 entry:
   %add = add nsw i32 %b, %a
   %call = tail call signext i32 @externCall(i32 signext %add)
-  %0 = load i32, i32* @globalVar, align 4
+  %0 = load i32, ptr @globalVar, align 4
   %mul = mul nsw i32 %0, %call
   ret i32 %mul
 }
@@ -164,7 +164,7 @@ define dso_local signext i32 @DirectCallExtern2(i32 signext %a, i32 signext %b)
 entry:
   %add = add nsw i32 %b, %a
   %call = tail call signext i32 @externCall(i32 signext %add)
-  %0 = load i32, i32* @externGlobalVar, align 4
+  %0 = load i32, ptr @externGlobalVar, align 4
   %mul = mul nsw i32 %0, %call
   ret i32 %mul
 }
@@ -205,7 +205,7 @@ define dso_local signext i32 @TailCallLocal1(i32 signext %a) local_unnamed_addr
 ; CHECK-S-NEXT:    extsw r3, r3
 ; CHECK-S-NEXT:    b localCall@notoc
 entry:
-  %0 = load i32, i32* @globalVar, align 4
+  %0 = load i32, ptr @globalVar, align 4
   %add = add nsw i32 %0, %a
   %call = tail call signext i32 @localCall(i32 signext %add)
   ret i32 %call
@@ -223,7 +223,7 @@ define dso_local signext i32 @TailCallLocal2(i32 signext %a) local_unnamed_addr
 ; CHECK-S-NEXT:    extsw r3, r3
 ; CHECK-S-NEXT:    b localCall@notoc
 entry:
-  %0 = load i32, i32* @externGlobalVar, align 4
+  %0 = load i32, ptr @externGlobalVar, align 4
   %add = add nsw i32 %0, %a
   %call = tail call signext i32 @localCall(i32 signext %add)
   ret i32 %call
@@ -249,7 +249,7 @@ define dso_local signext i32 @TailCallExtern1(i32 signext %a) local_unnamed_addr
 ; CHECK-S-NEXT:    extsw r3, r3
 ; CHECK-S-NEXT:    b externCall at notoc
 entry:
-  %0 = load i32, i32* @globalVar, align 4
+  %0 = load i32, ptr @globalVar, align 4
   %add = add nsw i32 %0, %a
   %call = tail call signext i32 @externCall(i32 signext %add)
   ret i32 %call
@@ -267,7 +267,7 @@ define dso_local signext i32 @TailCallExtern2(i32 signext %a) local_unnamed_addr
 ; CHECK-S-NEXT:    extsw r3, r3
 ; CHECK-S-NEXT:    b externCall at notoc
 entry:
-  %0 = load i32, i32* @externGlobalVar, align 4
+  %0 = load i32, ptr @externGlobalVar, align 4
   %add = add nsw i32 %0, %a
   %call = tail call signext i32 @externCall(i32 signext %add)
   ret i32 %call
@@ -306,9 +306,9 @@ define dso_local signext i32 @IndirectCall1(i32 signext %a, i32 signext %b) loca
 ; CHECK-S-NEXT:    blr
 entry:
   %add = add nsw i32 %b, %a
-  %0 = load i32 (i32)*, i32 (i32)** @indirectCall, align 8
+  %0 = load ptr, ptr @indirectCall, align 8
   %call = tail call signext i32 %0(i32 signext %add)
-  %1 = load i32, i32* @globalVar, align 4
+  %1 = load i32, ptr @globalVar, align 4
   %mul = mul nsw i32 %1, %call
   ret i32 %mul
 }
@@ -338,14 +338,14 @@ define dso_local signext i32 @IndirectCall2(i32 signext %a, i32 signext %b) loca
 ; CHECK-S-NEXT:    blr
 entry:
   %add = add nsw i32 %b, %a
-  %0 = load i32 (i32)*, i32 (i32)** @indirectCall, align 8
+  %0 = load ptr, ptr @indirectCall, align 8
   %call = tail call signext i32 %0(i32 signext %add)
-  %1 = load i32, i32* @externGlobalVar, align 4
+  %1 = load i32, ptr @externGlobalVar, align 4
   %mul = mul nsw i32 %1, %call
   ret i32 %mul
 }
 
-define dso_local signext i32 @IndirectCall3(i32 signext %a, i32 signext %b, i32 (i32)* nocapture %call_param) local_unnamed_addr {
+define dso_local signext i32 @IndirectCall3(i32 signext %a, i32 signext %b, ptr nocapture %call_param) local_unnamed_addr {
 ; CHECK-ALL-LABEL: IndirectCall3:
 ; CHECK-S:       # %bb.0: # %entry
 ; CHECK-S-NEXT:    mflr r0
@@ -368,12 +368,12 @@ define dso_local signext i32 @IndirectCall3(i32 signext %a, i32 signext %b, i32
 entry:
   %add = add nsw i32 %b, %a
   %call = tail call signext i32 %call_param(i32 signext %add)
-  %0 = load i32, i32* @globalVar, align 4
+  %0 = load i32, ptr @globalVar, align 4
   %mul = mul nsw i32 %0, %call
   ret i32 %mul
 }
 
-define dso_local signext i32 @IndirectCallNoGlobal(i32 signext %a, i32 signext %b, i32 (i32)* nocapture %call_param) local_unnamed_addr {
+define dso_local signext i32 @IndirectCallNoGlobal(i32 signext %a, i32 signext %b, ptr nocapture %call_param) local_unnamed_addr {
 ; CHECK-ALL-LABEL: IndirectCallNoGlobal:
 ; CHECK-S:       # %bb.0: # %entry
 ; CHECK-S-NEXT:    mflr r0
@@ -400,7 +400,7 @@ entry:
   ret i32 %add
 }
 
-define dso_local signext i32 @IndirectCallOnly(i32 signext %a, i32 (i32)* nocapture %call_param) local_unnamed_addr {
+define dso_local signext i32 @IndirectCallOnly(i32 signext %a, ptr nocapture %call_param) local_unnamed_addr {
 ; CHECK-ALL-LABEL: IndirectCallOnly:
 ; CHECK-S:       # %bb.0: # %entry
 ; CHECK-S-NEXT:    mtctr r4

diff  --git a/llvm/test/CodeGen/PowerPC/pcrel-got-indirect.ll b/llvm/test/CodeGen/PowerPC/pcrel-got-indirect.ll
index 7ade41ced353..fe89b1949f22 100644
--- a/llvm/test/CodeGen/PowerPC/pcrel-got-indirect.ll
+++ b/llvm/test/CodeGen/PowerPC/pcrel-got-indirect.ll
@@ -14,10 +14,10 @@
 @valInt = external global i32, align 4
 @valUnsigned = external local_unnamed_addr global i32, align 4
 @valLong = external local_unnamed_addr global i64, align 8
- at ptr = external local_unnamed_addr global i32*, align 8
+ at ptr = external local_unnamed_addr global ptr, align 8
 @array = external local_unnamed_addr global [10 x i32], align 4
 @structure = external local_unnamed_addr global %struct.Struct, align 4
- at ptrfunc = external local_unnamed_addr global void (...)*, align 8
+ at ptrfunc = external local_unnamed_addr global ptr, align 8
 
 define dso_local signext i32 @ReadGlobalVarChar() local_unnamed_addr  {
 ; LE-LABEL: ReadGlobalVarChar:
@@ -36,7 +36,7 @@ define dso_local signext i32 @ReadGlobalVarChar() local_unnamed_addr  {
 ; BE-NEXT:    lbz r3, 0(r3)
 ; BE-NEXT:    blr
 entry:
-  %0 = load i8, i8* @valChar, align 1
+  %0 = load i8, ptr @valChar, align 1
   %conv = zext i8 %0 to i32
   ret i32 %conv
 }
@@ -56,7 +56,7 @@ define dso_local void @WriteGlobalVarChar() local_unnamed_addr  {
 ; BE-NEXT:    stb r4, 0(r3)
 ; BE-NEXT:    blr
 entry:
-  store i8 3, i8* @valChar, align 1
+  store i8 3, ptr @valChar, align 1
   ret void
 }
 
@@ -77,7 +77,7 @@ define dso_local signext i32 @ReadGlobalVarShort() local_unnamed_addr  {
 ; BE-NEXT:    lha r3, 0(r3)
 ; BE-NEXT:    blr
 entry:
-  %0 = load i16, i16* @valShort, align 2
+  %0 = load i16, ptr @valShort, align 2
   %conv = sext i16 %0 to i32
   ret i32 %conv
 }
@@ -97,7 +97,7 @@ define dso_local void @WriteGlobalVarShort() local_unnamed_addr  {
 ; BE-NEXT:    sth r4, 0(r3)
 ; BE-NEXT:    blr
 entry:
-  store i16 3, i16* @valShort, align 2
+  store i16 3, ptr @valShort, align 2
   ret void
 }
 
@@ -118,7 +118,7 @@ define dso_local signext i32 @ReadGlobalVarInt() local_unnamed_addr  {
 ; BE-NEXT:    lwa r3, 0(r3)
 ; BE-NEXT:    blr
 entry:
-  %0 = load i32, i32* @valInt, align 4
+  %0 = load i32, ptr @valInt, align 4
   ret i32 %0
 }
 
@@ -137,7 +137,7 @@ define dso_local void @WriteGlobalVarInt() local_unnamed_addr  {
 ; BE-NEXT:    stw r4, 0(r3)
 ; BE-NEXT:    blr
 entry:
-  store i32 33, i32* @valInt, align 4
+  store i32 33, ptr @valInt, align 4
   ret void
 }
 
@@ -158,7 +158,7 @@ define dso_local signext i32 @ReadGlobalVarUnsigned() local_unnamed_addr  {
 ; BE-NEXT:    lwa r3, 0(r3)
 ; BE-NEXT:    blr
 entry:
-  %0 = load i32, i32* @valUnsigned, align 4
+  %0 = load i32, ptr @valUnsigned, align 4
   ret i32 %0
 }
 
@@ -177,7 +177,7 @@ define dso_local void @WriteGlobalVarUnsigned() local_unnamed_addr  {
 ; BE-NEXT:    stw r4, 0(r3)
 ; BE-NEXT:    blr
 entry:
-  store i32 33, i32* @valUnsigned, align 4
+  store i32 33, ptr @valUnsigned, align 4
   ret void
 }
 
@@ -198,7 +198,7 @@ define dso_local signext i32 @ReadGlobalVarLong() local_unnamed_addr  {
 ; BE-NEXT:    lwa r3, 4(r3)
 ; BE-NEXT:    blr
 entry:
-  %0 = load i64, i64* @valLong, align 8
+  %0 = load i64, ptr @valLong, align 8
   %conv = trunc i64 %0 to i32
   ret i32 %conv
 }
@@ -218,11 +218,11 @@ define dso_local void @WriteGlobalVarLong() local_unnamed_addr  {
 ; BE-NEXT:    std r4, 0(r3)
 ; BE-NEXT:    blr
 entry:
-  store i64 3333, i64* @valLong, align 8
+  store i64 3333, ptr @valLong, align 8
   ret void
 }
 
-define dso_local i32* @ReadGlobalPtr() local_unnamed_addr  {
+define dso_local ptr @ReadGlobalPtr() local_unnamed_addr  {
 ; LE-LABEL: ReadGlobalPtr:
 ; LE:       # %bb.0: # %entry
 ; LE-NEXT:    pld r3, ptr at got@pcrel(0), 1
@@ -239,8 +239,8 @@ define dso_local i32* @ReadGlobalPtr() local_unnamed_addr  {
 ; BE-NEXT:    ld r3, 0(r3)
 ; BE-NEXT:    blr
 entry:
-  %0 = load i32*, i32** @ptr, align 8
-  ret i32* %0
+  %0 = load ptr, ptr @ptr, align 8
+  ret ptr %0
 }
 
 define dso_local void @WriteGlobalPtr() local_unnamed_addr  {
@@ -264,12 +264,12 @@ define dso_local void @WriteGlobalPtr() local_unnamed_addr  {
 ; BE-NEXT:    stw r4, 0(r3)
 ; BE-NEXT:    blr
 entry:
-  %0 = load i32*, i32** @ptr, align 8
-  store i32 3, i32* %0, align 4
+  %0 = load ptr, ptr @ptr, align 8
+  store i32 3, ptr %0, align 4
   ret void
 }
 
-define dso_local nonnull i32* @GlobalVarAddr() local_unnamed_addr  {
+define dso_local nonnull ptr @GlobalVarAddr() local_unnamed_addr  {
 ; LE-LABEL: GlobalVarAddr:
 ; LE:       # %bb.0: # %entry
 ; LE-NEXT:    pld r3, valInt at got@pcrel(0), 1
@@ -280,7 +280,7 @@ define dso_local nonnull i32* @GlobalVarAddr() local_unnamed_addr  {
 ; BE-NEXT:    pld r3, valInt at got@pcrel(0), 1
 ; BE-NEXT:    blr
 entry:
-  ret i32* @valInt
+  ret ptr @valInt
 }
 
 define dso_local signext i32 @ReadGlobalArray() local_unnamed_addr  {
@@ -300,7 +300,7 @@ define dso_local signext i32 @ReadGlobalArray() local_unnamed_addr  {
 ; BE-NEXT:    lwa r3, 12(r3)
 ; BE-NEXT:    blr
 entry:
-  %0 = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @array, i64 0, i64 3), align 4
+  %0 = load i32, ptr getelementptr inbounds ([10 x i32], ptr @array, i64 0, i64 3), align 4
   ret i32 %0
 }
 
@@ -319,7 +319,7 @@ define dso_local void @WriteGlobalArray() local_unnamed_addr  {
 ; BE-NEXT:    stw r4, 12(r3)
 ; BE-NEXT:    blr
 entry:
-  store i32 5, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @array, i64 0, i64 3), align 4
+  store i32 5, ptr getelementptr inbounds ([10 x i32], ptr @array, i64 0, i64 3), align 4
   ret void
 }
 
@@ -340,7 +340,7 @@ define dso_local signext i32 @ReadGlobalStruct() local_unnamed_addr  {
 ; BE-NEXT:    lwa r3, 4(r3)
 ; BE-NEXT:    blr
 entry:
-  %0 = load i32, i32* getelementptr inbounds (%struct.Struct, %struct.Struct* @structure, i64 0, i32 2), align 4
+  %0 = load i32, ptr getelementptr inbounds (%struct.Struct, ptr @structure, i64 0, i32 2), align 4
   ret i32 %0
 }
 
@@ -359,7 +359,7 @@ define dso_local void @WriteGlobalStruct() local_unnamed_addr  {
 ; BE-NEXT:    stw r4, 4(r3)
 ; BE-NEXT:    blr
 entry:
-  store i32 3, i32* getelementptr inbounds (%struct.Struct, %struct.Struct* @structure, i64 0, i32 2), align 4
+  store i32 3, ptr getelementptr inbounds (%struct.Struct, ptr @structure, i64 0, i32 2), align 4
   ret void
 }
 
@@ -386,7 +386,7 @@ define dso_local void @ReadFuncPtr() local_unnamed_addr  {
 ; BE-NEXT:    bctr
 ; BE-NEXT:    #TC_RETURNr8 ctr 0
 entry:
-  %0 = load void ()*, void ()** bitcast (void (...)** @ptrfunc to void ()**), align 8
+  %0 = load ptr, ptr @ptrfunc, align 8
   tail call void %0()
   ret void
 }
@@ -406,7 +406,7 @@ define dso_local void @WriteFuncPtr() local_unnamed_addr  {
 ; BE-NEXT:    std r4, 0(r3)
 ; BE-NEXT:    blr
 entry:
-  store void (...)* @function, void (...)** @ptrfunc, align 8
+  store ptr @function, ptr @ptrfunc, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/pcrel-indirect-call.ll b/llvm/test/CodeGen/PowerPC/pcrel-indirect-call.ll
index 88f25db58b49..b427772e9bd7 100644
--- a/llvm/test/CodeGen/PowerPC/pcrel-indirect-call.ll
+++ b/llvm/test/CodeGen/PowerPC/pcrel-indirect-call.ll
@@ -9,7 +9,7 @@
 ; is passed as a parameter in this test.
 
 ; Function Attrs: noinline
-define dso_local void @IndirectCallExternFuncPtr(void ()* nocapture %ptrfunc) {
+define dso_local void @IndirectCallExternFuncPtr(ptr nocapture %ptrfunc) {
 ; CHECK-LABEL: IndirectCallExternFuncPtr:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtctr r3
@@ -23,7 +23,7 @@ entry:
 
 define dso_local void @FuncPtrPassAsParam() {
 entry:
-  tail call void @IndirectCallExternFuncPtr(void ()* nonnull @Function)
+  tail call void @IndirectCallExternFuncPtr(ptr nonnull @Function)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/pcrel-linkeropt-option.ll b/llvm/test/CodeGen/PowerPC/pcrel-linkeropt-option.ll
index 564bd29f181a..c1d3d4e1e92a 100644
--- a/llvm/test/CodeGen/PowerPC/pcrel-linkeropt-option.ll
+++ b/llvm/test/CodeGen/PowerPC/pcrel-linkeropt-option.ll
@@ -34,6 +34,6 @@ define dso_local i8 @Read8() local_unnamed_addr {
 ; OFF-NEXT:    lbz r3, 0(r3)
 ; OFF-NEXT:    blr
 entry:
-  %0 = load i8, i8* @input8, align 1
+  %0 = load i8, ptr @input8, align 1
   ret i8 %0
 }

diff  --git a/llvm/test/CodeGen/PowerPC/pcrel-linkeropt.ll b/llvm/test/CodeGen/PowerPC/pcrel-linkeropt.ll
index aefa07e8bd38..27140e56d9c8 100644
--- a/llvm/test/CodeGen/PowerPC/pcrel-linkeropt.ll
+++ b/llvm/test/CodeGen/PowerPC/pcrel-linkeropt.ll
@@ -29,10 +29,10 @@
 @outputVi64 = external local_unnamed_addr global <2 x i64>, align 16
 @ArrayIn = external global [10 x i32], align 4
 @ArrayOut = external local_unnamed_addr global [10 x i32], align 4
- at IntPtrIn = external local_unnamed_addr global i32*, align 8
- at IntPtrOut = external local_unnamed_addr global i32*, align 8
- at FuncPtrIn = external local_unnamed_addr global void (...)*, align 8
- at FuncPtrOut = external local_unnamed_addr global void (...)*, align 8
+ at IntPtrIn = external local_unnamed_addr global ptr, align 8
+ at IntPtrOut = external local_unnamed_addr global ptr, align 8
+ at FuncPtrIn = external local_unnamed_addr global ptr, align 8
+ at FuncPtrOut = external local_unnamed_addr global ptr, align 8
 
 define dso_local void @ReadWrite8() local_unnamed_addr #0 {
 ; In this test the stb r3, 0(r4) cannot be optimized because it
@@ -48,8 +48,8 @@ define dso_local void @ReadWrite8() local_unnamed_addr #0 {
 ; CHECK-NEXT:    stb r3, 0(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* @input8, align 1
-  store i8 %0, i8* @output8, align 1
+  %0 = load i8, ptr @input8, align 1
+  store i8 %0, ptr @output8, align 1
   ret void
 }
 
@@ -67,8 +67,8 @@ define dso_local void @ReadWrite16() local_unnamed_addr #0 {
 ; CHECK-NEXT:    sth r3, 0(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* @input16, align 2
-  store i16 %0, i16* @output16, align 2
+  %0 = load i16, ptr @input16, align 2
+  store i16 %0, ptr @output16, align 2
   ret void
 }
 
@@ -83,8 +83,8 @@ define dso_local void @ReadWrite32() local_unnamed_addr #0 {
 ; CHECK-NEXT:    stw r3, 0(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* @input32, align 4
-  store i32 %0, i32* @output32, align 4
+  %0 = load i32, ptr @input32, align 4
+  store i32 %0, ptr @output32, align 4
   ret void
 }
 
@@ -99,8 +99,8 @@ define dso_local void @ReadWrite64() local_unnamed_addr #0 {
 ; CHECK-NEXT:    std r3, 0(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i64, i64* @input64, align 8
-  store i64 %0, i64* @output64, align 8
+  %0 = load i64, ptr @input64, align 8
+  store i64 %0, ptr @output64, align 8
   ret void
 }
 
@@ -119,8 +119,8 @@ define dso_local void @ReadWrite128() local_unnamed_addr #0 {
 ; CHECK-NEXT:    stxv vs0, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i128, i128* @input128, align 16
-  store i128 %0, i128* @output128, align 16
+  %0 = load i128, ptr @input128, align 16
+  store i128 %0, ptr @output128, align 16
   ret void
 }
 
@@ -137,9 +137,9 @@ define dso_local void @ReadWritef32() local_unnamed_addr #0 {
 ; CHECK-NEXT:    stfs f0, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* @inputf32, align 4
+  %0 = load float, ptr @inputf32, align 4
   %add = fadd float %0, 0x400851EB80000000
-  store float %add, float* @outputf32, align 4
+  store float %add, ptr @outputf32, align 4
   ret void
 }
 
@@ -157,9 +157,9 @@ define dso_local void @ReadWritef64() local_unnamed_addr #0 {
 ; CHECK-NEXT:    stfd f0, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load double, double* @inputf64, align 8
+  %0 = load double, ptr @inputf64, align 8
   %add = fadd double %0, 6.800000e+00
-  store double %add, double* @outputf64, align 8
+  store double %add, ptr @outputf64, align 8
   ret void
 }
 
@@ -178,9 +178,9 @@ define dso_local void @ReadWriteVi32() local_unnamed_addr #0 {
 ; CHECK-NEXT:    stxv v2, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @inputVi32, align 16
+  %0 = load <4 x i32>, ptr @inputVi32, align 16
   %vecins = insertelement <4 x i32> %0, i32 45, i32 1
-  store <4 x i32> %vecins, <4 x i32>* @outputVi32, align 16
+  store <4 x i32> %vecins, ptr @outputVi32, align 16
   ret void
 }
 
@@ -197,8 +197,8 @@ define dso_local void @ReadWriteVi64() local_unnamed_addr #0 {
 ; CHECK-NEXT:    stxv vs0, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @inputVi64, align 16
-  store <2 x i64> %0, <2 x i64>* @outputVi64, align 16
+  %0 = load <2 x i64>, ptr @inputVi64, align 16
+  store <2 x i64> %0, ptr @outputVi64, align 16
   ret void
 }
 
@@ -214,9 +214,9 @@ define dso_local void @ReadWriteArray() local_unnamed_addr #0 {
 ; CHECK-NEXT:    stw r3, 8(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @ArrayIn, i64 0, i64 7), align 4
+  %0 = load i32, ptr getelementptr inbounds ([10 x i32], ptr @ArrayIn, i64 0, i64 7), align 4
   %add = add nsw i32 %0, 42
-  store i32 %add, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @ArrayOut, i64 0, i64 2), align 4
+  store i32 %add, ptr getelementptr inbounds ([10 x i32], ptr @ArrayOut, i64 0, i64 2), align 4
   ret void
 }
 
@@ -229,9 +229,9 @@ define dso_local void @ReadWriteSameArray() local_unnamed_addr #0 {
 ; CHECK-NEXT:    stw r4, 24(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @ArrayIn, i64 0, i64 3), align 4
+  %0 = load i32, ptr getelementptr inbounds ([10 x i32], ptr @ArrayIn, i64 0, i64 3), align 4
   %add = add nsw i32 %0, 8
-  store i32 %add, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @ArrayIn, i64 0, i64 6), align 4
+  store i32 %add, ptr getelementptr inbounds ([10 x i32], ptr @ArrayIn, i64 0, i64 6), align 4
   ret void
 }
 
@@ -252,15 +252,15 @@ define dso_local void @ReadWriteIntPtr() local_unnamed_addr #0 {
 ; CHECK-NEXT:    stw r3, 136(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32*, i32** @IntPtrIn, align 8
-  %arrayidx = getelementptr inbounds i32, i32* %0, i64 54
-  %1 = load i32, i32* %arrayidx, align 4
-  %arrayidx1 = getelementptr inbounds i32, i32* %0, i64 12
-  %2 = load i32, i32* %arrayidx1, align 4
+  %0 = load ptr, ptr @IntPtrIn, align 8
+  %arrayidx = getelementptr inbounds i32, ptr %0, i64 54
+  %1 = load i32, ptr %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %0, i64 12
+  %2 = load i32, ptr %arrayidx1, align 4
   %add = add nsw i32 %2, %1
-  %3 = load i32*, i32** @IntPtrOut, align 8
-  %arrayidx2 = getelementptr inbounds i32, i32* %3, i64 34
-  store i32 %add, i32* %arrayidx2, align 4
+  %3 = load ptr, ptr @IntPtrOut, align 8
+  %arrayidx2 = getelementptr inbounds i32, ptr %3, i64 34
+  store i32 %add, ptr %arrayidx2, align 4
   ret void
 }
 
@@ -275,8 +275,8 @@ define dso_local void @ReadWriteFuncPtr() local_unnamed_addr #0 {
 ; CHECK-NEXT:    std r3, 0(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i64, i64* bitcast (void (...)** @FuncPtrIn to i64*), align 8
-  store i64 %0, i64* bitcast (void (...)** @FuncPtrOut to i64*), align 8
+  %0 = load i64, ptr @FuncPtrIn, align 8
+  store i64 %0, ptr @FuncPtrOut, align 8
   ret void
 }
 
@@ -288,7 +288,7 @@ define dso_local void @FuncPtrCopy() local_unnamed_addr #0 {
 ; CHECK-NEXT:    std r4, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  store void (...)* @Callee, void (...)** @FuncPtrOut, align 8
+  store ptr @Callee, ptr @FuncPtrOut, align 8
   ret void
 }
 
@@ -305,7 +305,7 @@ define dso_local void @FuncPtrCall() local_unnamed_addr #0 {
 ; CHECK-NEXT:    bctr
 ; CHECK-NEXT:    #TC_RETURNr8 ctr 0
 entry:
-  %0 = load void ()*, void ()** bitcast (void (...)** @FuncPtrIn to void ()**), align 8
+  %0 = load ptr, ptr @FuncPtrIn, align 8
   tail call void %0()
   ret void
 }
@@ -319,7 +319,7 @@ define dso_local signext i32 @ReadVecElement() local_unnamed_addr #0 {
 ; CHECK-NEXT:    lwa r3, 4(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @inputVi32, align 16
+  %0 = load <4 x i32>, ptr @inputVi32, align 16
   %vecext = extractelement <4 x i32> %0, i32 1
   ret i32 %vecext
 }
@@ -348,14 +348,14 @@ define dso_local signext i32 @VecMultiUse() local_unnamed_addr #0 {
 ; CHECK-NEXT:    mtlr r0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @inputVi32, align 16
-  tail call void bitcast (void (...)* @Callee to void ()*)()
-  %1 = load <4 x i32>, <4 x i32>* @inputVi32, align 16
+  %0 = load <4 x i32>, ptr @inputVi32, align 16
+  tail call void @Callee()
+  %1 = load <4 x i32>, ptr @inputVi32, align 16
   %2 = extractelement <4 x i32> %1, i32 2
   %3 = extractelement <4 x i32> %0, i64 1
   %4 = add nsw i32 %2, %3
-  tail call void bitcast (void (...)* @Callee to void ()*)()
-  %5 = load <4 x i32>, <4 x i32>* @inputVi32, align 16
+  tail call void @Callee()
+  %5 = load <4 x i32>, ptr @inputVi32, align 16
   %vecext2 = extractelement <4 x i32> %5, i32 0
   %add3 = add nsw i32 %4, %vecext2
   ret i32 %add3
@@ -381,22 +381,22 @@ define dso_local signext i32 @UseAddr(i32 signext %a) local_unnamed_addr #0 {
 ; CHECK-NEXT:    mtlr r0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @ArrayIn, i64 0, i64 4), align 4
+  %0 = load i32, ptr getelementptr inbounds ([10 x i32], ptr @ArrayIn, i64 0, i64 4), align 4
   %add = add nsw i32 %0, %a
-  %call = tail call signext i32 @getAddr(i32* getelementptr inbounds ([10 x i32], [10 x i32]* @ArrayIn, i64 0, i64 0))
+  %call = tail call signext i32 @getAddr(ptr @ArrayIn)
   %add1 = add nsw i32 %add, %call
   ret i32 %add1
 }
 
-declare signext i32 @getAddr(i32*) local_unnamed_addr
+declare signext i32 @getAddr(ptr) local_unnamed_addr
 
-define dso_local nonnull i32* @AddrTaken32() local_unnamed_addr #0 {
+define dso_local nonnull ptr @AddrTaken32() local_unnamed_addr #0 {
 ; CHECK-LABEL: AddrTaken32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    pld r3, input32 at got@pcrel(0), 1
 ; CHECK-NEXT:    blr
 entry:
-  ret i32* @input32
+  ret ptr @input32
 }
 
 attributes #0 = { nounwind }

diff  --git a/llvm/test/CodeGen/PowerPC/pcrel-local-caller-toc.ll b/llvm/test/CodeGen/PowerPC/pcrel-local-caller-toc.ll
index 2871e077df56..f19057d35c7f 100644
--- a/llvm/test/CodeGen/PowerPC/pcrel-local-caller-toc.ll
+++ b/llvm/test/CodeGen/PowerPC/pcrel-local-caller-toc.ll
@@ -57,11 +57,11 @@ define dso_local void @caller(i32 signext %a) local_unnamed_addr #2 {
 ; CHECK-NEXT:    mtlr r0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* @global, align 4
+  %0 = load i32, ptr @global, align 4
   %add = add nsw i32 %0, %a
   %call = tail call signext i32 @callee(i32 signext %add)
   %mul = mul nsw i32 %call, %call
-  store i32 %mul, i32* @global, align 4
+  store i32 %mul, ptr @global, align 4
   ret void
 }
 
@@ -83,7 +83,7 @@ define dso_local signext i32 @tail_caller(i32 signext %a) local_unnamed_addr #2
 ; CHECK-NEXT:    mtlr r0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* @global, align 4
+  %0 = load i32, ptr @global, align 4
   %add = add nsw i32 %0, %a
   %call = tail call signext i32 @callee(i32 signext %add)
   ret i32 %call

diff  --git a/llvm/test/CodeGen/PowerPC/pcrel-relocation-plus-offset.ll b/llvm/test/CodeGen/PowerPC/pcrel-relocation-plus-offset.ll
index 98ea41f7bb26..0aa28dafb321 100644
--- a/llvm/test/CodeGen/PowerPC/pcrel-relocation-plus-offset.ll
+++ b/llvm/test/CodeGen/PowerPC/pcrel-relocation-plus-offset.ll
@@ -28,7 +28,7 @@ define dso_local signext i32 @getElementLocal7() local_unnamed_addr {
 ; CHECK-O-NEXT:      R_PPC64_PCREL34      array2+0x1c
 ; CHECK-O-NEXT:    blr
 entry:
-  %0 = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @array2, i64 0, i64 7), align 4
+  %0 = load i32, ptr getelementptr inbounds ([10 x i32], ptr @array2, i64 0, i64 7), align 4
   ret i32 %0
 }
 
@@ -42,7 +42,7 @@ define dso_local signext i32 @getElementLocalNegative() local_unnamed_addr {
 ; CHECK-O-NEXT:      R_PPC64_PCREL34      array2-0x8
 ; CHECK-O-NEXT:    blr
 entry:
-  %0 = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @array2, i64 0, i64 -2), align 4
+  %0 = load i32, ptr getelementptr inbounds ([10 x i32], ptr @array2, i64 0, i64 -2), align 4
   ret i32 %0
 }
 
@@ -61,7 +61,7 @@ define dso_local signext i32 @getElementExtern4() local_unnamed_addr {
 ; CHECK-O:         lwa 3, 16(3)
 ; CHECK-O-NEXT:    blr
 entry:
-  %0 = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @array1, i64 0, i64 4), align 4
+  %0 = load i32, ptr getelementptr inbounds ([10 x i32], ptr @array1, i64 0, i64 4), align 4
   ret i32 %0
 }
 
@@ -80,7 +80,7 @@ define dso_local signext i32 @getElementExternNegative() local_unnamed_addr {
 ; CHECK-O:         lwa 3, -4(3)
 ; CHECK-O-NEXT:    blr
 entry:
-  %0 = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @array1, i64 0, i64 -1), align 4
+  %0 = load i32, ptr getelementptr inbounds ([10 x i32], ptr @array1, i64 0, i64 -1), align 4
   ret i32 %0
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/pcrel-tail-calls.ll b/llvm/test/CodeGen/PowerPC/pcrel-tail-calls.ll
index 1982332ffd5c..319c346f08f9 100644
--- a/llvm/test/CodeGen/PowerPC/pcrel-tail-calls.ll
+++ b/llvm/test/CodeGen/PowerPC/pcrel-tail-calls.ll
@@ -12,8 +12,8 @@
 ; the past as we no longer need to restore the TOC pointer into R2 after
 ; most calls.
 
- at Func = external local_unnamed_addr global i32 (...)*, align 8
- at FuncLocal = common dso_local local_unnamed_addr global i32 (...)* null, align 8
+ at Func = external local_unnamed_addr global ptr, align 8
+ at FuncLocal = common dso_local local_unnamed_addr global ptr null, align 8
 
 ; No calls in this function but we assign the function pointers.
 define dso_local void @AssignFuncPtr() local_unnamed_addr {
@@ -25,8 +25,8 @@ define dso_local void @AssignFuncPtr() local_unnamed_addr {
 ; CHECK-NEXT:    pstd r4, FuncLocal at PCREL(0), 1
 ; CHECK-NEXT:    blr
 entry:
-  store i32 (...)* @Function, i32 (...)** @Func, align 8
-  store i32 (...)* @Function, i32 (...)** @FuncLocal, align 8
+  store ptr @Function, ptr @Func, align 8
+  store ptr @Function, ptr @FuncLocal, align 8
   ret void
 }
 
@@ -40,7 +40,7 @@ define dso_local void @TailCallLocalFuncPtr() local_unnamed_addr {
 ; CHECK-NEXT:    bctr
 ; CHECK-NEXT:    #TC_RETURNr8 ctr 0
 entry:
-  %0 = load i32 ()*, i32 ()** bitcast (i32 (...)** @FuncLocal to i32 ()**), align 8
+  %0 = load ptr, ptr @FuncLocal, align 8
   %call = tail call signext i32 %0()
   ret void
 }
@@ -56,12 +56,12 @@ define dso_local void @TailCallExtrnFuncPtr() local_unnamed_addr {
 ; CHECK-NEXT:    bctr
 ; CHECK-NEXT:    #TC_RETURNr8 ctr 0
 entry:
-  %0 = load i32 ()*, i32 ()** bitcast (i32 (...)** @Func to i32 ()**), align 8
+  %0 = load ptr, ptr @Func, align 8
   %call = tail call signext i32 %0()
   ret void
 }
 
-define dso_local signext i32 @TailCallParamFuncPtr(i32 (...)* nocapture %passedfunc) local_unnamed_addr {
+define dso_local signext i32 @TailCallParamFuncPtr(ptr nocapture %passedfunc) local_unnamed_addr {
 ; CHECK-LABEL: TailCallParamFuncPtr:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtctr r3
@@ -69,12 +69,11 @@ define dso_local signext i32 @TailCallParamFuncPtr(i32 (...)* nocapture %passedf
 ; CHECK-NEXT:    bctr
 ; CHECK-NEXT:    #TC_RETURNr8 ctr 0
 entry:
-  %callee.knr.cast = bitcast i32 (...)* %passedfunc to i32 ()*
-  %call = tail call signext i32 %callee.knr.cast()
+  %call = tail call signext i32 %passedfunc()
   ret i32 %call
 }
 
-define dso_local signext i32 @NoTailIndirectCall(i32 (...)* nocapture %passedfunc, i32 signext %a) local_unnamed_addr {
+define dso_local signext i32 @NoTailIndirectCall(ptr nocapture %passedfunc, i32 signext %a) local_unnamed_addr {
 ; CHECK-LABEL: NoTailIndirectCall:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mflr r0
@@ -96,8 +95,7 @@ define dso_local signext i32 @NoTailIndirectCall(i32 (...)* nocapture %passedfun
 ; CHECK-NEXT:    mtlr r0
 ; CHECK-NEXT:    blr
 entry:
-  %callee.knr.cast = bitcast i32 (...)* %passedfunc to i32 ()*
-  %call = tail call signext i32 %callee.knr.cast()
+  %call = tail call signext i32 %passedfunc()
   %add = add nsw i32 %call, %a
   ret i32 %add
 }
@@ -108,7 +106,7 @@ define dso_local signext i32 @TailCallDirect() local_unnamed_addr {
 ; CHECK-NEXT:    b Function at notoc
 ; CHECK-NEXT:    #TC_RETURNd8 Function at notoc 0
 entry:
-  %call = tail call signext i32 bitcast (i32 (...)* @Function to i32 ()*)()
+  %call = tail call signext i32 @Function()
   ret i32 %call
 }
 
@@ -132,7 +130,7 @@ define dso_local signext i32 @NoTailCallDirect(i32 signext %a) local_unnamed_add
 ; CHECK-NEXT:    mtlr r0
 ; CHECK-NEXT:    blr
 entry:
-  %call = tail call signext i32 bitcast (i32 (...)* @Function to i32 ()*)()
+  %call = tail call signext i32 @Function()
   %add = add nsw i32 %call, %a
   ret i32 %add
 }
@@ -181,7 +179,7 @@ define dso_local signext i32 @TailCallAbs() local_unnamed_addr {
 ; CHECK-NEXT:    bctr
 ; CHECK-NEXT:    #TC_RETURNr8 ctr 0
 entry:
-  %call = tail call signext i32 inttoptr (i64 400 to i32 ()*)()
+  %call = tail call signext i32 inttoptr (i64 400 to ptr)()
   ret i32 %call
 }
 
@@ -208,7 +206,7 @@ define dso_local signext i32 @NoTailCallAbs(i32 signext %a) local_unnamed_addr {
 ; CHECK-NEXT:    mtlr r0
 ; CHECK-NEXT:    blr
 entry:
-  %call = tail call signext i32 inttoptr (i64 400 to i32 ()*)()
+  %call = tail call signext i32 inttoptr (i64 400 to ptr)()
   %add = add nsw i32 %call, %a
   ret i32 %add
 }

diff  --git a/llvm/test/CodeGen/PowerPC/pcrel-tls-general-dynamic.ll b/llvm/test/CodeGen/PowerPC/pcrel-tls-general-dynamic.ll
index 8a69a18f8fd0..8d06c8199c90 100644
--- a/llvm/test/CodeGen/PowerPC/pcrel-tls-general-dynamic.ll
+++ b/llvm/test/CodeGen/PowerPC/pcrel-tls-general-dynamic.ll
@@ -10,7 +10,7 @@
 
 @x = external thread_local global i32, align 4
 
-define nonnull i32* @GeneralDynamicAddressLoad() {
+define nonnull ptr @GeneralDynamicAddressLoad() {
   ; CHECK-S-LABEL: GeneralDynamicAddressLoad:
   ; CHECK-S:         paddi r3, 0, x at got@tlsgd at pcrel, 1
   ; CHECK-S-NEXT:    bl __tls_get_addr at notoc(x at tlsgd)
@@ -25,7 +25,7 @@ define nonnull i32* @GeneralDynamicAddressLoad() {
   ; CHECK-O-NEXT:    0000000000000014:  R_PPC64_TLSGD        x
   ; CHECK-O-NEXT:    0000000000000014:  R_PPC64_REL24_NOTOC  __tls_get_addr
   entry:
-    ret i32* @x
+    ret ptr @x
 }
 
 define i32 @GeneralDynamicValueLoad() {
@@ -48,6 +48,6 @@ define i32 @GeneralDynamicValueLoad() {
   ; CHECK-SYM-LABEL: Symbol table '.symtab' contains 7 entries
   ; CHECK-SYM:       0000000000000000     0 TLS     GLOBAL DEFAULT  UND x
   entry:
-    %0 = load i32, i32* @x, align 4
+    %0 = load i32, ptr @x, align 4
     ret i32 %0
 }

diff  --git a/llvm/test/CodeGen/PowerPC/pcrel-tls-initial-exec.ll b/llvm/test/CodeGen/PowerPC/pcrel-tls-initial-exec.ll
index e9dabb272bfd..da58ebb1c9a8 100644
--- a/llvm/test/CodeGen/PowerPC/pcrel-tls-initial-exec.ll
+++ b/llvm/test/CodeGen/PowerPC/pcrel-tls-initial-exec.ll
@@ -13,7 +13,7 @@
 
 @x = external thread_local global i32, align 4
 
-define i32* @InitialExecAddressLoad() {
+define ptr @InitialExecAddressLoad() {
 ; CHECK-S-LABEL: InitialExecAddressLoad:
 ; CHECK-S:       # %bb.0: # %entry
 ; CHECK-S-NEXT:    pld r3, x at got@tprel at pcrel(0), 1
@@ -26,7 +26,7 @@ define i32* @InitialExecAddressLoad() {
 ; CHECK-O-NEXT:    0000000000000009:  R_PPC64_TLS	x
 ; CHECK-O-NEXT:    20 00 80 4e                  	blr
 entry:
-  ret i32* @x
+  ret ptr @x
 }
 
 define i32 @InitialExecValueLoad() {
@@ -45,6 +45,6 @@ define i32 @InitialExecValueLoad() {
 ; CHECK-SYM-LABEL: Symbol table '.symtab' contains 6 entries
 ; CHECK-SYM:         0000000000000000     0 TLS     GLOBAL DEFAULT  UND x
 entry:
-  %0 = load i32, i32* @x, align 4
+  %0 = load i32, ptr @x, align 4
   ret i32 %0
 }

diff  --git a/llvm/test/CodeGen/PowerPC/pcrel-tls-local-dynamic.ll b/llvm/test/CodeGen/PowerPC/pcrel-tls-local-dynamic.ll
index 5dae2e87fcb8..0b0fdbcaf0a4 100644
--- a/llvm/test/CodeGen/PowerPC/pcrel-tls-local-dynamic.ll
+++ b/llvm/test/CodeGen/PowerPC/pcrel-tls-local-dynamic.ll
@@ -9,7 +9,7 @@
 
 @x = hidden thread_local global i32 0, align 4
 
-define nonnull i32* @LocalDynamicAddressLoad() {
+define nonnull ptr @LocalDynamicAddressLoad() {
   ; CHECK-S-LABEL: LocalDynamicAddressLoad:
   ; CHECK-S:         paddi r3, 0, x at got@tlsld at pcrel, 1
   ; CHECK-S-NEXT:    bl __tls_get_addr at notoc(x at tlsld)
@@ -27,7 +27,7 @@ define nonnull i32* @LocalDynamicAddressLoad() {
   ; CHECK-O-NEXT:    18: paddi 3, 3, 0, 0
   ; CHECK-O-NEXT:    0000000000000018: R_PPC64_DTPREL34 x
   entry:
-    ret i32* @x
+    ret ptr @x
 }
 
 define i32 @LocalDynamicValueLoad() {
@@ -50,6 +50,6 @@ define i32 @LocalDynamicValueLoad() {
   ; CHECK-O-NEXT:    0000000000000058: R_PPC64_DTPREL34 x
   ; CHECK-O-NEXT:    60: lwz 3, 0(3)
   entry:
-    %0 = load i32, i32* @x, align 4
+    %0 = load i32, ptr @x, align 4
     ret i32 %0
 }

diff  --git a/llvm/test/CodeGen/PowerPC/pcrel-tls-local-exec.ll b/llvm/test/CodeGen/PowerPC/pcrel-tls-local-exec.ll
index 59aa9b913de4..77d18e1b64de 100644
--- a/llvm/test/CodeGen/PowerPC/pcrel-tls-local-exec.ll
+++ b/llvm/test/CodeGen/PowerPC/pcrel-tls-local-exec.ll
@@ -11,7 +11,7 @@
 @x = dso_local thread_local global i32 0, align 4
 @y = dso_local thread_local global [5 x i32] [i32 0, i32 0, i32 0, i32 0, i32 0], align 4
 
-define dso_local i32* @LocalExecAddressLoad() {
+define dso_local ptr @LocalExecAddressLoad() {
 ; CHECK-S-LABEL: LocalExecAddressLoad:
 ; CHECK-S:       # %bb.0: # %entry
 ; CHECK-S-NEXT:    paddi r3, r13, x@TPREL, 0
@@ -21,7 +21,7 @@ define dso_local i32* @LocalExecAddressLoad() {
 ; CHECK-O-NEXT:    0000000000000000:  R_PPC64_TPREL34 x
 ; CHECK-O-NEXT:    8: blr
 entry:
-  ret i32* @x
+  ret ptr @x
 }
 
 define dso_local i32 @LocalExecValueLoad() {
@@ -36,7 +36,7 @@ define dso_local i32 @LocalExecValueLoad() {
 ; CHECK-O-NEXT:    28: lwz 3, 0(3)
 ; CHECK-O-NEXT:    2c: blr
 entry:
-  %0 = load i32, i32* @x, align 4
+  %0 = load i32, ptr @x, align 4
   ret i32 %0
 }
 
@@ -52,7 +52,7 @@ define dso_local void @LocalExecValueStore(i32 %in) {
 ; CHECK-O-NEXT:    48: stw 3, 0(4)
 ; CHECK-O-NEXT:    4c: blr
 entry:
-  store i32 %in, i32* @x, align 4
+  store i32 %in, ptr @x, align 4
   ret void
 }
 
@@ -68,12 +68,12 @@ define dso_local i32 @LocalExecValueLoadOffset() {
 ; CHECK-O-NEXT:    68: lwz 3, 12(3)
 ; CHECK-O-NEXT:    6c: blr
 entry:
-  %0 = load i32, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @y, i64 0, i64 3), align 4
+  %0 = load i32, ptr getelementptr inbounds ([5 x i32], ptr @y, i64 0, i64 3), align 4
   ret i32 %0
 }
 
 
-define dso_local i32* @LocalExecValueLoadOffsetNoLoad() {
+define dso_local ptr @LocalExecValueLoadOffsetNoLoad() {
 ; CHECK-S-LABEL: LocalExecValueLoadOffsetNoLoad:
 ; CHECK-S:       # %bb.0: # %entry
 ; CHECK-S-NEXT:    paddi r3, r13, y@TPREL, 0
@@ -85,5 +85,5 @@ define dso_local i32* @LocalExecValueLoadOffsetNoLoad() {
 ; CHECK-O-NEXT:    88: addi 3, 3, 12
 ; CHECK-O-NEXT:    8c: blr
 entry:
-  ret i32* getelementptr inbounds ([5 x i32], [5 x i32]* @y, i64 0, i64 3)
+  ret ptr getelementptr inbounds ([5 x i32], ptr @y, i64 0, i64 3)
 }

diff  --git a/llvm/test/CodeGen/PowerPC/pcrel-tls_get_addr_clobbers.ll b/llvm/test/CodeGen/PowerPC/pcrel-tls_get_addr_clobbers.ll
index 291fb9269177..38f55dac1ea8 100644
--- a/llvm/test/CodeGen/PowerPC/pcrel-tls_get_addr_clobbers.ll
+++ b/llvm/test/CodeGen/PowerPC/pcrel-tls_get_addr_clobbers.ll
@@ -1,9 +1,9 @@
 ; RUN: llc -verify-machineinstrs -mtriple="powerpc64le-unknown-linux-gnu" \
 ; RUN:  -ppc-asm-full-reg-names -mcpu=pwr10 -relocation-model=pic < %s | FileCheck %s
 
-%0 = type { i32 (...)**, %0* }
-@x = external dso_local thread_local unnamed_addr global %0*, align 8
-define void @test(i8* %arg) {
+%0 = type { ptr, ptr }
+@x = external dso_local thread_local unnamed_addr global ptr, align 8
+define void @test(ptr %arg) {
 ; CHECK-LABEL: test:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mflr r0
@@ -20,6 +20,6 @@ define void @test(i8* %arg) {
 ; CHECK-NEXT:    ld r30, -16(r1)
 ; CHECK-NEXT:    mtlr r0
 entry:
-  store i8* %arg, i8** bitcast (%0** @x to i8**), align 8
+  store ptr %arg, ptr @x, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/pcrel.ll b/llvm/test/CodeGen/PowerPC/pcrel.ll
index 37c5b7c0be64..a7872c1f8a3b 100644
--- a/llvm/test/CodeGen/PowerPC/pcrel.ll
+++ b/llvm/test/CodeGen/PowerPC/pcrel.ll
@@ -33,7 +33,7 @@ define dso_local signext i32 @ReadLocalVarInt() local_unnamed_addr  {
 ; CHECK-O-NEXT:    R_PPC64_PCREL34 valIntLoc
 ; CHECK-O-NEXT:    blr
 entry:
-  %0 = load i32, i32* @valIntLoc, align 4
+  %0 = load i32, ptr @valIntLoc, align 4
   ret i32 %0
 }
 
@@ -54,6 +54,6 @@ define dso_local signext i32 @ReadGlobalVarInt() local_unnamed_addr  {
 ; CHECK-O-NEXT:    lwa 3, 0(3)
 ; CHECK-O-NEXT:    blr
 entry:
-  %0 = load i32, i32* @valIntGlob, align 4
+  %0 = load i32, ptr @valIntGlob, align 4
   ret i32 %0
 }

diff  --git a/llvm/test/CodeGen/PowerPC/pcrel_ldst.ll b/llvm/test/CodeGen/PowerPC/pcrel_ldst.ll
index 1592c9e25f08..7512d1189eb4 100644
--- a/llvm/test/CodeGen/PowerPC/pcrel_ldst.ll
+++ b/llvm/test/CodeGen/PowerPC/pcrel_ldst.ll
@@ -68,8 +68,8 @@ define dso_local void @testGlob1PtrPlus0() {
 ; CHECK-NEXT:    stb r3, GlobSt1@toc@l(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobLd1, i64 0, i64 0), align 1
-  store i8 %0, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobSt1, i64 0, i64 0), align 1
+  %0 = load i8, ptr @GlobLd1, align 1
+  store i8 %0, ptr @GlobSt1, align 1
   ret void
 }
 
@@ -101,8 +101,8 @@ define dso_local void @testGlob1PtrPlus3() {
 ; CHECK-NEXT:    stb r3, 3(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobLd1, i64 0, i64 3), align 1
-  store i8 %0, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobSt1, i64 0, i64 3), align 1
+  %0 = load i8, ptr getelementptr inbounds ([20 x i8], ptr @GlobLd1, i64 0, i64 3), align 1
+  store i8 %0, ptr getelementptr inbounds ([20 x i8], ptr @GlobSt1, i64 0, i64 3), align 1
   ret void
 }
 
@@ -130,8 +130,8 @@ define dso_local void @testGlob1PtrPlus4() {
 ; CHECK-NEXT:    stb r3, GlobSt1@toc@l+4(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobLd1, i64 0, i64 4), align 1
-  store i8 %0, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobSt1, i64 0, i64 4), align 1
+  %0 = load i8, ptr getelementptr inbounds ([20 x i8], ptr @GlobLd1, i64 0, i64 4), align 1
+  store i8 %0, ptr getelementptr inbounds ([20 x i8], ptr @GlobSt1, i64 0, i64 4), align 1
   ret void
 }
 
@@ -159,8 +159,8 @@ define dso_local void @testGlob1PtrPlus16() {
 ; CHECK-NEXT:    stb r3, GlobSt1@toc@l+16(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobLd1, i64 0, i64 16), align 1
-  store i8 %0, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobSt1, i64 0, i64 16), align 1
+  %0 = load i8, ptr getelementptr inbounds ([20 x i8], ptr @GlobLd1, i64 0, i64 16), align 1
+  store i8 %0, ptr getelementptr inbounds ([20 x i8], ptr @GlobSt1, i64 0, i64 16), align 1
   ret void
 }
 
@@ -194,10 +194,10 @@ define dso_local void @testGlob1PtrPlusVar(i64 %Idx) {
 ; CHECK-NEXT:    stbx r4, r5, r3
 ; CHECK-NEXT:    blr
 entry:
-  %arrayidx = getelementptr inbounds [20 x i8], [20 x i8]* @GlobLd1, i64 0, i64 %Idx
-  %0 = load i8, i8* %arrayidx, align 1
-  %arrayidx1 = getelementptr inbounds [20 x i8], [20 x i8]* @GlobSt1, i64 0, i64 %Idx
-  store i8 %0, i8* %arrayidx1, align 1
+  %arrayidx = getelementptr inbounds [20 x i8], ptr @GlobLd1, i64 0, i64 %Idx
+  %0 = load i8, ptr %arrayidx, align 1
+  %arrayidx1 = getelementptr inbounds [20 x i8], ptr @GlobSt1, i64 0, i64 %Idx
+  store i8 %0, ptr %arrayidx1, align 1
   ret void
 }
 
@@ -225,8 +225,8 @@ define dso_local void @testGlob2PtrPlus0() {
 ; CHECK-NEXT:    stb r3, GlobSt2@toc@l(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobLd2, i64 0, i64 0), align 1
-  store i8 %0, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobSt2, i64 0, i64 0), align 1
+  %0 = load i8, ptr @GlobLd2, align 1
+  store i8 %0, ptr @GlobSt2, align 1
   ret void
 }
 
@@ -258,8 +258,8 @@ define dso_local void @testGlob2PtrPlus3() {
 ; CHECK-NEXT:    stb r3, 3(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobLd2, i64 0, i64 3), align 1
-  store i8 %0, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobSt2, i64 0, i64 3), align 1
+  %0 = load i8, ptr getelementptr inbounds ([20 x i8], ptr @GlobLd2, i64 0, i64 3), align 1
+  store i8 %0, ptr getelementptr inbounds ([20 x i8], ptr @GlobSt2, i64 0, i64 3), align 1
   ret void
 }
 
@@ -287,8 +287,8 @@ define dso_local void @testGlob2PtrPlus4() {
 ; CHECK-NEXT:    stb r3, GlobSt2@toc@l+4(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobLd2, i64 0, i64 4), align 1
-  store i8 %0, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobSt2, i64 0, i64 4), align 1
+  %0 = load i8, ptr getelementptr inbounds ([20 x i8], ptr @GlobLd2, i64 0, i64 4), align 1
+  store i8 %0, ptr getelementptr inbounds ([20 x i8], ptr @GlobSt2, i64 0, i64 4), align 1
   ret void
 }
 
@@ -316,8 +316,8 @@ define dso_local void @testGlob2PtrPlus16() {
 ; CHECK-NEXT:    stb r3, GlobSt2@toc@l+16(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobLd2, i64 0, i64 16), align 1
-  store i8 %0, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobSt2, i64 0, i64 16), align 1
+  %0 = load i8, ptr getelementptr inbounds ([20 x i8], ptr @GlobLd2, i64 0, i64 16), align 1
+  store i8 %0, ptr getelementptr inbounds ([20 x i8], ptr @GlobSt2, i64 0, i64 16), align 1
   ret void
 }
 
@@ -351,10 +351,10 @@ define dso_local void @testGlob2PtrPlusVar(i64 %Idx) {
 ; CHECK-NEXT:    stbx r4, r5, r3
 ; CHECK-NEXT:    blr
 entry:
-  %arrayidx = getelementptr inbounds [20 x i8], [20 x i8]* @GlobLd2, i64 0, i64 %Idx
-  %0 = load i8, i8* %arrayidx, align 1
-  %arrayidx1 = getelementptr inbounds [20 x i8], [20 x i8]* @GlobSt2, i64 0, i64 %Idx
-  store i8 %0, i8* %arrayidx1, align 1
+  %arrayidx = getelementptr inbounds [20 x i8], ptr @GlobLd2, i64 0, i64 %Idx
+  %0 = load i8, ptr %arrayidx, align 1
+  %arrayidx1 = getelementptr inbounds [20 x i8], ptr @GlobSt2, i64 0, i64 %Idx
+  store i8 %0, ptr %arrayidx1, align 1
   ret void
 }
 
@@ -382,8 +382,8 @@ define dso_local void @testGlob3PtrPlus0() {
 ; CHECK-NEXT:    sth r3, GlobSt3@toc@l(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobLd3, i64 0, i64 0), align 2
-  store i16 %0, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobSt3, i64 0, i64 0), align 2
+  %0 = load i16, ptr @GlobLd3, align 2
+  store i16 %0, ptr @GlobSt3, align 2
   ret void
 }
 
@@ -415,8 +415,8 @@ define dso_local void @testGlob3PtrPlus3() {
 ; CHECK-NEXT:    sth r3, 3(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i16]* @GlobLd3 to i8*), i64 3) to i16*), align 2
-  store i16 %0, i16* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i16]* @GlobSt3 to i8*), i64 3) to i16*), align 2
+  %0 = load i16, ptr getelementptr inbounds (i8, ptr @GlobLd3, i64 3), align 2
+  store i16 %0, ptr getelementptr inbounds (i8, ptr @GlobSt3, i64 3), align 2
   ret void
 }
 
@@ -444,8 +444,8 @@ define dso_local void @testGlob3PtrPlus4() {
 ; CHECK-NEXT:    sth r3, GlobSt3@toc@l+4(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobLd3, i64 0, i64 2), align 2
-  store i16 %0, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobSt3, i64 0, i64 2), align 2
+  %0 = load i16, ptr getelementptr inbounds ([20 x i16], ptr @GlobLd3, i64 0, i64 2), align 2
+  store i16 %0, ptr getelementptr inbounds ([20 x i16], ptr @GlobSt3, i64 0, i64 2), align 2
   ret void
 }
 
@@ -473,8 +473,8 @@ define dso_local void @testGlob3PtrPlus16() {
 ; CHECK-NEXT:    sth r3, GlobSt3@toc@l+16(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobLd3, i64 0, i64 8), align 2
-  store i16 %0, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobSt3, i64 0, i64 8), align 2
+  %0 = load i16, ptr getelementptr inbounds ([20 x i16], ptr @GlobLd3, i64 0, i64 8), align 2
+  store i16 %0, ptr getelementptr inbounds ([20 x i16], ptr @GlobSt3, i64 0, i64 8), align 2
   ret void
 }
 
@@ -511,10 +511,10 @@ define dso_local void @testGlob3PtrPlusVar(i64 %Idx) {
 ; CHECK-NEXT:    sthx r4, r5, r3
 ; CHECK-NEXT:    blr
 entry:
-  %arrayidx = getelementptr inbounds [20 x i16], [20 x i16]* @GlobLd3, i64 0, i64 %Idx
-  %0 = load i16, i16* %arrayidx, align 2
-  %arrayidx1 = getelementptr inbounds [20 x i16], [20 x i16]* @GlobSt3, i64 0, i64 %Idx
-  store i16 %0, i16* %arrayidx1, align 2
+  %arrayidx = getelementptr inbounds [20 x i16], ptr @GlobLd3, i64 0, i64 %Idx
+  %0 = load i16, ptr %arrayidx, align 2
+  %arrayidx1 = getelementptr inbounds [20 x i16], ptr @GlobSt3, i64 0, i64 %Idx
+  store i16 %0, ptr %arrayidx1, align 2
   ret void
 }
 
@@ -542,8 +542,8 @@ define dso_local void @testGlob4PtrPlus0() {
 ; CHECK-NEXT:    sth r3, GlobSt4@toc@l(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobLd4, i64 0, i64 0), align 2
-  store i16 %0, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobSt4, i64 0, i64 0), align 2
+  %0 = load i16, ptr @GlobLd4, align 2
+  store i16 %0, ptr @GlobSt4, align 2
   ret void
 }
 
@@ -575,8 +575,8 @@ define dso_local void @testGlob4PtrPlus3() {
 ; CHECK-NEXT:    sth r3, 3(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i16]* @GlobLd4 to i8*), i64 3) to i16*), align 2
-  store i16 %0, i16* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i16]* @GlobSt4 to i8*), i64 3) to i16*), align 2
+  %0 = load i16, ptr getelementptr inbounds (i8, ptr @GlobLd4, i64 3), align 2
+  store i16 %0, ptr getelementptr inbounds (i8, ptr @GlobSt4, i64 3), align 2
   ret void
 }
 
@@ -604,8 +604,8 @@ define dso_local void @testGlob4PtrPlus4() {
 ; CHECK-NEXT:    sth r3, GlobSt4@toc@l+4(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobLd4, i64 0, i64 2), align 2
-  store i16 %0, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobSt4, i64 0, i64 2), align 2
+  %0 = load i16, ptr getelementptr inbounds ([20 x i16], ptr @GlobLd4, i64 0, i64 2), align 2
+  store i16 %0, ptr getelementptr inbounds ([20 x i16], ptr @GlobSt4, i64 0, i64 2), align 2
   ret void
 }
 
@@ -633,8 +633,8 @@ define dso_local void @testGlob4PtrPlus16() {
 ; CHECK-NEXT:    sth r3, GlobSt4 at toc@l+16(r4)
 ; CHECK-NEXT:    sth r3, GlobSt4@toc@l+16(r4)
 entry:
-  %0 = load i16, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobLd4, i64 0, i64 8), align 2
-  store i16 %0, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobSt4, i64 0, i64 8), align 2
+  %0 = load i16, ptr getelementptr inbounds ([20 x i16], ptr @GlobLd4, i64 0, i64 8), align 2
+  store i16 %0, ptr getelementptr inbounds ([20 x i16], ptr @GlobSt4, i64 0, i64 8), align 2
   ret void
 }
 
@@ -671,10 +671,10 @@ define dso_local void @testGlob4PtrPlusVar(i64 %Idx) {
 ; CHECK-NEXT:    sthx r4, r5, r3
 ; CHECK-NEXT:    blr
 entry:
-  %arrayidx = getelementptr inbounds [20 x i16], [20 x i16]* @GlobLd4, i64 0, i64 %Idx
-  %0 = load i16, i16* %arrayidx, align 2
-  %arrayidx1 = getelementptr inbounds [20 x i16], [20 x i16]* @GlobSt4, i64 0, i64 %Idx
-  store i16 %0, i16* %arrayidx1, align 2
+  %arrayidx = getelementptr inbounds [20 x i16], ptr @GlobLd4, i64 0, i64 %Idx
+  %0 = load i16, ptr %arrayidx, align 2
+  %arrayidx1 = getelementptr inbounds [20 x i16], ptr @GlobSt4, i64 0, i64 %Idx
+  store i16 %0, ptr %arrayidx1, align 2
   ret void
 }
 
@@ -702,8 +702,8 @@ define dso_local void @testGlob5PtrPlus0() {
 ; CHECK-NEXT:    stw r3, GlobSt5@toc@l(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobLd5, i64 0, i64 0), align 4
-  store i32 %0, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobSt5, i64 0, i64 0), align 4
+  %0 = load i32, ptr @GlobLd5, align 4
+  store i32 %0, ptr @GlobSt5, align 4
   ret void
 }
 
@@ -731,8 +731,8 @@ define dso_local void @testGlob5PtrPlus3() {
 ; CHECK-NEXT:    stw r3, GlobSt5@toc@l+3(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i32]* @GlobLd5 to i8*), i64 3) to i32*), align 4
-  store i32 %0, i32* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i32]* @GlobSt5 to i8*), i64 3) to i32*), align 4
+  %0 = load i32, ptr getelementptr inbounds (i8, ptr @GlobLd5, i64 3), align 4
+  store i32 %0, ptr getelementptr inbounds (i8, ptr @GlobSt5, i64 3), align 4
   ret void
 }
 
@@ -760,8 +760,8 @@ define dso_local void @testGlob5PtrPlus4() {
 ; CHECK-NEXT:    stw r3, GlobSt5@toc@l+4(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobLd5, i64 0, i64 1), align 4
-  store i32 %0, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobSt5, i64 0, i64 1), align 4
+  %0 = load i32, ptr getelementptr inbounds ([20 x i32], ptr @GlobLd5, i64 0, i64 1), align 4
+  store i32 %0, ptr getelementptr inbounds ([20 x i32], ptr @GlobSt5, i64 0, i64 1), align 4
   ret void
 }
 
@@ -789,8 +789,8 @@ define dso_local void @testGlob5PtrPlus16() {
 ; CHECK-NEXT:    stw r3, GlobSt5@toc@l+16(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobLd5, i64 0, i64 4), align 4
-  store i32 %0, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobSt5, i64 0, i64 4), align 4
+  %0 = load i32, ptr getelementptr inbounds ([20 x i32], ptr @GlobLd5, i64 0, i64 4), align 4
+  store i32 %0, ptr getelementptr inbounds ([20 x i32], ptr @GlobSt5, i64 0, i64 4), align 4
   ret void
 }
 
@@ -827,10 +827,10 @@ define dso_local void @testGlob5PtrPlusVar(i64 %Idx) {
 ; CHECK-NEXT:    stwx r4, r5, r3
 ; CHECK-NEXT:    blr
 entry:
-  %arrayidx = getelementptr inbounds [20 x i32], [20 x i32]* @GlobLd5, i64 0, i64 %Idx
-  %0 = load i32, i32* %arrayidx, align 4
-  %arrayidx1 = getelementptr inbounds [20 x i32], [20 x i32]* @GlobSt5, i64 0, i64 %Idx
-  store i32 %0, i32* %arrayidx1, align 4
+  %arrayidx = getelementptr inbounds [20 x i32], ptr @GlobLd5, i64 0, i64 %Idx
+  %0 = load i32, ptr %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds [20 x i32], ptr @GlobSt5, i64 0, i64 %Idx
+  store i32 %0, ptr %arrayidx1, align 4
   ret void
 }
 
@@ -858,8 +858,8 @@ define dso_local void @testGlob6PtrPlus0() {
 ; CHECK-NEXT:    stw r3, GlobSt6@toc@l(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobLd6, i64 0, i64 0), align 4
-  store i32 %0, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobSt6, i64 0, i64 0), align 4
+  %0 = load i32, ptr @GlobLd6, align 4
+  store i32 %0, ptr @GlobSt6, align 4
   ret void
 }
 
@@ -887,8 +887,8 @@ define dso_local void @testGlob6PtrPlus3() {
 ; CHECK-NEXT:    stw r3, GlobSt6@toc@l+3(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i32]* @GlobLd6 to i8*), i64 3) to i32*), align 4
-  store i32 %0, i32* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i32]* @GlobSt6 to i8*), i64 3) to i32*), align 4
+  %0 = load i32, ptr getelementptr inbounds (i8, ptr @GlobLd6, i64 3), align 4
+  store i32 %0, ptr getelementptr inbounds (i8, ptr @GlobSt6, i64 3), align 4
   ret void
 }
 
@@ -916,8 +916,8 @@ define dso_local void @testGlob6PtrPlus4() {
 ; CHECK-NEXT:    stw r3, GlobSt6@toc@l+4(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobLd6, i64 0, i64 1), align 4
-  store i32 %0, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobSt6, i64 0, i64 1), align 4
+  %0 = load i32, ptr getelementptr inbounds ([20 x i32], ptr @GlobLd6, i64 0, i64 1), align 4
+  store i32 %0, ptr getelementptr inbounds ([20 x i32], ptr @GlobSt6, i64 0, i64 1), align 4
   ret void
 }
 
@@ -945,8 +945,8 @@ define dso_local void @testGlob6PtrPlus16() {
 ; CHECK-NEXT:    stw r3, GlobSt6@toc@l+16(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobLd6, i64 0, i64 4), align 4
-  store i32 %0, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobSt6, i64 0, i64 4), align 4
+  %0 = load i32, ptr getelementptr inbounds ([20 x i32], ptr @GlobLd6, i64 0, i64 4), align 4
+  store i32 %0, ptr getelementptr inbounds ([20 x i32], ptr @GlobSt6, i64 0, i64 4), align 4
   ret void
 }
 
@@ -983,10 +983,10 @@ define dso_local void @testGlob6PtrPlusVar(i64 %Idx) {
 ; CHECK-NEXT:    stwx r4, r5, r3
 ; CHECK-NEXT:    blr
 entry:
-  %arrayidx = getelementptr inbounds [20 x i32], [20 x i32]* @GlobLd6, i64 0, i64 %Idx
-  %0 = load i32, i32* %arrayidx, align 4
-  %arrayidx1 = getelementptr inbounds [20 x i32], [20 x i32]* @GlobSt6, i64 0, i64 %Idx
-  store i32 %0, i32* %arrayidx1, align 4
+  %arrayidx = getelementptr inbounds [20 x i32], ptr @GlobLd6, i64 0, i64 %Idx
+  %0 = load i32, ptr %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds [20 x i32], ptr @GlobSt6, i64 0, i64 %Idx
+  store i32 %0, ptr %arrayidx1, align 4
   ret void
 }
 
@@ -1022,8 +1022,8 @@ define dso_local void @testGlob7PtrPlus0() {
 ; CHECK-P8-NEXT:    std r3, GlobSt7@toc@l(r4)
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i64, i64* getelementptr inbounds ([20 x i64], [20 x i64]* @GlobLd7, i64 0, i64 0), align 8
-  store i64 %0, i64* getelementptr inbounds ([20 x i64], [20 x i64]* @GlobSt7, i64 0, i64 0), align 8
+  %0 = load i64, ptr @GlobLd7, align 8
+  store i64 %0, ptr @GlobSt7, align 8
   ret void
 }
 
@@ -1056,8 +1056,8 @@ define dso_local void @testGlob7PtrPlus3() {
 ; CHECK-NEXT:    stdx r3, r5, r4
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i64, i64* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i64]* @GlobLd7 to i8*), i64 3) to i64*), align 8
-  store i64 %0, i64* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i64]* @GlobSt7 to i8*), i64 3) to i64*), align 8
+  %0 = load i64, ptr getelementptr inbounds (i8, ptr @GlobLd7, i64 3), align 8
+  store i64 %0, ptr getelementptr inbounds (i8, ptr @GlobSt7, i64 3), align 8
   ret void
 }
 
@@ -1093,8 +1093,8 @@ define dso_local void @testGlob7PtrPlus4() {
 ; CHECK-P8-NEXT:    std r3, GlobSt7@toc@l+4(r4)
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i64, i64* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i64]* @GlobLd7 to i8*), i64 4) to i64*), align 8
-  store i64 %0, i64* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i64]* @GlobSt7 to i8*), i64 4) to i64*), align 8
+  %0 = load i64, ptr getelementptr inbounds (i8, ptr @GlobLd7, i64 4), align 8
+  store i64 %0, ptr getelementptr inbounds (i8, ptr @GlobSt7, i64 4), align 8
   ret void
 }
 
@@ -1130,8 +1130,8 @@ define dso_local void @testGlob7PtrPlus16() {
 ; CHECK-P8-NEXT:    std r3, GlobSt7@toc@l+16(r4)
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i64, i64* getelementptr inbounds ([20 x i64], [20 x i64]* @GlobLd7, i64 0, i64 2), align 8
-  store i64 %0, i64* getelementptr inbounds ([20 x i64], [20 x i64]* @GlobSt7, i64 0, i64 2), align 8
+  %0 = load i64, ptr getelementptr inbounds ([20 x i64], ptr @GlobLd7, i64 0, i64 2), align 8
+  store i64 %0, ptr getelementptr inbounds ([20 x i64], ptr @GlobSt7, i64 0, i64 2), align 8
   ret void
 }
 
@@ -1168,10 +1168,10 @@ define dso_local void @testGlob7PtrPlusVar(i64 %Idx) {
 ; CHECK-NEXT:    stdx r4, r5, r3
 ; CHECK-NEXT:    blr
 entry:
-  %arrayidx = getelementptr inbounds [20 x i64], [20 x i64]* @GlobLd7, i64 0, i64 %Idx
-  %0 = load i64, i64* %arrayidx, align 8
-  %arrayidx1 = getelementptr inbounds [20 x i64], [20 x i64]* @GlobSt7, i64 0, i64 %Idx
-  store i64 %0, i64* %arrayidx1, align 8
+  %arrayidx = getelementptr inbounds [20 x i64], ptr @GlobLd7, i64 0, i64 %Idx
+  %0 = load i64, ptr %arrayidx, align 8
+  %arrayidx1 = getelementptr inbounds [20 x i64], ptr @GlobSt7, i64 0, i64 %Idx
+  store i64 %0, ptr %arrayidx1, align 8
   ret void
 }
 
@@ -1207,8 +1207,8 @@ define dso_local void @testGlob8PtrPlus0() {
 ; CHECK-P8-NEXT:    std r3, GlobSt8@toc@l(r4)
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i64, i64* getelementptr inbounds ([20 x i64], [20 x i64]* @GlobLd8, i64 0, i64 0), align 8
-  store i64 %0, i64* getelementptr inbounds ([20 x i64], [20 x i64]* @GlobSt8, i64 0, i64 0), align 8
+  %0 = load i64, ptr @GlobLd8, align 8
+  store i64 %0, ptr @GlobSt8, align 8
   ret void
 }
 
@@ -1241,8 +1241,8 @@ define dso_local void @testGlob8PtrPlus3() {
 ; CHECK-NEXT:    stdx r3, r5, r4
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i64, i64* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i64]* @GlobLd8 to i8*), i64 3) to i64*), align 8
-  store i64 %0, i64* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i64]* @GlobSt8 to i8*), i64 3) to i64*), align 8
+  %0 = load i64, ptr getelementptr inbounds (i8, ptr @GlobLd8, i64 3), align 8
+  store i64 %0, ptr getelementptr inbounds (i8, ptr @GlobSt8, i64 3), align 8
   ret void
 }
 
@@ -1278,8 +1278,8 @@ define dso_local void @testGlob8PtrPlus4() {
 ; CHECK-P8-NEXT:    std r3, GlobSt8@toc@l+4(r4)
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i64, i64* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i64]* @GlobLd8 to i8*), i64 4) to i64*), align 8
-  store i64 %0, i64* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i64]* @GlobSt8 to i8*), i64 4) to i64*), align 8
+  %0 = load i64, ptr getelementptr inbounds (i8, ptr @GlobLd8, i64 4), align 8
+  store i64 %0, ptr getelementptr inbounds (i8, ptr @GlobSt8, i64 4), align 8
   ret void
 }
 
@@ -1315,8 +1315,8 @@ define dso_local void @testGlob8PtrPlus16() {
 ; CHECK-P8-NEXT:    std r3, GlobSt8@toc@l+16(r4)
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i64, i64* getelementptr inbounds ([20 x i64], [20 x i64]* @GlobLd8, i64 0, i64 2), align 8
-  store i64 %0, i64* getelementptr inbounds ([20 x i64], [20 x i64]* @GlobSt8, i64 0, i64 2), align 8
+  %0 = load i64, ptr getelementptr inbounds ([20 x i64], ptr @GlobLd8, i64 0, i64 2), align 8
+  store i64 %0, ptr getelementptr inbounds ([20 x i64], ptr @GlobSt8, i64 0, i64 2), align 8
   ret void
 }
 
@@ -1353,10 +1353,10 @@ define dso_local void @testGlob8PtrPlusVar(i64 %Idx) {
 ; CHECK-NEXT:    stdx r4, r5, r3
 ; CHECK-NEXT:    blr
 entry:
-  %arrayidx = getelementptr inbounds [20 x i64], [20 x i64]* @GlobLd8, i64 0, i64 %Idx
-  %0 = load i64, i64* %arrayidx, align 8
-  %arrayidx1 = getelementptr inbounds [20 x i64], [20 x i64]* @GlobSt8, i64 0, i64 %Idx
-  store i64 %0, i64* %arrayidx1, align 8
+  %arrayidx = getelementptr inbounds [20 x i64], ptr @GlobLd8, i64 0, i64 %Idx
+  %0 = load i64, ptr %arrayidx, align 8
+  %arrayidx1 = getelementptr inbounds [20 x i64], ptr @GlobSt8, i64 0, i64 %Idx
+  store i64 %0, ptr %arrayidx1, align 8
   ret void
 }
 
@@ -1384,8 +1384,8 @@ define dso_local void @testGlob9PtrPlus0() {
 ; CHECK-NEXT:    stw r3, GlobSt9@toc@l(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @GlobLd9, i64 0, i64 0), align 4
-  store float %0, float* getelementptr inbounds ([20 x float], [20 x float]* @GlobSt9, i64 0, i64 0), align 4
+  %0 = load float, ptr @GlobLd9, align 4
+  store float %0, ptr @GlobSt9, align 4
   ret void
 }
 
@@ -1413,8 +1413,8 @@ define dso_local void @testGlob9PtrPlus3() {
 ; CHECK-NEXT:    stw r3, GlobSt9@toc@l+3(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x float]* @GlobLd9 to i8*), i64 3) to float*), align 4
-  store float %0, float* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x float]* @GlobSt9 to i8*), i64 3) to float*), align 4
+  %0 = load float, ptr getelementptr inbounds (i8, ptr @GlobLd9, i64 3), align 4
+  store float %0, ptr getelementptr inbounds (i8, ptr @GlobSt9, i64 3), align 4
   ret void
 }
 
@@ -1442,8 +1442,8 @@ define dso_local void @testGlob9PtrPlus4() {
 ; CHECK-NEXT:    stw r3, GlobSt9@toc@l+4(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @GlobLd9, i64 0, i64 1), align 4
-  store float %0, float* getelementptr inbounds ([20 x float], [20 x float]* @GlobSt9, i64 0, i64 1), align 4
+  %0 = load float, ptr getelementptr inbounds ([20 x float], ptr @GlobLd9, i64 0, i64 1), align 4
+  store float %0, ptr getelementptr inbounds ([20 x float], ptr @GlobSt9, i64 0, i64 1), align 4
   ret void
 }
 
@@ -1471,8 +1471,8 @@ define dso_local void @testGlob9PtrPlus16() {
 ; CHECK-NEXT:    stw r3, GlobSt9@toc@l+16(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @GlobLd9, i64 0, i64 4), align 4
-  store float %0, float* getelementptr inbounds ([20 x float], [20 x float]* @GlobSt9, i64 0, i64 4), align 4
+  %0 = load float, ptr getelementptr inbounds ([20 x float], ptr @GlobLd9, i64 0, i64 4), align 4
+  store float %0, ptr getelementptr inbounds ([20 x float], ptr @GlobSt9, i64 0, i64 4), align 4
   ret void
 }
 
@@ -1509,10 +1509,10 @@ define dso_local void @testGlob9PtrPlusVar(i64 %Idx) {
 ; CHECK-NEXT:    stwx r4, r5, r3
 ; CHECK-NEXT:    blr
 entry:
-  %arrayidx = getelementptr inbounds [20 x float], [20 x float]* @GlobLd9, i64 0, i64 %Idx
-  %0 = load float, float* %arrayidx, align 4
-  %arrayidx1 = getelementptr inbounds [20 x float], [20 x float]* @GlobSt9, i64 0, i64 %Idx
-  store float %0, float* %arrayidx1, align 4
+  %arrayidx = getelementptr inbounds [20 x float], ptr @GlobLd9, i64 0, i64 %Idx
+  %0 = load float, ptr %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds [20 x float], ptr @GlobSt9, i64 0, i64 %Idx
+  store float %0, ptr %arrayidx1, align 4
   ret void
 }
 
@@ -1548,8 +1548,8 @@ define dso_local void @testGlob10PtrPlus0() {
 ; CHECK-P8-NEXT:    std r3, GlobSt10@toc@l(r4)
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load double, double* getelementptr inbounds ([20 x double], [20 x double]* @GlobLd10, i64 0, i64 0), align 8
-  store double %0, double* getelementptr inbounds ([20 x double], [20 x double]* @GlobSt10, i64 0, i64 0), align 8
+  %0 = load double, ptr @GlobLd10, align 8
+  store double %0, ptr @GlobSt10, align 8
   ret void
 }
 
@@ -1582,8 +1582,8 @@ define dso_local void @testGlob10PtrPlus3() {
 ; CHECK-NEXT:    stdx r3, r5, r4
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load double, double* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x double]* @GlobLd10 to i8*), i64 3) to double*), align 8
-  store double %0, double* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x double]* @GlobSt10 to i8*), i64 3) to double*), align 8
+  %0 = load double, ptr getelementptr inbounds (i8, ptr @GlobLd10, i64 3), align 8
+  store double %0, ptr getelementptr inbounds (i8, ptr @GlobSt10, i64 3), align 8
   ret void
 }
 
@@ -1619,8 +1619,8 @@ define dso_local void @testGlob10PtrPlus4() {
 ; CHECK-P8-NEXT:    std r3, GlobSt10@toc@l+4(r4)
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load double, double* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x double]* @GlobLd10 to i8*), i64 4) to double*), align 8
-  store double %0, double* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x double]* @GlobSt10 to i8*), i64 4) to double*), align 8
+  %0 = load double, ptr getelementptr inbounds (i8, ptr @GlobLd10, i64 4), align 8
+  store double %0, ptr getelementptr inbounds (i8, ptr @GlobSt10, i64 4), align 8
   ret void
 }
 
@@ -1656,8 +1656,8 @@ define dso_local void @testGlob10PtrPlus16() {
 ; CHECK-P8-NEXT:    std r3, GlobSt10@toc@l+16(r4)
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load double, double* getelementptr inbounds ([20 x double], [20 x double]* @GlobLd10, i64 0, i64 2), align 8
-  store double %0, double* getelementptr inbounds ([20 x double], [20 x double]* @GlobSt10, i64 0, i64 2), align 8
+  %0 = load double, ptr getelementptr inbounds ([20 x double], ptr @GlobLd10, i64 0, i64 2), align 8
+  store double %0, ptr getelementptr inbounds ([20 x double], ptr @GlobSt10, i64 0, i64 2), align 8
   ret void
 }
 
@@ -1694,10 +1694,10 @@ define dso_local void @testGlob10PtrPlusVar(i64 %Idx) {
 ; CHECK-NEXT:    stdx r4, r5, r3
 ; CHECK-NEXT:    blr
 entry:
-  %arrayidx = getelementptr inbounds [20 x double], [20 x double]* @GlobLd10, i64 0, i64 %Idx
-  %0 = load double, double* %arrayidx, align 8
-  %arrayidx1 = getelementptr inbounds [20 x double], [20 x double]* @GlobSt10, i64 0, i64 %Idx
-  store double %0, double* %arrayidx1, align 8
+  %arrayidx = getelementptr inbounds [20 x double], ptr @GlobLd10, i64 0, i64 %Idx
+  %0 = load double, ptr %arrayidx, align 8
+  %arrayidx1 = getelementptr inbounds [20 x double], ptr @GlobSt10, i64 0, i64 %Idx
+  store double %0, ptr %arrayidx1, align 8
   ret void
 }
 
@@ -1749,8 +1749,8 @@ define dso_local void @testGlob11PtrPlus0() {
 ; CHECK-P8-BE-NEXT:    stxvw4x vs0, 0, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load <16 x i8>, <16 x i8>* getelementptr inbounds ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobLd11, i64 0, i64 0), align 16
-  store <16 x i8> %0, <16 x i8>* getelementptr inbounds ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobSt11, i64 0, i64 0), align 16
+  %0 = load <16 x i8>, ptr @GlobLd11, align 16
+  store <16 x i8> %0, ptr @GlobSt11, align 16
   ret void
 }
 
@@ -1805,8 +1805,8 @@ define dso_local void @testGlob11PtrPlus3() {
 ; CHECK-P8-BE-NEXT:    stxvw4x vs0, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobLd11, i64 0, i64 0, i64 3) to <16 x i8>*), align 16
-  store <16 x i8> %0, <16 x i8>* bitcast (i8* getelementptr ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobSt11, i64 0, i64 0, i64 3) to <16 x i8>*), align 16
+  %0 = load <16 x i8>, ptr getelementptr ([20 x <16 x i8>], ptr @GlobLd11, i64 0, i64 0, i64 3), align 16
+  store <16 x i8> %0, ptr getelementptr ([20 x <16 x i8>], ptr @GlobSt11, i64 0, i64 0, i64 3), align 16
   ret void
 }
 
@@ -1861,8 +1861,8 @@ define dso_local void @testGlob11PtrPlus4() {
 ; CHECK-P8-BE-NEXT:    stxvw4x vs0, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobLd11, i64 0, i64 0, i64 4) to <16 x i8>*), align 16
-  store <16 x i8> %0, <16 x i8>* bitcast (i8* getelementptr ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobSt11, i64 0, i64 0, i64 4) to <16 x i8>*), align 16
+  %0 = load <16 x i8>, ptr getelementptr ([20 x <16 x i8>], ptr @GlobLd11, i64 0, i64 0, i64 4), align 16
+  store <16 x i8> %0, ptr getelementptr ([20 x <16 x i8>], ptr @GlobSt11, i64 0, i64 0, i64 4), align 16
   ret void
 }
 
@@ -1916,8 +1916,8 @@ define dso_local void @testGlob11PtrPlus16() {
 ; CHECK-P8-BE-NEXT:    stxvw4x vs0, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobLd11, i64 0, i64 1, i64 0) to <16 x i8>*), align 16
-  store <16 x i8> %0, <16 x i8>* bitcast (i8* getelementptr inbounds ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobSt11, i64 0, i64 1, i64 0) to <16 x i8>*), align 16
+  %0 = load <16 x i8>, ptr getelementptr inbounds ([20 x <16 x i8>], ptr @GlobLd11, i64 0, i64 1, i64 0), align 16
+  store <16 x i8> %0, ptr getelementptr inbounds ([20 x <16 x i8>], ptr @GlobSt11, i64 0, i64 1, i64 0), align 16
   ret void
 }
 
@@ -1976,10 +1976,10 @@ define dso_local void @testGlob11PtrPlusVar(i64 %Idx) {
 ; CHECK-P8-BE-NEXT:    stxvw4x vs0, r4, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %arrayidx = getelementptr inbounds [20 x <16 x i8>], [20 x <16 x i8>]* @GlobLd11, i64 0, i64 %Idx
-  %0 = load <16 x i8>, <16 x i8>* %arrayidx, align 16
-  %arrayidx1 = getelementptr inbounds [20 x <16 x i8>], [20 x <16 x i8>]* @GlobSt11, i64 0, i64 %Idx
-  store <16 x i8> %0, <16 x i8>* %arrayidx1, align 16
+  %arrayidx = getelementptr inbounds [20 x <16 x i8>], ptr @GlobLd11, i64 0, i64 %Idx
+  %0 = load <16 x i8>, ptr %arrayidx, align 16
+  %arrayidx1 = getelementptr inbounds [20 x <16 x i8>], ptr @GlobSt11, i64 0, i64 %Idx
+  store <16 x i8> %0, ptr %arrayidx1, align 16
   ret void
 }
 
@@ -2031,8 +2031,8 @@ define dso_local void @testGlob12PtrPlus0() {
 ; CHECK-P8-BE-NEXT:    stxvw4x vs0, 0, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load <16 x i8>, <16 x i8>* getelementptr inbounds ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobLd12, i64 0, i64 0), align 16
-  store <16 x i8> %0, <16 x i8>* getelementptr inbounds ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobSt12, i64 0, i64 0), align 16
+  %0 = load <16 x i8>, ptr @GlobLd12, align 16
+  store <16 x i8> %0, ptr @GlobSt12, align 16
   ret void
 }
 
@@ -2087,8 +2087,8 @@ define dso_local void @testGlob12PtrPlus3() {
 ; CHECK-P8-BE-NEXT:    stxvw4x vs0, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobLd12, i64 0, i64 0, i64 3) to <16 x i8>*), align 16
-  store <16 x i8> %0, <16 x i8>* bitcast (i8* getelementptr ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobSt12, i64 0, i64 0, i64 3) to <16 x i8>*), align 16
+  %0 = load <16 x i8>, ptr getelementptr ([20 x <16 x i8>], ptr @GlobLd12, i64 0, i64 0, i64 3), align 16
+  store <16 x i8> %0, ptr getelementptr ([20 x <16 x i8>], ptr @GlobSt12, i64 0, i64 0, i64 3), align 16
   ret void
 }
 
@@ -2143,8 +2143,8 @@ define dso_local void @testGlob12PtrPlus4() {
 ; CHECK-P8-BE-NEXT:    stxvw4x vs0, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobLd12, i64 0, i64 0, i64 4) to <16 x i8>*), align 16
-  store <16 x i8> %0, <16 x i8>* bitcast (i8* getelementptr ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobSt12, i64 0, i64 0, i64 4) to <16 x i8>*), align 16
+  %0 = load <16 x i8>, ptr getelementptr ([20 x <16 x i8>], ptr @GlobLd12, i64 0, i64 0, i64 4), align 16
+  store <16 x i8> %0, ptr getelementptr ([20 x <16 x i8>], ptr @GlobSt12, i64 0, i64 0, i64 4), align 16
   ret void
 }
 
@@ -2198,8 +2198,8 @@ define dso_local void @testGlob12PtrPlus16() {
 ; CHECK-P8-BE-NEXT:    stxvw4x vs0, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobLd12, i64 0, i64 1, i64 0) to <16 x i8>*), align 16
-  store <16 x i8> %0, <16 x i8>* bitcast (i8* getelementptr inbounds ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobSt12, i64 0, i64 1, i64 0) to <16 x i8>*), align 16
+  %0 = load <16 x i8>, ptr getelementptr inbounds ([20 x <16 x i8>], ptr @GlobLd12, i64 0, i64 1, i64 0), align 16
+  store <16 x i8> %0, ptr getelementptr inbounds ([20 x <16 x i8>], ptr @GlobSt12, i64 0, i64 1, i64 0), align 16
   ret void
 }
 
@@ -2258,10 +2258,10 @@ define dso_local void @testGlob12PtrPlusVar(i64 %Idx) {
 ; CHECK-P8-BE-NEXT:    stxvw4x vs0, r4, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %arrayidx = getelementptr inbounds [20 x <16 x i8>], [20 x <16 x i8>]* @GlobLd12, i64 0, i64 %Idx
-  %0 = load <16 x i8>, <16 x i8>* %arrayidx, align 16
-  %arrayidx1 = getelementptr inbounds [20 x <16 x i8>], [20 x <16 x i8>]* @GlobSt12, i64 0, i64 %Idx
-  store <16 x i8> %0, <16 x i8>* %arrayidx1, align 16
+  %arrayidx = getelementptr inbounds [20 x <16 x i8>], ptr @GlobLd12, i64 0, i64 %Idx
+  %0 = load <16 x i8>, ptr %arrayidx, align 16
+  %arrayidx1 = getelementptr inbounds [20 x <16 x i8>], ptr @GlobSt12, i64 0, i64 %Idx
+  store <16 x i8> %0, ptr %arrayidx1, align 16
   ret void
 }
 
@@ -2289,8 +2289,8 @@ define dso_local void @Atomic_LdSt_i8() {
 ; CHECK-NEXT:    stb r3, GlobSt1 at toc@l(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load atomic i8, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobLd1, i64 0, i64 0) monotonic, align 1
-  store atomic i8 %0, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobSt1, i64 0, i64 0) monotonic, align 1
+  %0 = load atomic i8, ptr @GlobLd1 monotonic, align 1
+  store atomic i8 %0, ptr @GlobSt1 monotonic, align 1
   ret void
 }
 
@@ -2318,8 +2318,8 @@ define dso_local void @Atomic_LdSt_i16() {
 ; CHECK-NEXT:    sth r3, GlobSt3 at toc@l(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load atomic i16, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobLd3, i64 0, i64 0) monotonic, align 2
-  store atomic i16 %0, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobSt3, i64 0, i64 0) monotonic, align 2
+  %0 = load atomic i16, ptr @GlobLd3 monotonic, align 2
+  store atomic i16 %0, ptr @GlobSt3 monotonic, align 2
   ret void
 }
 
@@ -2347,8 +2347,8 @@ define dso_local void @Atomic_LdSt_i32() {
 ; CHECK-NEXT:    stw r3, GlobSt5 at toc@l(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load atomic i32, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobLd5, i64 0, i64 0) monotonic, align 4
-  store atomic i32 %0, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobSt5, i64 0, i64 0) monotonic, align 4
+  %0 = load atomic i32, ptr @GlobLd5 monotonic, align 4
+  store atomic i32 %0, ptr @GlobSt5 monotonic, align 4
   ret void
 }
 
@@ -2384,8 +2384,8 @@ define dso_local void @Atomic_LdSt_i64() {
 ; CHECK-P8-NEXT:    std r3, GlobSt7 at toc@l(r4)
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load atomic i64, i64* getelementptr inbounds ([20 x i64], [20 x i64]* @GlobLd7, i64 0, i64 0) monotonic, align 8
-  store atomic i64 %0, i64* getelementptr inbounds ([20 x i64], [20 x i64]* @GlobSt7, i64 0, i64 0) monotonic, align 8
+  %0 = load atomic i64, ptr @GlobLd7 monotonic, align 8
+  store atomic i64 %0, ptr @GlobSt7 monotonic, align 8
   ret void
 }
 
@@ -2422,7 +2422,7 @@ define dso_local void @store_double_f64_to_uint(double %str) local_unnamed_addr
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui double %str to i64
-  store i64 %conv, i64* bitcast ([20 x double]* @GlobSt10 to i64*), align 8
+  store i64 %conv, ptr @GlobSt10, align 8
   ret void
 }
 
@@ -2459,7 +2459,7 @@ define dso_local void @store_double_f64_to_sint(double %str) local_unnamed_addr
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi double %str to i64
-  store i64 %conv, i64* bitcast ([20 x double]* @GlobSt10 to i64*), align 8
+  store i64 %conv, ptr @GlobSt10, align 8
   ret void
 }
 
@@ -2520,7 +2520,7 @@ define dso_local void @store_f128_to_uint(fp128 %str) local_unnamed_addr #0 {
 ; CHECK-P8-BE-NEXT:    blr
 entry:
   %conv = fptoui fp128 %str to i64
-  store i64 %conv, i64* bitcast ([20 x fp128]* @GlobF128 to i64*), align 16
+  store i64 %conv, ptr @GlobF128, align 16
   ret void
 }
 
@@ -2581,6 +2581,6 @@ define dso_local void @store_f128_to_sint(fp128 %str) local_unnamed_addr #0 {
 ; CHECK-P8-BE-NEXT:    blr
 entry:
   %conv = fptosi fp128 %str to i64
-  store i64 %conv, i64* bitcast ([20 x fp128]* @GlobF128 to i64*), align 16
+  store i64 %conv, ptr @GlobF128, align 16
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/peephole-align.ll b/llvm/test/CodeGen/PowerPC/peephole-align.ll
index 3981e948cbfb..18a8da8281a0 100644
--- a/llvm/test/CodeGen/PowerPC/peephole-align.ll
+++ b/llvm/test/CodeGen/PowerPC/peephole-align.ll
@@ -44,18 +44,18 @@ target triple = "powerpc64-unknown-linux-gnu"
 
 define dso_local void @test_b4() nounwind {
 entry:
-  %0 = load i8, i8* getelementptr inbounds (%struct.b4, %struct.b4* @b4v, i32 0, i32 0), align 1
+  %0 = load i8, ptr @b4v, align 1
   %inc0 = add nsw i8 %0, 1
-  store i8 %inc0, i8* getelementptr inbounds (%struct.b4, %struct.b4* @b4v, i32 0, i32 0), align 1
-  %1 = load i8, i8* getelementptr inbounds (%struct.b4, %struct.b4* @b4v, i32 0, i32 1), align 1
+  store i8 %inc0, ptr @b4v, align 1
+  %1 = load i8, ptr getelementptr inbounds (%struct.b4, ptr @b4v, i32 0, i32 1), align 1
   %inc1 = add nsw i8 %1, 2
-  store i8 %inc1, i8* getelementptr inbounds (%struct.b4, %struct.b4* @b4v, i32 0, i32 1), align 1
-  %2 = load i8, i8* getelementptr inbounds (%struct.b4, %struct.b4* @b4v, i32 0, i32 2), align 1
+  store i8 %inc1, ptr getelementptr inbounds (%struct.b4, ptr @b4v, i32 0, i32 1), align 1
+  %2 = load i8, ptr getelementptr inbounds (%struct.b4, ptr @b4v, i32 0, i32 2), align 1
   %inc2 = add nsw i8 %2, 3
-  store i8 %inc2, i8* getelementptr inbounds (%struct.b4, %struct.b4* @b4v, i32 0, i32 2), align 1
-  %3 = load i8, i8* getelementptr inbounds (%struct.b4, %struct.b4* @b4v, i32 0, i32 3), align 1
+  store i8 %inc2, ptr getelementptr inbounds (%struct.b4, ptr @b4v, i32 0, i32 2), align 1
+  %3 = load i8, ptr getelementptr inbounds (%struct.b4, ptr @b4v, i32 0, i32 3), align 1
   %inc3 = add nsw i8 %3, 4
-  store i8 %inc3, i8* getelementptr inbounds (%struct.b4, %struct.b4* @b4v, i32 0, i32 3), align 1
+  store i8 %inc3, ptr getelementptr inbounds (%struct.b4, ptr @b4v, i32 0, i32 3), align 1
   ret void
 }
 
@@ -70,12 +70,12 @@ entry:
 
 define dso_local void @test_h2() nounwind {
 entry:
-  %0 = load i16, i16* getelementptr inbounds (%struct.h2, %struct.h2* @h2v, i32 0, i32 0), align 2
+  %0 = load i16, ptr @h2v, align 2
   %inc0 = add nsw i16 %0, 1
-  store i16 %inc0, i16* getelementptr inbounds (%struct.h2, %struct.h2* @h2v, i32 0, i32 0), align 2
-  %1 = load i16, i16* getelementptr inbounds (%struct.h2, %struct.h2* @h2v, i32 0, i32 1), align 2
+  store i16 %inc0, ptr @h2v, align 2
+  %1 = load i16, ptr getelementptr inbounds (%struct.h2, ptr @h2v, i32 0, i32 1), align 2
   %inc1 = add nsw i16 %1, 2
-  store i16 %inc1, i16* getelementptr inbounds (%struct.h2, %struct.h2* @h2v, i32 0, i32 1), align 2
+  store i16 %inc1, ptr getelementptr inbounds (%struct.h2, ptr @h2v, i32 0, i32 1), align 2
   ret void
 }
 
@@ -89,12 +89,12 @@ entry:
 ; CHECK-DAG: sth [[REG1_1]], h2v at toc@l+2([[REGSTRUCT]])
 define dso_local void @test_h2_optsize() optsize nounwind {
 entry:
-  %0 = load i16, i16* getelementptr inbounds (%struct.h2, %struct.h2* @h2v, i32 0, i32 0), align 2
+  %0 = load i16, ptr @h2v, align 2
   %inc0 = add nsw i16 %0, 1
-  store i16 %inc0, i16* getelementptr inbounds (%struct.h2, %struct.h2* @h2v, i32 0, i32 0), align 2
-  %1 = load i16, i16* getelementptr inbounds (%struct.h2, %struct.h2* @h2v, i32 0, i32 1), align 2
+  store i16 %inc0, ptr @h2v, align 2
+  %1 = load i16, ptr getelementptr inbounds (%struct.h2, ptr @h2v, i32 0, i32 1), align 2
   %inc1 = add nsw i16 %1, 2
-  store i16 %inc1, i16* getelementptr inbounds (%struct.h2, %struct.h2* @h2v, i32 0, i32 1), align 2
+  store i16 %inc1, ptr getelementptr inbounds (%struct.h2, ptr @h2v, i32 0, i32 1), align 2
   ret void
 }
 
@@ -127,30 +127,30 @@ entry:
 
 define dso_local void @test_b8() nounwind {
 entry:
-  %0 = load i8, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 0), align 1
+  %0 = load i8, ptr @b8v, align 1
   %inc0 = add nsw i8 %0, 1
-  store i8 %inc0, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 0), align 1
-  %1 = load i8, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 1), align 1
+  store i8 %inc0, ptr @b8v, align 1
+  %1 = load i8, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 1), align 1
   %inc1 = add nsw i8 %1, 2
-  store i8 %inc1, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 1), align 1
-  %2 = load i8, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 2), align 1
+  store i8 %inc1, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 1), align 1
+  %2 = load i8, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 2), align 1
   %inc2 = add nsw i8 %2, 3
-  store i8 %inc2, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 2), align 1
-  %3 = load i8, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 3), align 1
+  store i8 %inc2, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 2), align 1
+  %3 = load i8, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 3), align 1
   %inc3 = add nsw i8 %3, 4
-  store i8 %inc3, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 3), align 1
-  %4 = load i8, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 4), align 1
+  store i8 %inc3, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 3), align 1
+  %4 = load i8, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 4), align 1
   %inc4 = add nsw i8 %4, 5
-  store i8 %inc4, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 4), align 1
-  %5 = load i8, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 5), align 1
+  store i8 %inc4, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 4), align 1
+  %5 = load i8, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 5), align 1
   %inc5 = add nsw i8 %5, 6
-  store i8 %inc5, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 5), align 1
-  %6 = load i8, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 6), align 1
+  store i8 %inc5, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 5), align 1
+  %6 = load i8, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 6), align 1
   %inc6 = add nsw i8 %6, 7
-  store i8 %inc6, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 6), align 1
-  %7 = load i8, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 7), align 1
+  store i8 %inc6, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 6), align 1
+  %7 = load i8, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 7), align 1
   %inc7 = add nsw i8 %7, 8
-  store i8 %inc7, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 7), align 1
+  store i8 %inc7, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 7), align 1
   ret void
 }
 
@@ -171,18 +171,18 @@ entry:
 
 define dso_local void @test_h4() nounwind {
 entry:
-  %0 = load i16, i16* getelementptr inbounds (%struct.h4, %struct.h4* @h4v, i32 0, i32 0), align 2
+  %0 = load i16, ptr @h4v, align 2
   %inc0 = add nsw i16 %0, 1
-  store i16 %inc0, i16* getelementptr inbounds (%struct.h4, %struct.h4* @h4v, i32 0, i32 0), align 2
-  %1 = load i16, i16* getelementptr inbounds (%struct.h4, %struct.h4* @h4v, i32 0, i32 1), align 2
+  store i16 %inc0, ptr @h4v, align 2
+  %1 = load i16, ptr getelementptr inbounds (%struct.h4, ptr @h4v, i32 0, i32 1), align 2
   %inc1 = add nsw i16 %1, 2
-  store i16 %inc1, i16* getelementptr inbounds (%struct.h4, %struct.h4* @h4v, i32 0, i32 1), align 2
-  %2 = load i16, i16* getelementptr inbounds (%struct.h4, %struct.h4* @h4v, i32 0, i32 2), align 2
+  store i16 %inc1, ptr getelementptr inbounds (%struct.h4, ptr @h4v, i32 0, i32 1), align 2
+  %2 = load i16, ptr getelementptr inbounds (%struct.h4, ptr @h4v, i32 0, i32 2), align 2
   %inc2 = add nsw i16 %2, 3
-  store i16 %inc2, i16* getelementptr inbounds (%struct.h4, %struct.h4* @h4v, i32 0, i32 2), align 2
-  %3 = load i16, i16* getelementptr inbounds (%struct.h4, %struct.h4* @h4v, i32 0, i32 3), align 2
+  store i16 %inc2, ptr getelementptr inbounds (%struct.h4, ptr @h4v, i32 0, i32 2), align 2
+  %3 = load i16, ptr getelementptr inbounds (%struct.h4, ptr @h4v, i32 0, i32 3), align 2
   %inc3 = add nsw i16 %3, 4
-  store i16 %inc3, i16* getelementptr inbounds (%struct.h4, %struct.h4* @h4v, i32 0, i32 3), align 2
+  store i16 %inc3, ptr getelementptr inbounds (%struct.h4, ptr @h4v, i32 0, i32 3), align 2
   ret void
 }
 
@@ -197,12 +197,12 @@ entry:
 
 define dso_local void @test_w2() nounwind {
 entry:
-  %0 = load i32, i32* getelementptr inbounds (%struct.w2, %struct.w2* @w2v, i32 0, i32 0), align 4
+  %0 = load i32, ptr @w2v, align 4
   %inc0 = add nsw i32 %0, 1
-  store i32 %inc0, i32* getelementptr inbounds (%struct.w2, %struct.w2* @w2v, i32 0, i32 0), align 4
-  %1 = load i32, i32* getelementptr inbounds (%struct.w2, %struct.w2* @w2v, i32 0, i32 1), align 4
+  store i32 %inc0, ptr @w2v, align 4
+  %1 = load i32, ptr getelementptr inbounds (%struct.w2, ptr @w2v, i32 0, i32 1), align 4
   %inc1 = add nsw i32 %1, 2
-  store i32 %inc1, i32* getelementptr inbounds (%struct.w2, %struct.w2* @w2v, i32 0, i32 1), align 4
+  store i32 %inc1, ptr getelementptr inbounds (%struct.w2, ptr @w2v, i32 0, i32 1), align 4
   ret void
 }
 
@@ -218,12 +218,12 @@ entry:
 
 define dso_local void @test_d2() nounwind {
 entry:
-  %0 = load i64, i64* getelementptr inbounds (%struct.d2, %struct.d2* @d2v, i32 0, i32 0), align 8
+  %0 = load i64, ptr @d2v, align 8
   %inc0 = add nsw i64 %0, 1
-  store i64 %inc0, i64* getelementptr inbounds (%struct.d2, %struct.d2* @d2v, i32 0, i32 0), align 8
-  %1 = load i64, i64* getelementptr inbounds (%struct.d2, %struct.d2* @d2v, i32 0, i32 1), align 8
+  store i64 %inc0, ptr @d2v, align 8
+  %1 = load i64, ptr getelementptr inbounds (%struct.d2, ptr @d2v, i32 0, i32 1), align 8
   %inc1 = add nsw i64 %1, 2
-  store i64 %inc1, i64* getelementptr inbounds (%struct.d2, %struct.d2* @d2v, i32 0, i32 1), align 8
+  store i64 %inc1, ptr getelementptr inbounds (%struct.d2, ptr @d2v, i32 0, i32 1), align 8
   ret void
 }
 
@@ -232,7 +232,7 @@ entry:
 ; CHECK: ld 3, d2v at toc@l+8([[REG]])
 define i64 @test_singleuse() nounwind {
 entry:
-  %0 = load i64, i64* getelementptr inbounds (%struct.d2, %struct.d2* @d2v, i32 0, i32 1), align 8
+  %0 = load i64, ptr getelementptr inbounds (%struct.d2, ptr @d2v, i32 0, i32 1), align 8
   ret i64 %0
 }
 
@@ -246,8 +246,8 @@ entry:
 ; CHECK: stdx [[REG0_1]], [[REGSTRUCT]], [[OFFSET_REG]]
 define dso_local void @test_misalign() nounwind {
 entry:
-  %0 = load i64, i64* getelementptr inbounds (%struct.misalign, %struct.misalign* @misalign_v, i32 0, i32 1), align 1
+  %0 = load i64, ptr getelementptr inbounds (%struct.misalign, ptr @misalign_v, i32 0, i32 1), align 1
   %inc0 = add nsw i64 %0, 1
-  store i64 %inc0, i64* getelementptr inbounds (%struct.misalign, %struct.misalign* @misalign_v, i32 0, i32 1), align 1
+  store i64 %inc0, ptr getelementptr inbounds (%struct.misalign, ptr @misalign_v, i32 0, i32 1), align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/pgo-ref-directive.ll b/llvm/test/CodeGen/PowerPC/pgo-ref-directive.ll
index 92deedf4a026..172affa4a266 100644
--- a/llvm/test/CodeGen/PowerPC/pgo-ref-directive.ll
+++ b/llvm/test/CodeGen/PowerPC/pgo-ref-directive.ll
@@ -14,9 +14,9 @@ target triple = "powerpc-ibm-aix7.2.0.0"
 @__profd_main = private global i64 zeroinitializer, section "__llvm_prf_data", align 8
 @__llvm_prf_nm = private constant [6 x i8] c"\04\00main", section "__llvm_prf_names", align 1
 
- at llvm.used = appending global [2 x i8*]
-  [i8* bitcast (i64* @__profd_main to i8*),
-   i8* getelementptr inbounds ([6 x i8], [6 x i8]* @__llvm_prf_nm, i32 0, i32 0)], section "llvm.metadata"
+ at llvm.used = appending global [2 x ptr]
+  [ptr @__profd_main,
+   ptr @__llvm_prf_nm], section "llvm.metadata"
 
 define i32 @main() #0 {
 entry:
@@ -37,10 +37,10 @@ target triple = "powerpc-ibm-aix7.2.0.0"
 @__profd_main = private global i64 zeroinitializer, section "__llvm_prf_data", align 8
 @__llvm_prf_nm = private constant [6 x i8] c"\04\00main", section "__llvm_prf_names", align 1
 
- at llvm.used = appending global [3 x i8*]
-  [i8* bitcast ([1 x i64]* @__profc_main to i8*),
-   i8* bitcast (i64* @__profd_main to i8*),
-   i8* getelementptr inbounds ([6 x i8], [6 x i8]* @__llvm_prf_nm, i32 0, i32 0)], section "llvm.metadata"
+ at llvm.used = appending global [3 x ptr]
+  [ptr @__profc_main,
+   ptr @__profd_main,
+   ptr @__llvm_prf_nm], section "llvm.metadata"
 
 define i32 @main() #0 {
 entry:
@@ -62,13 +62,13 @@ entry:
 @__profc_main = private global [1 x i64] zeroinitializer, section "__llvm_prf_cnts", align 8
 @__profd_main = private global i64 zeroinitializer, section "__llvm_prf_data", align 8
 @__llvm_prf_nm = private constant [6 x i8] c"\04\00main", section "__llvm_prf_names", align 1
- at __llvm_prf_vnodes = private global [10 x { i64, i64, i8* }] zeroinitializer, section "__llvm_prf_vnds"
+ at __llvm_prf_vnodes = private global [10 x { i64, i64, ptr }] zeroinitializer, section "__llvm_prf_vnds"
 
- at llvm.used = appending global [4 x i8*]
-  [i8* bitcast ([1 x i64]* @__profc_main to i8*),
-   i8* bitcast (i64* @__profd_main to i8*),
-   i8* getelementptr inbounds ([6 x i8], [6 x i8]* @__llvm_prf_nm, i32 0, i32 0),
-   i8* bitcast ([10 x { i64, i64, i8* }]* @__llvm_prf_vnodes to i8*)], section "llvm.metadata"
+ at llvm.used = appending global [4 x ptr]
+  [ptr @__profc_main,
+   ptr @__profd_main,
+   ptr @__llvm_prf_nm,
+   ptr @__llvm_prf_vnodes], section "llvm.metadata"
 
 define i32 @main() #0 {
 entry:

diff  --git a/llvm/test/CodeGen/PowerPC/pip-inner.ll b/llvm/test/CodeGen/PowerPC/pip-inner.ll
index 69786ba82187..9ca87e1a45a3 100644
--- a/llvm/test/CodeGen/PowerPC/pip-inner.ll
+++ b/llvm/test/CodeGen/PowerPC/pip-inner.ll
@@ -3,7 +3,7 @@ target datalayout = "E-m:e-i64:64-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
 ; Function Attrs: nounwind
-define void @foo(double* %x, double* nocapture readonly %y) #0 {
+define void @foo(ptr %x, ptr nocapture readonly %y) #0 {
 entry:
   br label %for.cond1.preheader
 
@@ -13,17 +13,17 @@ for.cond1.preheader:                              ; preds = %for.end, %entry
 
 for.body3:                                        ; preds = %for.body3, %for.cond1.preheader
   %indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
-  %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
-  %0 = load double, double* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds double, ptr %y, i64 %indvars.iv
+  %0 = load double, ptr %arrayidx, align 8
   %add = fadd double %0, 1.000000e+00
-  %arrayidx5 = getelementptr inbounds double, double* %x, i64 %indvars.iv
-  store double %add, double* %arrayidx5, align 8
+  %arrayidx5 = getelementptr inbounds double, ptr %x, i64 %indvars.iv
+  store double %add, ptr %arrayidx5, align 8
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond = icmp eq i64 %indvars.iv.next, 16000
   br i1 %exitcond, label %for.end, label %for.body3
 
 for.end:                                          ; preds = %for.body3
-  tail call void @bar(double* %x) #2
+  tail call void @bar(ptr %x) #2
   %inc7 = add nuw nsw i32 %i.015, 1
   %exitcond16 = icmp eq i32 %inc7, 1000
   br i1 %exitcond16, label %for.end8, label %for.cond1.preheader
@@ -44,7 +44,7 @@ for.end8:                                         ; preds = %for.end
 ; CHECK: blr
 }
 
-declare void @bar(double*) #1
+declare void @bar(ptr) #1
 
 attributes #0 = { nounwind "target-cpu"="a2" }
 attributes #1 = { "target-cpu"="a2" }

diff  --git a/llvm/test/CodeGen/PowerPC/popcount.ll b/llvm/test/CodeGen/PowerPC/popcount.ll
index 107ae5484b5b..a91288272cae 100644
--- a/llvm/test/CodeGen/PowerPC/popcount.ll
+++ b/llvm/test/CodeGen/PowerPC/popcount.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -O0 -mtriple=powerpc64le-unknown-unknown | FileCheck %s
 
 ; Function Attrs: nobuiltin nounwind readonly
-define i8 @popcount128(i128* nocapture nonnull readonly %0) {
+define i8 @popcount128(ptr nocapture nonnull readonly %0) {
 ; CHECK-LABEL: popcount128:
 ; CHECK:       # %bb.0: # %Entry
 ; CHECK-NEXT:    mr 4, 3
@@ -15,7 +15,7 @@ define i8 @popcount128(i128* nocapture nonnull readonly %0) {
 ; CHECK-NEXT:    clrldi 3, 3, 56
 ; CHECK-NEXT:    blr
 Entry:
-  %1 = load i128, i128* %0, align 16
+  %1 = load i128, ptr %0, align 16
   %2 = tail call i128 @llvm.ctpop.i128(i128 %1)
   %3 = trunc i128 %2 to i8
   ret i8 %3
@@ -25,7 +25,7 @@ Entry:
 declare i128 @llvm.ctpop.i128(i128)
 
 ; Function Attrs: nobuiltin nounwind readonly
-define i16 @popcount256(i256* nocapture nonnull readonly %0) {
+define i16 @popcount256(ptr nocapture nonnull readonly %0) {
 ; CHECK-LABEL: popcount256:
 ; CHECK:       # %bb.0: # %Entry
 ; CHECK-NEXT:    mr 6, 3
@@ -44,7 +44,7 @@ define i16 @popcount256(i256* nocapture nonnull readonly %0) {
 ; CHECK-NEXT:    clrldi 3, 3, 48
 ; CHECK-NEXT:    blr
 Entry:
-  %1 = load i256, i256* %0, align 16
+  %1 = load i256, ptr %0, align 16
   %2 = tail call i256 @llvm.ctpop.i256(i256 %1)
   %3 = trunc i256 %2 to i16
   ret i16 %3

diff  --git a/llvm/test/CodeGen/PowerPC/post-ra-ec.ll b/llvm/test/CodeGen/PowerPC/post-ra-ec.ll
index 907e767d694b..9c01d1234cce 100644
--- a/llvm/test/CodeGen/PowerPC/post-ra-ec.ll
+++ b/llvm/test/CodeGen/PowerPC/post-ra-ec.ll
@@ -2,27 +2,27 @@
 target datalayout = "E-m:e-i64:64-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
-%struct.inode.0.12.120 = type { i8* }
+%struct.inode.0.12.120 = type { ptr }
 %struct.kstat2.1.13.121 = type { i32 }
-%struct.task_struct.4.16.124 = type { i8*, %struct.atomic_t.2.14.122, %struct.signal_struct.3.15.123* }
+%struct.task_struct.4.16.124 = type { ptr, %struct.atomic_t.2.14.122, ptr }
 %struct.atomic_t.2.14.122 = type { i32 }
 %struct.signal_struct.3.15.123 = type { i64 }
-%struct.pid.5.17.125 = type { i8* }
+%struct.pid.5.17.125 = type { ptr }
 
 ; Function Attrs: nounwind
-define signext i32 @proc_task_getattr(%struct.inode.0.12.120* nocapture readonly %inode, %struct.kstat2.1.13.121* nocapture %stat) #0 {
+define signext i32 @proc_task_getattr(ptr nocapture readonly %inode, ptr nocapture %stat) #0 {
 entry:
-  %call1.i = tail call %struct.task_struct.4.16.124* @get_pid_task(%struct.pid.5.17.125* undef, i32 zeroext 0) #0
+  %call1.i = tail call ptr @get_pid_task(ptr undef, i32 zeroext 0) #0
   br i1 undef, label %if.end, label %if.then
 
 if.then:                                          ; preds = %entry
-  %0 = load i64, i64* undef, align 8
+  %0 = load i64, ptr undef, align 8
   %conv.i = trunc i64 %0 to i32
-  %1 = load i32, i32* null, align 4
+  %1 = load i32, ptr null, align 4
   %add = add i32 %1, %conv.i
-  store i32 %add, i32* null, align 4
-  %counter.i.i = getelementptr inbounds %struct.task_struct.4.16.124, %struct.task_struct.4.16.124* %call1.i, i64 0, i32 1, i32 0
-  %2 = tail call i32 asm sideeffect "\09lwsync\0A1:\09lwarx\09$0,0,$1\09\09# atomic_dec_return\0A\09addic\09$0,$0,-1\0A\09stwcx.\09$0,0,$1\0A\09bne-\091b\0A\09sync\0A", "=&r,r,~{cr0},~{xer},~{memory}"(i32* %counter.i.i) #0
+  store i32 %add, ptr null, align 4
+  %counter.i.i = getelementptr inbounds %struct.task_struct.4.16.124, ptr %call1.i, i64 0, i32 1, i32 0
+  %2 = tail call i32 asm sideeffect "\09lwsync\0A1:\09lwarx\09$0,0,$1\09\09# atomic_dec_return\0A\09addic\09$0,$0,-1\0A\09stwcx.\09$0,0,$1\0A\09bne-\091b\0A\09sync\0A", "=&r,r,~{cr0},~{xer},~{memory}"(ptr %counter.i.i) #0
   %cmp.i = icmp eq i32 %2, 0
   br i1 %cmp.i, label %if.then.i, label %if.end
 
@@ -31,17 +31,16 @@ if.then:                                          ; preds = %entry
 ; CHECK: blr
 
 if.then.i:                                        ; preds = %if.then
-  %3 = bitcast %struct.task_struct.4.16.124* %call1.i to i8*
-  tail call void @foo(i8* %3) #0
+  tail call void @foo(ptr %call1.i) #0
   unreachable
 
 if.end:                                           ; preds = %if.then, %entry
   ret i32 0
 }
 
-declare void @foo(i8*)
+declare void @foo(ptr)
 
-declare %struct.task_struct.4.16.124* @get_pid_task(%struct.pid.5.17.125*, i32 zeroext)
+declare ptr @get_pid_task(ptr, i32 zeroext)
 
 attributes #0 = { nounwind }
 

diff  --git a/llvm/test/CodeGen/PowerPC/pow_massv_075_025exp.ll b/llvm/test/CodeGen/PowerPC/pow_massv_075_025exp.ll
index dab7647825e4..5c44b6831be2 100644
--- a/llvm/test/CodeGen/PowerPC/pow_massv_075_025exp.ll
+++ b/llvm/test/CodeGen/PowerPC/pow_massv_075_025exp.ll
@@ -7,7 +7,7 @@
 ; RUN: llc -vector-library=MASSV < %s -mtriple=powerpc-ibm-aix-xcoff -mcpu=pwr7 | FileCheck -check-prefixes=CHECK-PWR7 %s
 
 ; Exponent is a variable
-define void @vpow_var(double* nocapture %z, double* nocapture readonly %y, double* nocapture readonly %x) {
+define void @vpow_var(ptr nocapture %z, ptr nocapture readonly %y, ptr nocapture readonly %x) {
 ; CHECK-LABEL:       @vpow_var
 ; CHECK-PWR10:       __powd2_P10
 ; CHECK-PWR9:        __powd2_P9
@@ -19,26 +19,23 @@ entry:
 
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
-  %next.gep = getelementptr double, double* %z, i64 %index
-  %next.gep31 = getelementptr double, double* %y, i64 %index
-  %next.gep32 = getelementptr double, double* %x, i64 %index
-  %0 = bitcast double* %next.gep32 to <2 x double>*
-  %wide.load = load <2 x double>, <2 x double>* %0, align 8
-  %1 = bitcast double* %next.gep31 to <2 x double>*
-  %wide.load33 = load <2 x double>, <2 x double>* %1, align 8
-  %2 = call ninf afn nsz <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> %wide.load33)
-  %3 = bitcast double* %next.gep to <2 x double>*
-  store <2 x double> %2, <2 x double>* %3, align 8
+  %next.gep = getelementptr double, ptr %z, i64 %index
+  %next.gep31 = getelementptr double, ptr %y, i64 %index
+  %next.gep32 = getelementptr double, ptr %x, i64 %index
+  %wide.load = load <2 x double>, ptr %next.gep32, align 8
+  %wide.load33 = load <2 x double>, ptr %next.gep31, align 8
+  %0 = call ninf afn nsz <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> %wide.load33)
+  store <2 x double> %0, ptr %next.gep, align 8
   %index.next = add i64 %index, 2
-  %4 = icmp eq i64 %index.next, 1024
-  br i1 %4, label %for.end, label %vector.body
+  %1 = icmp eq i64 %index.next, 1024
+  br i1 %1, label %for.end, label %vector.body
 
 for.end:
   ret void
 }
 
 ; Exponent is a constant != 0.75 and !=0.25
-define void @vpow_const(double* nocapture %y, double* nocapture readonly %x) {
+define void @vpow_const(ptr nocapture %y, ptr nocapture readonly %x) {
 ; CHECK-LABEL:       @vpow_const
 ; CHECK-PWR10:       __powd2_P10
 ; CHECK-PWR9:        __powd2_P9
@@ -50,23 +47,21 @@ entry:
 
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
-  %next.gep = getelementptr double, double* %y, i64 %index
-  %next.gep19 = getelementptr double, double* %x, i64 %index
-  %0 = bitcast double* %next.gep19 to <2 x double>*
-  %wide.load = load <2 x double>, <2 x double>* %0, align 8
-  %1 = call ninf afn nsz <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 7.600000e-01, double 7.600000e-01>)
-  %2 = bitcast double* %next.gep to <2 x double>*
-  store <2 x double> %1, <2 x double>* %2, align 8
+  %next.gep = getelementptr double, ptr %y, i64 %index
+  %next.gep19 = getelementptr double, ptr %x, i64 %index
+  %wide.load = load <2 x double>, ptr %next.gep19, align 8
+  %0 = call ninf afn nsz <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 7.600000e-01, double 7.600000e-01>)
+  store <2 x double> %0, ptr %next.gep, align 8
   %index.next = add i64 %index, 2
-  %3 = icmp eq i64 %index.next, 1024
-  br i1 %3, label %for.end, label %vector.body
+  %1 = icmp eq i64 %index.next, 1024
+  br i1 %1, label %for.end, label %vector.body
 
 for.end:
   ret void
 }
 
 ; Exponent is a constant != 0.75 and !=0.25 and they are different
-define void @vpow_noeq_const(double* nocapture %y, double* nocapture readonly %x) {
+define void @vpow_noeq_const(ptr nocapture %y, ptr nocapture readonly %x) {
 ; CHECK-LABEL:       @vpow_noeq_const
 ; CHECK-PWR10:       __powd2_P10
 ; CHECK-PWR9:        __powd2_P9
@@ -78,23 +73,21 @@ entry:
 
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
-  %next.gep = getelementptr double, double* %y, i64 %index
-  %next.gep19 = getelementptr double, double* %x, i64 %index
-  %0 = bitcast double* %next.gep19 to <2 x double>*
-  %wide.load = load <2 x double>, <2 x double>* %0, align 8
-  %1 = call ninf afn nsz <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 7.700000e-01, double 7.600000e-01>)
-  %2 = bitcast double* %next.gep to <2 x double>*
-  store <2 x double> %1, <2 x double>* %2, align 8
+  %next.gep = getelementptr double, ptr %y, i64 %index
+  %next.gep19 = getelementptr double, ptr %x, i64 %index
+  %wide.load = load <2 x double>, ptr %next.gep19, align 8
+  %0 = call ninf afn nsz <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 7.700000e-01, double 7.600000e-01>)
+  store <2 x double> %0, ptr %next.gep, align 8
   %index.next = add i64 %index, 2
-  %3 = icmp eq i64 %index.next, 1024
-  br i1 %3, label %for.end, label %vector.body
+  %1 = icmp eq i64 %index.next, 1024
+  br i1 %1, label %for.end, label %vector.body
 
 for.end:
   ret void
 }
 
 ; Exponent is a constant != 0.75 and !=0.25 and they are different
-define void @vpow_noeq075_const(double* nocapture %y, double* nocapture readonly %x) {
+define void @vpow_noeq075_const(ptr nocapture %y, ptr nocapture readonly %x) {
 ; CHECK-LABEL:       @vpow_noeq075_const
 ; CHECK-PWR10:       __powd2_P10
 ; CHECK-PWR9:        __powd2_P9
@@ -106,23 +99,21 @@ entry:
 
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
-  %next.gep = getelementptr double, double* %y, i64 %index
-  %next.gep19 = getelementptr double, double* %x, i64 %index
-  %0 = bitcast double* %next.gep19 to <2 x double>*
-  %wide.load = load <2 x double>, <2 x double>* %0, align 8
-  %1 = call ninf afn nsz <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 7.700000e-01, double 7.500000e-01>)
-  %2 = bitcast double* %next.gep to <2 x double>*
-  store <2 x double> %1, <2 x double>* %2, align 8
+  %next.gep = getelementptr double, ptr %y, i64 %index
+  %next.gep19 = getelementptr double, ptr %x, i64 %index
+  %wide.load = load <2 x double>, ptr %next.gep19, align 8
+  %0 = call ninf afn nsz <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 7.700000e-01, double 7.500000e-01>)
+  store <2 x double> %0, ptr %next.gep, align 8
   %index.next = add i64 %index, 2
-  %3 = icmp eq i64 %index.next, 1024
-  br i1 %3, label %for.end, label %vector.body
+  %1 = icmp eq i64 %index.next, 1024
+  br i1 %1, label %for.end, label %vector.body
 
 for.end:
   ret void
 }
 
 ; Exponent is a constant != 0.75 and !=0.25 and they are different
-define void @vpow_noeq025_const(double* nocapture %y, double* nocapture readonly %x) {
+define void @vpow_noeq025_const(ptr nocapture %y, ptr nocapture readonly %x) {
 ; CHECK-LABEL:       @vpow_noeq025_const
 ; CHECK-PWR10:       __powd2_P10
 ; CHECK-PWR9:        __powd2_P9
@@ -134,23 +125,21 @@ entry:
 
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
-  %next.gep = getelementptr double, double* %y, i64 %index
-  %next.gep19 = getelementptr double, double* %x, i64 %index
-  %0 = bitcast double* %next.gep19 to <2 x double>*
-  %wide.load = load <2 x double>, <2 x double>* %0, align 8
-  %1 = call ninf afn nsz <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 7.700000e-01, double 2.500000e-01>)
-  %2 = bitcast double* %next.gep to <2 x double>*
-  store <2 x double> %1, <2 x double>* %2, align 8
+  %next.gep = getelementptr double, ptr %y, i64 %index
+  %next.gep19 = getelementptr double, ptr %x, i64 %index
+  %wide.load = load <2 x double>, ptr %next.gep19, align 8
+  %0 = call ninf afn nsz <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 7.700000e-01, double 2.500000e-01>)
+  store <2 x double> %0, ptr %next.gep, align 8
   %index.next = add i64 %index, 2
-  %3 = icmp eq i64 %index.next, 1024
-  br i1 %3, label %for.end, label %vector.body
+  %1 = icmp eq i64 %index.next, 1024
+  br i1 %1, label %for.end, label %vector.body
 
 for.end:
   ret void
 }
 
 ; Exponent is 0.75
-define void @vpow_075(double* nocapture %y, double* nocapture readonly %x) {
+define void @vpow_075(ptr nocapture %y, ptr nocapture readonly %x) {
 ; CHECK-LABEL:       @vpow_075
 ; CHECK-NOT:         __powd2_P{{[7,8,9,10]}}
 ; CHECK:             xvrsqrtesp
@@ -160,23 +149,21 @@ entry:
 
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
-  %next.gep = getelementptr double, double* %y, i64 %index
-  %next.gep19 = getelementptr double, double* %x, i64 %index
-  %0 = bitcast double* %next.gep19 to <2 x double>*
-  %wide.load = load <2 x double>, <2 x double>* %0, align 8
-  %1 = call ninf afn <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 7.500000e-01, double 7.500000e-01>)
-  %2 = bitcast double* %next.gep to <2 x double>*
-  store <2 x double> %1, <2 x double>* %2, align 8
+  %next.gep = getelementptr double, ptr %y, i64 %index
+  %next.gep19 = getelementptr double, ptr %x, i64 %index
+  %wide.load = load <2 x double>, ptr %next.gep19, align 8
+  %0 = call ninf afn <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 7.500000e-01, double 7.500000e-01>)
+  store <2 x double> %0, ptr %next.gep, align 8
   %index.next = add i64 %index, 2
-  %3 = icmp eq i64 %index.next, 1024
-  br i1 %3, label %for.end, label %vector.body
+  %1 = icmp eq i64 %index.next, 1024
+  br i1 %1, label %for.end, label %vector.body
 
 for.end:
   ret void
 }
 
 ; Exponent is 0.25
-define void @vpow_025(double* nocapture %y, double* nocapture readonly %x) {
+define void @vpow_025(ptr nocapture %y, ptr nocapture readonly %x) {
 ; CHECK-LABEL:       @vpow_025
 ; CHECK-NOT:         __powd2_P{{[7,8,9,10]}}
 ; CHECK:             xvrsqrtesp
@@ -186,23 +173,21 @@ entry:
 
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
-  %next.gep = getelementptr double, double* %y, i64 %index
-  %next.gep19 = getelementptr double, double* %x, i64 %index
-  %0 = bitcast double* %next.gep19 to <2 x double>*
-  %wide.load = load <2 x double>, <2 x double>* %0, align 8
-  %1 = call ninf afn nsz <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 2.500000e-01, double 2.500000e-01>)
-  %2 = bitcast double* %next.gep to <2 x double>*
-  store <2 x double> %1, <2 x double>* %2, align 8
+  %next.gep = getelementptr double, ptr %y, i64 %index
+  %next.gep19 = getelementptr double, ptr %x, i64 %index
+  %wide.load = load <2 x double>, ptr %next.gep19, align 8
+  %0 = call ninf afn nsz <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 2.500000e-01, double 2.500000e-01>)
+  store <2 x double> %0, ptr %next.gep, align 8
   %index.next = add i64 %index, 2
-  %3 = icmp eq i64 %index.next, 1024
-  br i1 %3, label %for.end, label %vector.body
+  %1 = icmp eq i64 %index.next, 1024
+  br i1 %1, label %for.end, label %vector.body
 
 for.end:
   ret void
 }
 
 ; Exponent is 0.75 but no proper fast-math flags
-define void @vpow_075_nofast(double* nocapture %y, double* nocapture readonly %x) {
+define void @vpow_075_nofast(ptr nocapture %y, ptr nocapture readonly %x) {
 ; CHECK-LABEL:       @vpow_075_nofast
 ; CHECK-PWR10:       __powd2_P10
 ; CHECK-PWR9:        __powd2_P9
@@ -215,23 +200,21 @@ entry:
 
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
-  %next.gep = getelementptr double, double* %y, i64 %index
-  %next.gep19 = getelementptr double, double* %x, i64 %index
-  %0 = bitcast double* %next.gep19 to <2 x double>*
-  %wide.load = load <2 x double>, <2 x double>* %0, align 8
-  %1 = call <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 7.500000e-01, double 7.500000e-01>)
-  %2 = bitcast double* %next.gep to <2 x double>*
-  store <2 x double> %1, <2 x double>* %2, align 8
+  %next.gep = getelementptr double, ptr %y, i64 %index
+  %next.gep19 = getelementptr double, ptr %x, i64 %index
+  %wide.load = load <2 x double>, ptr %next.gep19, align 8
+  %0 = call <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 7.500000e-01, double 7.500000e-01>)
+  store <2 x double> %0, ptr %next.gep, align 8
   %index.next = add i64 %index, 2
-  %3 = icmp eq i64 %index.next, 1024
-  br i1 %3, label %for.end, label %vector.body
+  %1 = icmp eq i64 %index.next, 1024
+  br i1 %1, label %for.end, label %vector.body
 
 for.end:
   ret void
 }
 
 ; Exponent is 0.25 but no proper fast-math flags
-define void @vpow_025_nofast(double* nocapture %y, double* nocapture readonly %x) {
+define void @vpow_025_nofast(ptr nocapture %y, ptr nocapture readonly %x) {
 ; CHECK-LABEL:       @vpow_025_nofast
 ; CHECK-PWR10:       __powd2_P10
 ; CHECK-PWR9:        __powd2_P9
@@ -244,16 +227,14 @@ entry:
 
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
-  %next.gep = getelementptr double, double* %y, i64 %index
-  %next.gep19 = getelementptr double, double* %x, i64 %index
-  %0 = bitcast double* %next.gep19 to <2 x double>*
-  %wide.load = load <2 x double>, <2 x double>* %0, align 8
-  %1 = call <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 2.500000e-01, double 2.500000e-01>)
-  %2 = bitcast double* %next.gep to <2 x double>*
-  store <2 x double> %1, <2 x double>* %2, align 8
+  %next.gep = getelementptr double, ptr %y, i64 %index
+  %next.gep19 = getelementptr double, ptr %x, i64 %index
+  %wide.load = load <2 x double>, ptr %next.gep19, align 8
+  %0 = call <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 2.500000e-01, double 2.500000e-01>)
+  store <2 x double> %0, ptr %next.gep, align 8
   %index.next = add i64 %index, 2
-  %3 = icmp eq i64 %index.next, 1024
-  br i1 %3, label %for.end, label %vector.body
+  %1 = icmp eq i64 %index.next, 1024
+  br i1 %1, label %for.end, label %vector.body
 
 for.end:
   ret void

diff  --git a/llvm/test/CodeGen/PowerPC/power9-moves-and-splats.ll b/llvm/test/CodeGen/PowerPC/power9-moves-and-splats.ll
index 85ff60840e0a..eb7a753f054a 100644
--- a/llvm/test/CodeGen/PowerPC/power9-moves-and-splats.ll
+++ b/llvm/test/CodeGen/PowerPC/power9-moves-and-splats.ll
@@ -58,7 +58,7 @@ entry:
   ret i64 %0
 }
 
-define <4 x i32> @test4(i32* nocapture readonly %in) {
+define <4 x i32> @test4(ptr nocapture readonly %in) {
 ; CHECK-LABEL: test4:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvwsx v2, 0, r3
@@ -70,13 +70,13 @@ define <4 x i32> @test4(i32* nocapture readonly %in) {
 ; CHECK-BE-NEXT:    blr
 
 entry:
-  %0 = load i32, i32* %in, align 4
+  %0 = load i32, ptr %in, align 4
   %splat.splatinsert = insertelement <4 x i32> undef, i32 %0, i32 0
   %splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   ret <4 x i32> %splat.splat
 }
 
-define <4 x float> @test5(float* nocapture readonly %in) {
+define <4 x float> @test5(ptr nocapture readonly %in) {
 ; CHECK-LABEL: test5:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvwsx v2, 0, r3
@@ -88,7 +88,7 @@ define <4 x float> @test5(float* nocapture readonly %in) {
 ; CHECK-BE-NEXT:    blr
 
 entry:
-  %0 = load float, float* %in, align 4
+  %0 = load float, ptr %in, align 4
   %splat.splatinsert = insertelement <4 x float> undef, float %0, i32 0
   %splat.splat = shufflevector <4 x float> %splat.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
   ret <4 x float> %splat.splat
@@ -110,7 +110,7 @@ define <4 x i32> @test6() {
 ; CHECK-BE-NEXT:    blr
 
 entry:
-  %0 = load i32, i32* @Globi, align 4
+  %0 = load i32, ptr @Globi, align 4
   %splat.splatinsert = insertelement <4 x i32> undef, i32 %0, i32 0
   %splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   ret <4 x i32> %splat.splat
@@ -132,7 +132,7 @@ define <4 x float> @test7() {
 ; CHECK-BE-NEXT:    blr
 
 entry:
-  %0 = load float, float* @Globf, align 4
+  %0 = load float, ptr @Globf, align 4
   %splat.splatinsert = insertelement <4 x float> undef, float %0, i32 0
   %splat.splat = shufflevector <4 x float> %splat.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
   ret <4 x float> %splat.splat
@@ -243,7 +243,7 @@ entry:
   ret <16 x i8> <i8 200, i8 200, i8 200, i8 200, i8 200, i8 200, i8 200, i8 200, i8 200, i8 200, i8 200, i8 200, i8 200, i8 200, i8 200, i8 200>
 }
 
-define <4 x i32> @test14(<4 x i32> %a, i32* nocapture readonly %b) {
+define <4 x i32> @test14(<4 x i32> %a, ptr nocapture readonly %b) {
 ; CHECK-LABEL: test14:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lwz r3, 0(r5)
@@ -263,10 +263,10 @@ define <4 x i32> @test14(<4 x i32> %a, i32* nocapture readonly %b) {
 ; CHECK-BE-NEXT:    blr
 
 entry:
-  %0 = load i32, i32* %b, align 4
+  %0 = load i32, ptr %b, align 4
   %splat.splatinsert = insertelement <4 x i32> undef, i32 %0, i32 0
   %splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   %1 = add i32 %0, 5
-  store i32 %1, i32* %b, align 4
+  store i32 %1, ptr %b, align 4
   ret <4 x i32> %splat.splat
 }

diff  --git a/llvm/test/CodeGen/PowerPC/powf_massv_075_025exp.ll b/llvm/test/CodeGen/PowerPC/powf_massv_075_025exp.ll
index 00851c8aa59b..a5c25ca93ecb 100644
--- a/llvm/test/CodeGen/PowerPC/powf_massv_075_025exp.ll
+++ b/llvm/test/CodeGen/PowerPC/powf_massv_075_025exp.ll
@@ -7,7 +7,7 @@
 ; RUN: llc -vector-library=MASSV < %s -mtriple=powerpc-ibm-aix-xcoff -mcpu=pwr7 | FileCheck -check-prefixes=CHECK-PWR7 %s
 
 ; Exponent is a variable
-define void @vspow_var(float* nocapture %z, float* nocapture readonly %y, float* nocapture readonly %x)  {
+define void @vspow_var(ptr nocapture %z, ptr nocapture readonly %y, ptr nocapture readonly %x)  {
 ; CHECK-LABEL:       @vspow_var
 ; CHECK-PWR10:       __powf4_P10
 ; CHECK-PWR9:        __powf4_P9
@@ -19,26 +19,23 @@ entry:
 
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
-  %next.gep = getelementptr float, float* %z, i64 %index
-  %next.gep31 = getelementptr float, float* %y, i64 %index
-  %next.gep32 = getelementptr float, float* %x, i64 %index
-  %0 = bitcast float* %next.gep32 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %0, align 4
-  %1 = bitcast float* %next.gep31 to <4 x float>*
-  %wide.load33 = load <4 x float>, <4 x float>* %1, align 4
-  %2 = call ninf afn nsz <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> %wide.load33)
-  %3 = bitcast float* %next.gep to <4 x float>*
-  store <4 x float> %2, <4 x float>* %3, align 4
+  %next.gep = getelementptr float, ptr %z, i64 %index
+  %next.gep31 = getelementptr float, ptr %y, i64 %index
+  %next.gep32 = getelementptr float, ptr %x, i64 %index
+  %wide.load = load <4 x float>, ptr %next.gep32, align 4
+  %wide.load33 = load <4 x float>, ptr %next.gep31, align 4
+  %0 = call ninf afn nsz <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> %wide.load33)
+  store <4 x float> %0, ptr %next.gep, align 4
   %index.next = add i64 %index, 4
-  %4 = icmp eq i64 %index.next, 1024
-  br i1 %4, label %for.end, label %vector.body
+  %1 = icmp eq i64 %index.next, 1024
+  br i1 %1, label %for.end, label %vector.body
 
 for.end:
   ret void
 }
 
 ; Exponent is a constant != 0.75 and !=0.25
-define void @vspow_const(float* nocapture %y, float* nocapture readonly %x)  {
+define void @vspow_const(ptr nocapture %y, ptr nocapture readonly %x)  {
 ; CHECK-LABEL:       @vspow_const
 ; CHECK-PWR10:       __powf4_P10
 ; CHECK-PWR9:        __powf4_P9
@@ -50,23 +47,21 @@ entry:
 
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
-  %next.gep = getelementptr float, float* %y, i64 %index
-  %next.gep19 = getelementptr float, float* %x, i64 %index
-  %0 = bitcast float* %next.gep19 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %0, align 4
-  %1 = call ninf afn nsz <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 0x3FE851EB80000000, float 0x3FE851EB80000000, float 0x3FE851EB80000000, float 0x3FE851EB80000000>)
-  %2 = bitcast float* %next.gep to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
+  %next.gep = getelementptr float, ptr %y, i64 %index
+  %next.gep19 = getelementptr float, ptr %x, i64 %index
+  %wide.load = load <4 x float>, ptr %next.gep19, align 4
+  %0 = call ninf afn nsz <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 0x3FE851EB80000000, float 0x3FE851EB80000000, float 0x3FE851EB80000000, float 0x3FE851EB80000000>)
+  store <4 x float> %0, ptr %next.gep, align 4
   %index.next = add i64 %index, 4
-  %3 = icmp eq i64 %index.next, 1024
-  br i1 %3, label %for.end, label %vector.body
+  %1 = icmp eq i64 %index.next, 1024
+  br i1 %1, label %for.end, label %vector.body
 
 for.end:
   ret void
 }
 
 ; Exponent is a constant != 0.75 and !=0.25 and they are different
-define void @vspow_neq_const(float* nocapture %y, float* nocapture readonly %x)  {
+define void @vspow_neq_const(ptr nocapture %y, ptr nocapture readonly %x)  {
 ; CHECK-LABEL:       @vspow_neq_const
 ; CHECK-PWR10:       __powf4_P10
 ; CHECK-PWR9:        __powf4_P9
@@ -78,23 +73,21 @@ entry:
 
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
-  %next.gep = getelementptr float, float* %y, i64 %index
-  %next.gep19 = getelementptr float, float* %x, i64 %index
-  %0 = bitcast float* %next.gep19 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %0, align 4
-  %1 = call ninf afn nsz <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 0x3FE861EB80000000, float 0x3FE871EB80000000, float 0x3FE851EB80000000, float 0x3FE851EB80000000>)
-  %2 = bitcast float* %next.gep to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
+  %next.gep = getelementptr float, ptr %y, i64 %index
+  %next.gep19 = getelementptr float, ptr %x, i64 %index
+  %wide.load = load <4 x float>, ptr %next.gep19, align 4
+  %0 = call ninf afn nsz <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 0x3FE861EB80000000, float 0x3FE871EB80000000, float 0x3FE851EB80000000, float 0x3FE851EB80000000>)
+  store <4 x float> %0, ptr %next.gep, align 4
   %index.next = add i64 %index, 4
-  %3 = icmp eq i64 %index.next, 1024
-  br i1 %3, label %for.end, label %vector.body
+  %1 = icmp eq i64 %index.next, 1024
+  br i1 %1, label %for.end, label %vector.body
 
 for.end:
   ret void
 }
 
 ; Exponent is a constant != 0.75 and !=0.25
-define void @vspow_neq075_const(float* nocapture %y, float* nocapture readonly %x)  {
+define void @vspow_neq075_const(ptr nocapture %y, ptr nocapture readonly %x)  {
 ; CHECK-LABEL:       @vspow_neq075_const
 ; CHECK-PWR10:       __powf4_P10
 ; CHECK-PWR9:        __powf4_P9
@@ -106,23 +99,21 @@ entry:
 
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
-  %next.gep = getelementptr float, float* %y, i64 %index
-  %next.gep19 = getelementptr float, float* %x, i64 %index
-  %0 = bitcast float* %next.gep19 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %0, align 4
-  %1 = call ninf afn nsz <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 7.500000e-01, float 7.500000e-01, float 7.500000e-01, float 0x3FE851EB80000000>)
-  %2 = bitcast float* %next.gep to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
+  %next.gep = getelementptr float, ptr %y, i64 %index
+  %next.gep19 = getelementptr float, ptr %x, i64 %index
+  %wide.load = load <4 x float>, ptr %next.gep19, align 4
+  %0 = call ninf afn nsz <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 7.500000e-01, float 7.500000e-01, float 7.500000e-01, float 0x3FE851EB80000000>)
+  store <4 x float> %0, ptr %next.gep, align 4
   %index.next = add i64 %index, 4
-  %3 = icmp eq i64 %index.next, 1024
-  br i1 %3, label %for.end, label %vector.body
+  %1 = icmp eq i64 %index.next, 1024
+  br i1 %1, label %for.end, label %vector.body
 
 for.end:
   ret void
 }
 
 ; Exponent is a constant != 0.75 and !=0.25
-define void @vspow_neq025_const(float* nocapture %y, float* nocapture readonly %x)  {
+define void @vspow_neq025_const(ptr nocapture %y, ptr nocapture readonly %x)  {
 ; CHECK-LABEL:       @vspow_neq025_const
 ; CHECK-PWR10:       __powf4_P10
 ; CHECK-PWR9:        __powf4_P9
@@ -134,23 +125,21 @@ entry:
 
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
-  %next.gep = getelementptr float, float* %y, i64 %index
-  %next.gep19 = getelementptr float, float* %x, i64 %index
-  %0 = bitcast float* %next.gep19 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %0, align 4
-  %1 = call ninf afn nsz <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 0x3FE851EB80000000, float 2.500000e-01, float 0x3FE851EB80000000, float 2.500000e-01>)
-  %2 = bitcast float* %next.gep to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
+  %next.gep = getelementptr float, ptr %y, i64 %index
+  %next.gep19 = getelementptr float, ptr %x, i64 %index
+  %wide.load = load <4 x float>, ptr %next.gep19, align 4
+  %0 = call ninf afn nsz <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 0x3FE851EB80000000, float 2.500000e-01, float 0x3FE851EB80000000, float 2.500000e-01>)
+  store <4 x float> %0, ptr %next.gep, align 4
   %index.next = add i64 %index, 4
-  %3 = icmp eq i64 %index.next, 1024
-  br i1 %3, label %for.end, label %vector.body
+  %1 = icmp eq i64 %index.next, 1024
+  br i1 %1, label %for.end, label %vector.body
 
 for.end:
   ret void
 }
 
 ; Exponent is 0.75
-define void @vspow_075(float* nocapture %y, float* nocapture readonly %x)  {
+define void @vspow_075(ptr nocapture %y, ptr nocapture readonly %x)  {
 ; CHECK-LABEL:       @vspow_075
 ; CHECK-NOT:         __powf4_P{{[7,8,9,10]}}
 ; CHECK:             xvrsqrtesp
@@ -160,23 +149,21 @@ entry:
 
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
-  %next.gep = getelementptr float, float* %y, i64 %index
-  %next.gep19 = getelementptr float, float* %x, i64 %index
-  %0 = bitcast float* %next.gep19 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %0, align 4
-  %1 = call ninf afn <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 7.500000e-01, float 7.500000e-01, float 7.500000e-01, float 7.500000e-01>)
-  %2 = bitcast float* %next.gep to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
+  %next.gep = getelementptr float, ptr %y, i64 %index
+  %next.gep19 = getelementptr float, ptr %x, i64 %index
+  %wide.load = load <4 x float>, ptr %next.gep19, align 4
+  %0 = call ninf afn <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 7.500000e-01, float 7.500000e-01, float 7.500000e-01, float 7.500000e-01>)
+  store <4 x float> %0, ptr %next.gep, align 4
   %index.next = add i64 %index, 4
-  %3 = icmp eq i64 %index.next, 1024
-  br i1 %3, label %for.end, label %vector.body
+  %1 = icmp eq i64 %index.next, 1024
+  br i1 %1, label %for.end, label %vector.body
 
 for.end:
   ret void
 }
 
 ; Exponent is 0.25
-define void @vspow_025(float* nocapture %y, float* nocapture readonly %x)  {
+define void @vspow_025(ptr nocapture %y, ptr nocapture readonly %x)  {
 ; CHECK-LABEL:       @vspow_025
 ; CHECK-NOT:         __powf4_P{{[7,8,9,10]}}
 ; CHECK:             xvrsqrtesp
@@ -186,23 +173,21 @@ entry:
 
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
-  %next.gep = getelementptr float, float* %y, i64 %index
-  %next.gep19 = getelementptr float, float* %x, i64 %index
-  %0 = bitcast float* %next.gep19 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %0, align 4
-  %1 = call ninf afn nsz <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 2.500000e-01, float 2.500000e-01, float 2.500000e-01, float 2.500000e-01>)
-  %2 = bitcast float* %next.gep to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
+  %next.gep = getelementptr float, ptr %y, i64 %index
+  %next.gep19 = getelementptr float, ptr %x, i64 %index
+  %wide.load = load <4 x float>, ptr %next.gep19, align 4
+  %0 = call ninf afn nsz <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 2.500000e-01, float 2.500000e-01, float 2.500000e-01, float 2.500000e-01>)
+  store <4 x float> %0, ptr %next.gep, align 4
   %index.next = add i64 %index, 4
-  %3 = icmp eq i64 %index.next, 1024
-  br i1 %3, label %for.end, label %vector.body
+  %1 = icmp eq i64 %index.next, 1024
+  br i1 %1, label %for.end, label %vector.body
 
 for.end:
   ret void
 }
 
 ; Exponent is 0.75 but no proper fast-math flags
-define void @vspow_075_nofast(float* nocapture %y, float* nocapture readonly %x)  {
+define void @vspow_075_nofast(ptr nocapture %y, ptr nocapture readonly %x)  {
 ; CHECK-LABEL:       @vspow_075_nofast
 ; CHECK-PWR10:       __powf4_P10
 ; CHECK-PWR9:        __powf4_P9
@@ -215,23 +200,21 @@ entry:
 
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
-  %next.gep = getelementptr float, float* %y, i64 %index
-  %next.gep19 = getelementptr float, float* %x, i64 %index
-  %0 = bitcast float* %next.gep19 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %0, align 4
-  %1 = call <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 7.500000e-01, float 7.500000e-01, float 7.500000e-01, float 7.500000e-01>)
-  %2 = bitcast float* %next.gep to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
+  %next.gep = getelementptr float, ptr %y, i64 %index
+  %next.gep19 = getelementptr float, ptr %x, i64 %index
+  %wide.load = load <4 x float>, ptr %next.gep19, align 4
+  %0 = call <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 7.500000e-01, float 7.500000e-01, float 7.500000e-01, float 7.500000e-01>)
+  store <4 x float> %0, ptr %next.gep, align 4
   %index.next = add i64 %index, 4
-  %3 = icmp eq i64 %index.next, 1024
-  br i1 %3, label %for.end, label %vector.body
+  %1 = icmp eq i64 %index.next, 1024
+  br i1 %1, label %for.end, label %vector.body
 
 for.end:
   ret void
 }
 
 ; Exponent is 0.25 but no proper fast-math flags
-define void @vspow_025_nofast(float* nocapture %y, float* nocapture readonly %x)  {
+define void @vspow_025_nofast(ptr nocapture %y, ptr nocapture readonly %x)  {
 ; CHECK-LABEL:       @vspow_025_nofast
 ; CHECK-PWR10:       __powf4_P10
 ; CHECK-PWR9:        __powf4_P9
@@ -244,16 +227,14 @@ entry:
 
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
-  %next.gep = getelementptr float, float* %y, i64 %index
-  %next.gep19 = getelementptr float, float* %x, i64 %index
-  %0 = bitcast float* %next.gep19 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %0, align 4
-  %1 = call <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 2.500000e-01, float 2.500000e-01, float 2.500000e-01, float 2.500000e-01>)
-  %2 = bitcast float* %next.gep to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
+  %next.gep = getelementptr float, ptr %y, i64 %index
+  %next.gep19 = getelementptr float, ptr %x, i64 %index
+  %wide.load = load <4 x float>, ptr %next.gep19, align 4
+  %0 = call <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 2.500000e-01, float 2.500000e-01, float 2.500000e-01, float 2.500000e-01>)
+  store <4 x float> %0, ptr %next.gep, align 4
   %index.next = add i64 %index, 4
-  %3 = icmp eq i64 %index.next, 1024
-  br i1 %3, label %for.end, label %vector.body
+  %1 = icmp eq i64 %index.next, 1024
+  br i1 %1, label %for.end, label %vector.body
 
 for.end:
   ret void

diff  --git a/llvm/test/CodeGen/PowerPC/ppc-32bit-build-vector.ll b/llvm/test/CodeGen/PowerPC/ppc-32bit-build-vector.ll
index a46b554707a9..6e6e5326c05b 100644
--- a/llvm/test/CodeGen/PowerPC/ppc-32bit-build-vector.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc-32bit-build-vector.ll
@@ -59,7 +59,7 @@ define dso_local fastcc void @BuildVectorICE() unnamed_addr {
      while.body:                                       ; preds = %while.body, %entry
      %newelement = phi i32 [ 0, %entry ], [ %5, %while.body ]
      %0 = insertelement <4 x i32> <i32 undef, i32 0, i32 0, i32 0>, i32 %newelement, i32 0
-     %1 = load <4 x i32>, <4 x i32>* undef, align 1
+     %1 = load <4 x i32>, ptr undef, align 1
      %2 = add <4 x i32> %1, %0
      %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
      %4 = add <4 x i32> %2, %3

diff  --git a/llvm/test/CodeGen/PowerPC/ppc-32bit-shift.ll b/llvm/test/CodeGen/PowerPC/ppc-32bit-shift.ll
index 4762cb946b43..3d5020e241b4 100644
--- a/llvm/test/CodeGen/PowerPC/ppc-32bit-shift.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc-32bit-shift.ll
@@ -5,7 +5,7 @@
 ; RUN: llc -verify-machineinstrs -mtriple=powerpc64 \
 ; RUN:     -mcpu=pwr9 < %s | FileCheck %s --check-prefix=64BIT
 
-define dso_local void @foo(i32 %inta, i64* %long_intb) {
+define dso_local void @foo(i32 %inta, ptr %long_intb) {
 ; 32BIT-LABEL: foo:
 ; 32BIT:       # %bb.0: # %entry
 ; 32BIT-NEXT:    srawi 5, 3, 31
@@ -24,6 +24,6 @@ define dso_local void @foo(i32 %inta, i64* %long_intb) {
   entry:
     %conv = sext i32 %inta to i64
     %shl = shl nsw i64 %conv, 8
-    store i64 %shl, i64* %long_intb, align 8
+    store i64 %shl, ptr %long_intb, align 8
     ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/ppc-ctr-dead-code.ll b/llvm/test/CodeGen/PowerPC/ppc-ctr-dead-code.ll
index 2cfbb2d00df7..04944d5ee73e 100644
--- a/llvm/test/CodeGen/PowerPC/ppc-ctr-dead-code.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc-ctr-dead-code.ll
@@ -4,7 +4,7 @@
 ; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -verify-machineinstrs | FileCheck %s
 
 ; Function Attrs: norecurse nounwind readonly
-define signext i32 @limit_loop(i32 signext %iters, i32* nocapture readonly %vec, i32 signext %limit) local_unnamed_addr {
+define signext i32 @limit_loop(i32 signext %iters, ptr nocapture readonly %vec, i32 signext %limit) local_unnamed_addr {
 entry:
   %cmp5 = icmp sgt i32 %iters, 0
   br i1 %cmp5, label %for.body.preheader, label %cleanup
@@ -19,8 +19,8 @@ for.cond:                                         ; preds = %for.body
 
 for.body:                                         ; preds = %for.body.preheader, %for.cond
   %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.cond ]
-  %arrayidx = getelementptr inbounds i32, i32* %vec, i64 %indvars.iv
-  %1 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %vec, i64 %indvars.iv
+  %1 = load i32, ptr %arrayidx, align 4
   %cmp1 = icmp slt i32 %1, %limit
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   br i1 %cmp1, label %for.cond, label %cleanup

diff  --git a/llvm/test/CodeGen/PowerPC/ppc-disable-non-volatile-cr.ll b/llvm/test/CodeGen/PowerPC/ppc-disable-non-volatile-cr.ll
index 90e4d2427768..81c667d5b1c5 100644
--- a/llvm/test/CodeGen/PowerPC/ppc-disable-non-volatile-cr.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc-disable-non-volatile-cr.ll
@@ -29,11 +29,11 @@ entry:
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  tail call void bitcast (void (...)* @fa to void ()*)()
+  tail call void @fa()
   br label %if.end
 
 if.else:                                          ; preds = %entry
-  tail call void bitcast (void (...)* @fb to void ()*)()
+  tail call void @fb()
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then

diff  --git a/llvm/test/CodeGen/PowerPC/ppc-empty-fs.ll b/llvm/test/CodeGen/PowerPC/ppc-empty-fs.ll
index dbeaa94a9a72..5c9bb1240caf 100644
--- a/llvm/test/CodeGen/PowerPC/ppc-empty-fs.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc-empty-fs.ll
@@ -8,21 +8,18 @@ target triple = "powerpc64-unknown-linux-gnu"
 %struct.fab = type { float, float }
 
 ; Function Attrs: nounwind
-define void @func_fab(%struct.fab* noalias sret(%struct.fab) %agg.result, i64 %x.coerce) #0 {
+define void @func_fab(ptr noalias sret(%struct.fab) %agg.result, i64 %x.coerce) #0 {
 entry:
   %x = alloca %struct.fab, align 8
-  %0 = bitcast %struct.fab* %x to i64*
-  store i64 %x.coerce, i64* %0, align 1
-  %1 = bitcast %struct.fab* %agg.result to i8*
-  %2 = bitcast %struct.fab* %x to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 %2, i64 8, i1 false)
+  store i64 %x.coerce, ptr %x, align 1
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %agg.result, ptr align 4 %x, i64 8, i1 false)
   ret void
 }
 
 ; CHECK: func_fab
 
 ; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #1
 
 attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "target-features"="" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/PowerPC/ppc-label.ll b/llvm/test/CodeGen/PowerPC/ppc-label.ll
index 4a74b8d07c08..9a4a29a8ec8b 100644
--- a/llvm/test/CodeGen/PowerPC/ppc-label.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc-label.ll
@@ -13,7 +13,7 @@
 ;   };
 ;
 ;   unsigned int ret = foo();
-;   void* g = (void *) ((unsigned int)&&L + arr[ret]);
+;   ptr g = (ptr) ((unsigned int)&&L + arr[ret]);
 ;   goto *g;
 ;
 ; x:
@@ -32,7 +32,7 @@ entry:
   br label %L
 
 L:                                                ; preds = %L, %entry
-  indirectbr i8* inttoptr (i32 add (i32 ptrtoint (i8* blockaddress(@main, %L) to i32), i32 sub (i32 ptrtoint (i8* blockaddress(@main, %return) to i32), i32 ptrtoint (i8* blockaddress(@main, %L) to i32))) to i8*), [label %return, label %L]
+  indirectbr ptr inttoptr (i32 add (i32 ptrtoint (ptr blockaddress(@main, %L) to i32), i32 sub (i32 ptrtoint (ptr blockaddress(@main, %return) to i32), i32 ptrtoint (ptr blockaddress(@main, %L) to i32))) to ptr), [label %return, label %L]
 
 return:                                           ; preds = %L
   ret i32 15

diff  --git a/llvm/test/CodeGen/PowerPC/ppc-label2.ll b/llvm/test/CodeGen/PowerPC/ppc-label2.ll
index 8c69e4edb381..cedb7b29f926 100644
--- a/llvm/test/CodeGen/PowerPC/ppc-label2.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc-label2.ll
@@ -9,7 +9,7 @@ entry:
   br label %__here
 
 __here:                                           ; preds = %entry
-  ret i64 ptrtoint (i8* blockaddress(@foo, %__here) to i64)
+  ret i64 ptrtoint (ptr blockaddress(@foo, %__here) to i64)
 }
 
 ; CHECK-PIC32:           lwz {{r[0-9]+}}, .LC0-.LTOC(r30)

diff  --git a/llvm/test/CodeGen/PowerPC/ppc-partword-atomic.ll b/llvm/test/CodeGen/PowerPC/ppc-partword-atomic.ll
index 3ee12f4deea9..c4807f58c167 100644
--- a/llvm/test/CodeGen/PowerPC/ppc-partword-atomic.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc-partword-atomic.ll
@@ -52,9 +52,9 @@ define dso_local zeroext i32 @testI8(i8 zeroext %val) local_unnamed_addr #0 {
 ; PWR9-NEXT:    li 3, 55
 ; PWR9-NEXT:    blr
 entry:
-  %0 = atomicrmw xchg i8* getelementptr inbounds ({ i8 }, { i8 }* @value8, i64 0, i32 0), i8 %val seq_cst, align 1
+  %0 = atomicrmw xchg ptr @value8, i8 %val seq_cst, align 1
   %conv = zext i8 %0 to i32
-  store i32 %conv, i32* @global_int, align 4
+  store i32 %conv, ptr @global_int, align 4
   ret i32 55
 }
 
@@ -105,9 +105,9 @@ define dso_local zeroext i32 @testI16(i16 zeroext %val) local_unnamed_addr #0 {
 ; PWR9-NEXT:    li 3, 55
 ; PWR9-NEXT:    blr
 entry:
-  %0 = atomicrmw xchg i16* getelementptr inbounds ({ i16 }, { i16 }* @value16, i64 0, i32 0), i16 %val seq_cst, align 2
+  %0 = atomicrmw xchg ptr @value16, i16 %val seq_cst, align 2
   %conv = zext i16 %0 to i32
-  store i32 %conv, i32* @global_int, align 4
+  store i32 %conv, ptr @global_int, align 4
   ret i32 55
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/ppc-prologue.ll b/llvm/test/CodeGen/PowerPC/ppc-prologue.ll
index 342728cda379..289121c1bcd2 100644
--- a/llvm/test/CodeGen/PowerPC/ppc-prologue.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc-prologue.ll
@@ -7,20 +7,20 @@ define i32 @_Z4funci(i32 %a) ssp {
 ; CHECK-NEXT:  stw 31, 28(1)
 ; CHECK:  mr 31, 1
 entry:
-  %a_addr = alloca i32                            ; <i32*> [#uses=2]
-  %retval = alloca i32                            ; <i32*> [#uses=2]
-  %0 = alloca i32                                 ; <i32*> [#uses=2]
+  %a_addr = alloca i32                            ; <ptr> [#uses=2]
+  %retval = alloca i32                            ; <ptr> [#uses=2]
+  %0 = alloca i32                                 ; <ptr> [#uses=2]
   %"alloca point" = bitcast i32 0 to i32          ; <i32> [#uses=0]
-  store i32 %a, i32* %a_addr
-  %1 = call i32 @_Z3barPi(i32* %a_addr)           ; <i32> [#uses=1]
-  store i32 %1, i32* %0, align 4
-  %2 = load i32, i32* %0, align 4                      ; <i32> [#uses=1]
-  store i32 %2, i32* %retval, align 4
+  store i32 %a, ptr %a_addr
+  %1 = call i32 @_Z3barPi(ptr %a_addr)           ; <i32> [#uses=1]
+  store i32 %1, ptr %0, align 4
+  %2 = load i32, ptr %0, align 4                      ; <i32> [#uses=1]
+  store i32 %2, ptr %retval, align 4
   br label %return
 
 return:                                           ; preds = %entry
-  %retval1 = load i32, i32* %retval                    ; <i32> [#uses=1]
+  %retval1 = load i32, ptr %retval                    ; <i32> [#uses=1]
   ret i32 %retval1
 }
 
-declare i32 @_Z3barPi(i32*)
+declare i32 @_Z3barPi(ptr)

diff  --git a/llvm/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll b/llvm/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll
index 0c6311365e3a..5a4aa4385e64 100644
--- a/llvm/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll
@@ -61,8 +61,8 @@ define i32 @foo(i32 %a, i32 %b) {
   br i1 %tmp2, label %true, label %false
 
 true:
-  store i32 %a, i32* %tmp, align 4
-  %tmp4 = call i32 @doSomething(i32 0, i32* %tmp)
+  store i32 %a, ptr %tmp, align 4
+  %tmp4 = call i32 @doSomething(i32 0, ptr %tmp)
   br label %false
 
 false:
@@ -71,7 +71,7 @@ false:
 }
 
 ; Function Attrs: optsize
-declare i32 @doSomething(i32, i32*)
+declare i32 @doSomething(i32, ptr)
 
 
 ; Check that we do not perform the restore inside the loop whereas the save
@@ -133,7 +133,7 @@ for.preheader:
 for.body:                                         ; preds = %entry, %for.body
   %i.05 = phi i32 [ %inc, %for.body ], [ 0, %for.preheader ]
   %sum.04 = phi i32 [ %add, %for.body ], [ 0, %for.preheader ]
-  %call = tail call i32 bitcast (i32 (...)* @something to i32 ()*)()
+  %call = tail call i32 @something()
   %add = add nsw i32 %call, %sum.04
   %inc = add nuw nsw i32 %i.05, 1
   %exitcond = icmp eq i32 %inc, 10
@@ -190,7 +190,7 @@ for.preheader:
 for.body:                                         ; preds = %for.body, %entry
   %i.04 = phi i32 [ 0, %for.preheader ], [ %inc, %for.body ]
   %sum.03 = phi i32 [ 0, %for.preheader ], [ %add, %for.body ]
-  %call = tail call i32 bitcast (i32 (...)* @something to i32 ()*)()
+  %call = tail call i32 @something()
   %add = add nsw i32 %call, %sum.03
   %inc = add nuw nsw i32 %i.04, 1
   %exitcond = icmp eq i32 %inc, 10
@@ -277,14 +277,14 @@ for.preheader:
 for.body:                                         ; preds = %entry, %for.body
   %i.05 = phi i32 [ %inc, %for.body ], [ 0, %for.preheader ]
   %sum.04 = phi i32 [ %add, %for.body ], [ 0, %for.preheader ]
-  %call = tail call i32 bitcast (i32 (...)* @something to i32 ()*)()
+  %call = tail call i32 @something()
   %add = add nsw i32 %call, %sum.04
   %inc = add nuw nsw i32 %i.05, 1
   %exitcond = icmp eq i32 %inc, 10
   br i1 %exitcond, label %for.end, label %for.body
 
 for.end:                                          ; preds = %for.body
-  tail call void bitcast (void (...)* @somethingElse to void ()*)()
+  tail call void @somethingElse()
   %shl = shl i32 %add, 3
   br label %if.end
 
@@ -364,13 +364,13 @@ entry:
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:                                          ; preds = %entry
-  tail call void bitcast (void (...)* @somethingElse to void ()*)()
+  tail call void @somethingElse()
   br label %for.body
 
 for.body:                                         ; preds = %for.body, %if.then
   %i.05 = phi i32 [ 0, %if.then ], [ %inc, %for.body ]
   %sum.04 = phi i32 [ 0, %if.then ], [ %add, %for.body ]
-  %call = tail call i32 bitcast (i32 (...)* @something to i32 ()*)()
+  %call = tail call i32 @something()
   %add = add nsw i32 %call, %sum.04
   %inc = add nuw nsw i32 %i.05, 1
   %exitcond = icmp eq i32 %inc, 10
@@ -589,9 +589,9 @@ if.then:
 
 for.body:                                         ; preds = %for.body, %entry
   %sum.03 = phi i32 [ 0, %if.then ], [ %add, %for.body ]
-  %call = tail call i32 bitcast (i32 (...)* @something to i32 ()*)()
+  %call = tail call i32 @something()
   %add = add nsw i32 %call, %sum.03
-  store i32 %add, i32* %ptr
+  store i32 %add, ptr %ptr
   br label %for.body
 
 if.end:
@@ -613,7 +613,7 @@ for.body:                                         ; preds = %for.body, %entry
   %sum.03 = phi i32 [ 0, %if.then ], [ %add, %body1 ], [ 1, %body2]
   %call = tail call i32 asm "mftb $0, 268", "=r,~{r14}"()
   %add = add nsw i32 %call, %sum.03
-  store i32 %add, i32* %ptr
+  store i32 %add, ptr %ptr
   br i1 undef, label %body1, label %body2
 
 body1:
@@ -639,21 +639,19 @@ body:                                             ; preds = %entry
   br i1 undef, label %loop2a, label %end
 
 loop1:                                            ; preds = %loop2a, %loop2b
-  %var.phi = phi i32* [ %next.phi, %loop2b ], [ %var, %loop2a ]
-  %next.phi = phi i32* [ %next.load, %loop2b ], [ %next.var, %loop2a ]
-  %0 = icmp eq i32* %var, null
-  %next.load = load i32*, i32** undef
+  %var.phi = phi ptr [ %next.phi, %loop2b ], [ %var, %loop2a ]
+  %next.phi = phi ptr [ %next.load, %loop2b ], [ %next.var, %loop2a ]
+  %0 = icmp eq ptr %var, null
+  %next.load = load ptr, ptr undef
   br i1 %0, label %loop2a, label %loop2b
 
 loop2a:                                           ; preds = %loop1, %body, %entry
-  %var = phi i32* [ null, %body ], [ null, %entry ], [ %next.phi, %loop1 ]
-  %next.var = phi i32* [ undef, %body ], [ null, %entry ], [ %next.load, %loop1 ]
+  %var = phi ptr [ null, %body ], [ null, %entry ], [ %next.phi, %loop1 ]
+  %next.var = phi ptr [ undef, %body ], [ null, %entry ], [ %next.load, %loop1 ]
   br label %loop1
 
 loop2b:                                           ; preds = %loop1
-  %gep1 = bitcast i32* %var.phi to i32*
-  %next.ptr = bitcast i32* %gep1 to i32**
-  store i32* %next.phi, i32** %next.ptr
+  store ptr %next.phi, ptr %var.phi
   br label %loop1
 
 end:
@@ -664,8 +662,8 @@ end:
 @lock = common global i32 0, align 4
 @htindex = common global i32 0, align 4
 @stride = common global i32 0, align 4
- at ht = common global i32* null, align 8
- at he = common global i8* null, align 8
+ at ht = common global ptr null, align 8
+ at he = common global ptr null, align 8
 
 ; Test for a bug that was caused when save point was equal to restore point.
 ; Function Attrs: nounwind
@@ -683,26 +681,26 @@ end:
 ; CHECK: blr
 define signext i32 @transpose() {
 entry:
-  %0 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 1), align 4
+  %0 = load i32, ptr getelementptr inbounds ([0 x i32], ptr @columns, i64 0, i64 1), align 4
   %shl.i = shl i32 %0, 7
-  %1 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 2), align 4
+  %1 = load i32, ptr getelementptr inbounds ([0 x i32], ptr @columns, i64 0, i64 2), align 4
   %or.i = or i32 %shl.i, %1
   %shl1.i = shl i32 %or.i, 7
-  %2 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 3), align 4
+  %2 = load i32, ptr getelementptr inbounds ([0 x i32], ptr @columns, i64 0, i64 3), align 4
   %or2.i = or i32 %shl1.i, %2
-  %3 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 7), align 4
+  %3 = load i32, ptr getelementptr inbounds ([0 x i32], ptr @columns, i64 0, i64 7), align 4
   %shl3.i = shl i32 %3, 7
-  %4 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 6), align 4
+  %4 = load i32, ptr getelementptr inbounds ([0 x i32], ptr @columns, i64 0, i64 6), align 4
   %or4.i = or i32 %shl3.i, %4
   %shl5.i = shl i32 %or4.i, 7
-  %5 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 5), align 4
+  %5 = load i32, ptr getelementptr inbounds ([0 x i32], ptr @columns, i64 0, i64 5), align 4
   %or6.i = or i32 %shl5.i, %5
   %cmp.i = icmp ugt i32 %or2.i, %or6.i
   br i1 %cmp.i, label %cond.true.i, label %cond.false.i
 
 cond.true.i:
   %shl7.i = shl i32 %or2.i, 7
-  %6 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 4), align 4
+  %6 = load i32, ptr getelementptr inbounds ([0 x i32], ptr @columns, i64 0, i64 4), align 4
   %or8.i = or i32 %6, %shl7.i
   %conv.i = zext i32 %or8.i to i64
   %shl9.i = shl nuw nsw i64 %conv.i, 21
@@ -712,7 +710,7 @@ cond.true.i:
 
 cond.false.i:
   %shl12.i = shl i32 %or6.i, 7
-  %7 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 4), align 4
+  %7 = load i32, ptr getelementptr inbounds ([0 x i32], ptr @columns, i64 0, i64 4), align 4
   %or13.i = or i32 %7, %shl12.i
   %conv14.i = zext i32 %or13.i to i64
   %shl15.i = shl nuw nsw i64 %conv14.i, 21
@@ -724,24 +722,24 @@ hash.exit:
   %cond.i = phi i64 [ %or11.i, %cond.true.i ], [ %or17.i, %cond.false.i ]
   %shr.29.i = lshr i64 %cond.i, 17
   %conv18.i = trunc i64 %shr.29.i to i32
-  store i32 %conv18.i, i32* @lock, align 4
+  store i32 %conv18.i, ptr @lock, align 4
   %rem.i = srem i64 %cond.i, 1050011
   %conv19.i = trunc i64 %rem.i to i32
-  store i32 %conv19.i, i32* @htindex, align 4
+  store i32 %conv19.i, ptr @htindex, align 4
   %rem20.i = urem i32 %conv18.i, 179
   %add.i = or i32 %rem20.i, 131072
-  store i32 %add.i, i32* @stride, align 4
-  %8 = load i32*, i32** @ht, align 8
-  %arrayidx = getelementptr inbounds i32, i32* %8, i64 %rem.i
-  %9 = load i32, i32* %arrayidx, align 4
+  store i32 %add.i, ptr @stride, align 4
+  %8 = load ptr, ptr @ht, align 8
+  %arrayidx = getelementptr inbounds i32, ptr %8, i64 %rem.i
+  %9 = load i32, ptr %arrayidx, align 4
   %cmp1 = icmp eq i32 %9, %conv18.i
   br i1 %cmp1, label %if.then, label %if.end
 
 if.then:
   %idxprom.lcssa = phi i64 [ %rem.i, %hash.exit ], [ %idxprom.1, %if.end ], [ %idxprom.2, %if.end.1 ], [ %idxprom.3, %if.end.2 ], [ %idxprom.4, %if.end.3 ], [ %idxprom.5, %if.end.4 ], [ %idxprom.6, %if.end.5 ], [ %idxprom.7, %if.end.6 ]
-  %10 = load i8*, i8** @he, align 8
-  %arrayidx3 = getelementptr inbounds i8, i8* %10, i64 %idxprom.lcssa
-  %11 = load i8, i8* %arrayidx3, align 1
+  %10 = load ptr, ptr @he, align 8
+  %arrayidx3 = getelementptr inbounds i8, ptr %10, i64 %idxprom.lcssa
+  %11 = load i8, ptr %arrayidx3, align 1
   %conv = sext i8 %11 to i32
   br label %cleanup
 
@@ -751,8 +749,8 @@ if.end:
   %sub = add nsw i32 %add, -1050011
   %sub.add = select i1 %cmp4, i32 %sub, i32 %add
   %idxprom.1 = sext i32 %sub.add to i64
-  %arrayidx.1 = getelementptr inbounds i32, i32* %8, i64 %idxprom.1
-  %12 = load i32, i32* %arrayidx.1, align 4
+  %arrayidx.1 = getelementptr inbounds i32, ptr %8, i64 %idxprom.1
+  %12 = load i32, ptr %arrayidx.1, align 4
   %cmp1.1 = icmp eq i32 %12, %conv18.i
   br i1 %cmp1.1, label %if.then, label %if.end.1
 
@@ -766,8 +764,8 @@ if.end.1:
   %sub.1 = add nsw i32 %add.1, -1050011
   %sub.add.1 = select i1 %cmp4.1, i32 %sub.1, i32 %add.1
   %idxprom.2 = sext i32 %sub.add.1 to i64
-  %arrayidx.2 = getelementptr inbounds i32, i32* %8, i64 %idxprom.2
-  %13 = load i32, i32* %arrayidx.2, align 4
+  %arrayidx.2 = getelementptr inbounds i32, ptr %8, i64 %idxprom.2
+  %13 = load i32, ptr %arrayidx.2, align 4
   %cmp1.2 = icmp eq i32 %13, %conv18.i
   br i1 %cmp1.2, label %if.then, label %if.end.2
 
@@ -777,8 +775,8 @@ if.end.2:
   %sub.2 = add nsw i32 %add.2, -1050011
   %sub.add.2 = select i1 %cmp4.2, i32 %sub.2, i32 %add.2
   %idxprom.3 = sext i32 %sub.add.2 to i64
-  %arrayidx.3 = getelementptr inbounds i32, i32* %8, i64 %idxprom.3
-  %14 = load i32, i32* %arrayidx.3, align 4
+  %arrayidx.3 = getelementptr inbounds i32, ptr %8, i64 %idxprom.3
+  %14 = load i32, ptr %arrayidx.3, align 4
   %cmp1.3 = icmp eq i32 %14, %conv18.i
   br i1 %cmp1.3, label %if.then, label %if.end.3
 
@@ -788,8 +786,8 @@ if.end.3:
   %sub.3 = add nsw i32 %add.3, -1050011
   %sub.add.3 = select i1 %cmp4.3, i32 %sub.3, i32 %add.3
   %idxprom.4 = sext i32 %sub.add.3 to i64
-  %arrayidx.4 = getelementptr inbounds i32, i32* %8, i64 %idxprom.4
-  %15 = load i32, i32* %arrayidx.4, align 4
+  %arrayidx.4 = getelementptr inbounds i32, ptr %8, i64 %idxprom.4
+  %15 = load i32, ptr %arrayidx.4, align 4
   %cmp1.4 = icmp eq i32 %15, %conv18.i
   br i1 %cmp1.4, label %if.then, label %if.end.4
 
@@ -799,8 +797,8 @@ if.end.4:
   %sub.4 = add nsw i32 %add.4, -1050011
   %sub.add.4 = select i1 %cmp4.4, i32 %sub.4, i32 %add.4
   %idxprom.5 = sext i32 %sub.add.4 to i64
-  %arrayidx.5 = getelementptr inbounds i32, i32* %8, i64 %idxprom.5
-  %16 = load i32, i32* %arrayidx.5, align 4
+  %arrayidx.5 = getelementptr inbounds i32, ptr %8, i64 %idxprom.5
+  %16 = load i32, ptr %arrayidx.5, align 4
   %cmp1.5 = icmp eq i32 %16, %conv18.i
   br i1 %cmp1.5, label %if.then, label %if.end.5
 
@@ -810,8 +808,8 @@ if.end.5:
   %sub.5 = add nsw i32 %add.5, -1050011
   %sub.add.5 = select i1 %cmp4.5, i32 %sub.5, i32 %add.5
   %idxprom.6 = sext i32 %sub.add.5 to i64
-  %arrayidx.6 = getelementptr inbounds i32, i32* %8, i64 %idxprom.6
-  %17 = load i32, i32* %arrayidx.6, align 4
+  %arrayidx.6 = getelementptr inbounds i32, ptr %8, i64 %idxprom.6
+  %17 = load i32, ptr %arrayidx.6, align 4
   %cmp1.6 = icmp eq i32 %17, %conv18.i
   br i1 %cmp1.6, label %if.then, label %if.end.6
 
@@ -821,8 +819,8 @@ if.end.6:
   %sub.6 = add nsw i32 %add.6, -1050011
   %sub.add.6 = select i1 %cmp4.6, i32 %sub.6, i32 %add.6
   %idxprom.7 = sext i32 %sub.add.6 to i64
-  %arrayidx.7 = getelementptr inbounds i32, i32* %8, i64 %idxprom.7
-  %18 = load i32, i32* %arrayidx.7, align 4
+  %arrayidx.7 = getelementptr inbounds i32, ptr %8, i64 %idxprom.7
+  %18 = load i32, ptr %arrayidx.7, align 4
   %cmp1.7 = icmp eq i32 %18, %conv18.i
   br i1 %cmp1.7, label %if.then, label %cleanup
 }

diff  --git a/llvm/test/CodeGen/PowerPC/ppc-vaarg-agg.ll b/llvm/test/CodeGen/PowerPC/ppc-vaarg-agg.ll
index 19b85efbf062..e164c1aa5877 100644
--- a/llvm/test/CodeGen/PowerPC/ppc-vaarg-agg.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc-vaarg-agg.ll
@@ -2,9 +2,9 @@
 target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32"
 target triple = "powerpc-montavista-linux-gnuspe"
 
-%struct.__va_list_tag.0.9.18.23.32.41.48.55.62.67.72.77.82.87.90.93.96.101.105 = type { i8, i8, i16, i8*, i8* }
+%struct.__va_list_tag.0.9.18.23.32.41.48.55.62.67.72.77.82.87.90.93.96.101.105 = type { i8, i8, i16, ptr, ptr }
 
-define fastcc void @test1(%struct.__va_list_tag.0.9.18.23.32.41.48.55.62.67.72.77.82.87.90.93.96.101.105* %args) {
+define fastcc void @test1(ptr %args) {
 entry:
   br i1 undef, label %repeat, label %maxlen_reached
 
@@ -30,7 +30,7 @@ sw.bb321:                                         ; preds = %repeat
   unreachable
 
 sw.bb323:                                         ; preds = %repeat
-  %0 = va_arg %struct.__va_list_tag.0.9.18.23.32.41.48.55.62.67.72.77.82.87.90.93.96.101.105* %args, i32
+  %0 = va_arg ptr %args, i32
   unreachable
 
 sw.bb326:                                         ; preds = %repeat

diff  --git a/llvm/test/CodeGen/PowerPC/ppc32-align-long-double-sf.ll b/llvm/test/CodeGen/PowerPC/ppc32-align-long-double-sf.ll
index 740bc787ec9a..7fec7e039eca 100644
--- a/llvm/test/CodeGen/PowerPC/ppc32-align-long-double-sf.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc32-align-long-double-sf.ll
@@ -6,15 +6,15 @@
 
 define void @foo() #0 {
 entry:
-  %0 = load ppc_fp128, ppc_fp128* @x, align 16
-  %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), ppc_fp128 %0)
+  %0 = load ppc_fp128, ptr @x, align 16
+  %call = tail call i32 (ptr, ...) @printf(ptr @.str, ppc_fp128 %0)
   ret void
 }
 ; Do not skip register r4 because of register alignment in soft float mode. Instead skipping 
 ; put in r4 part of first argument for printf function (long double).
 ; CHECK: lwzu 4, x at l({{[0-9]+}})
 
-declare i32 @printf(i8* nocapture readonly, ...) #0
+declare i32 @printf(ptr nocapture readonly, ...) #0
 
 attributes #0 = { "use-soft-float"="true" }
 

diff  --git a/llvm/test/CodeGen/PowerPC/ppc32-constant-BE-ppcf128.ll b/llvm/test/CodeGen/PowerPC/ppc32-constant-BE-ppcf128.ll
index c5bb828f96ca..a272f9e73a04 100644
--- a/llvm/test/CodeGen/PowerPC/ppc32-constant-BE-ppcf128.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc32-constant-BE-ppcf128.ll
@@ -7,7 +7,7 @@ target triple = "powerpc-buildroot-linux-gnu"
 
 define i32 @main() #0 {
 entry:
-  %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), ppc_fp128 0xM3FF00000000000000000000000000000)
+  %call = tail call i32 (ptr, ...) @printf(ptr @.str, ppc_fp128 0xM3FF00000000000000000000000000000)
   ret i32 0
 }
 
@@ -18,7 +18,7 @@ entry:
 ; CHECK: li 5, 0
 ; CHECK: li 7, 0
 
-declare i32 @printf(i8* nocapture readonly, ...)
+declare i32 @printf(ptr nocapture readonly, ...)
 
 attributes #0 = { "use-soft-float"="true" }
 

diff  --git a/llvm/test/CodeGen/PowerPC/ppc32-i1-vaarg.ll b/llvm/test/CodeGen/PowerPC/ppc32-i1-vaarg.ll
index ef754446bf0a..8a09ab83bb92 100644
--- a/llvm/test/CodeGen/PowerPC/ppc32-i1-vaarg.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc32-i1-vaarg.ll
@@ -1,10 +1,10 @@
 ; RUN: llc -verify-machineinstrs < %s -mcpu=ppc32 | FileCheck %s
 target triple = "powerpc-unknown-linux-gnu"
 
-declare void @printf(i8*, ...)
+declare void @printf(ptr, ...)
 
 define void @main() {
-  call void (i8*, ...) @printf(i8* undef, i1 false)
+  call void (ptr, ...) @printf(ptr undef, i1 false)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/ppc32-i64-to-float-conv.ll b/llvm/test/CodeGen/PowerPC/ppc32-i64-to-float-conv.ll
index 5a7e4c2fd2b3..ab4753b51ca8 100644
--- a/llvm/test/CodeGen/PowerPC/ppc32-i64-to-float-conv.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc32-i64-to-float-conv.ll
@@ -15,9 +15,9 @@
 
 define void @foo() local_unnamed_addr {
 entry:
-  %0 = load i64, i64* @ll
+  %0 = load i64, ptr @ll
   %conv = sitofp i64 %0 to float
-  store float %conv, float* getelementptr inbounds (%struct.A, %struct.A* @a, i32 0, i32 0)
+  store float %conv, ptr @a
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/ppc32-lshrti3.ll b/llvm/test/CodeGen/PowerPC/ppc32-lshrti3.ll
index 971ee3f3e939..c9e183c97973 100644
--- a/llvm/test/CodeGen/PowerPC/ppc32-lshrti3.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc32-lshrti3.ll
@@ -9,7 +9,7 @@ target triple = "powerpc--netbsd"
 ; Function Attrs: nounwind uwtable
 define i32 @fn1() #0 {
 entry:
-  %.promoted = load i72, i72* inttoptr (i32 1 to i72*), align 4
+  %.promoted = load i72, ptr inttoptr (i32 1 to ptr), align 4
   br label %while.cond
 
 while.cond:                                       ; preds = %while.cond, %entry
@@ -28,7 +28,7 @@ while.cond:                                       ; preds = %while.cond, %entry
 
 while.end:                                        ; preds = %while.cond
   %bf.set.lcssa = phi i72 [ %bf.set, %while.cond ]
-  store i72 %bf.set.lcssa, i72* inttoptr (i32 1 to i72*), align 4
+  store i72 %bf.set.lcssa, ptr inttoptr (i32 1 to ptr), align 4
   ret i32 undef
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/ppc32-nest.ll b/llvm/test/CodeGen/PowerPC/ppc32-nest.ll
index b933edcf6163..e581fd548654 100644
--- a/llvm/test/CodeGen/PowerPC/ppc32-nest.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc32-nest.ll
@@ -5,22 +5,22 @@ target triple = "powerpc-unknown-linux-gnu"
 ; Tests that the 'nest' parameter attribute causes the relevant parameter to be
 ; passed in the right register (r11 for PPC).
 
-define i8* @nest_receiver(i8* nest %arg) nounwind {
+define ptr @nest_receiver(ptr nest %arg) nounwind {
 ; CHECK-LABEL: nest_receiver:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: mr 3, 11
 ; CHECK-NEXT: blr
 
-  ret i8* %arg
+  ret ptr %arg
 }
 
-define i8* @nest_caller(i8* %arg) nounwind {
+define ptr @nest_caller(ptr %arg) nounwind {
 ; CHECK-LABEL: nest_caller:
 ; CHECK: mr 11, 3
 ; CHECK-NEXT: bl nest_receiver
 ; CHECK: blr
 
-  %result = call i8* @nest_receiver(i8* nest %arg)
-  ret i8* %result
+  %result = call ptr @nest_receiver(ptr nest %arg)
+  ret ptr %result
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/ppc32-pic-large.ll b/llvm/test/CodeGen/PowerPC/ppc32-pic-large.ll
index b583f00787ab..45aeb73b1a6b 100644
--- a/llvm/test/CodeGen/PowerPC/ppc32-pic-large.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc32-pic-large.ll
@@ -16,15 +16,15 @@ declare i32 @call_foo(i32, ...)
 
 define i32 @foo() {
 entry:
-  %0 = load i32, i32* @bar, align 4
+  %0 = load i32, ptr @bar, align 4
   %call = call i32 (i32, ...) @call_foo(i32 %0, i32 0, i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64)
   ret i32 %0
 }
 
 define i32 @load() {
 entry:
-  %0 = load i32, i32* @bar1
-  %1 = load i32, i32* @bar2
+  %0 = load i32, ptr @bar1
+  %1 = load i32, ptr @bar2
   %2 = add i32 %0, %1
   ret i32 %2
 }

diff  --git a/llvm/test/CodeGen/PowerPC/ppc32-pic.ll b/llvm/test/CodeGen/PowerPC/ppc32-pic.ll
index 4d508b5c4210..aed994144940 100644
--- a/llvm/test/CodeGen/PowerPC/ppc32-pic.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc32-pic.ll
@@ -8,7 +8,7 @@ declare i32 @call_foo(i32, ...)
 
 define i32 @foo() {
 entry:
-  %0 = load i32, i32* @bar, align 4
+  %0 = load i32, ptr @bar, align 4
   %call = call i32 (i32, ...) @call_foo(i32 %0, i32 0, i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64)
   ret i32 0
 }

diff  --git a/llvm/test/CodeGen/PowerPC/ppc32-secure-plt-tls.ll b/llvm/test/CodeGen/PowerPC/ppc32-secure-plt-tls.ll
index 0a940459467f..43667509fae2 100644
--- a/llvm/test/CodeGen/PowerPC/ppc32-secure-plt-tls.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc32-secure-plt-tls.ll
@@ -3,7 +3,7 @@
 @a = thread_local local_unnamed_addr global i32 6, align 4
 define i32 @main() local_unnamed_addr #0 {
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   ret i32 %0
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/ppc32-secure-plt-tls2.ll b/llvm/test/CodeGen/PowerPC/ppc32-secure-plt-tls2.ll
index 38ebf99ef729..5af0fa3e61e8 100644
--- a/llvm/test/CodeGen/PowerPC/ppc32-secure-plt-tls2.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc32-secure-plt-tls2.ll
@@ -3,7 +3,7 @@
 @a = thread_local local_unnamed_addr global i32 6, align 4
 define i32 @main() local_unnamed_addr #0 {
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   ret i32 %0
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/ppc32-skip-regs.ll b/llvm/test/CodeGen/PowerPC/ppc32-skip-regs.ll
index 5fae34f212cc..3c3044762ea9 100644
--- a/llvm/test/CodeGen/PowerPC/ppc32-skip-regs.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc32-skip-regs.ll
@@ -8,8 +8,8 @@ target triple = "powerpc-buildroot-linux-gnu"
 
 define void @foo() #0 {
 entry:
-  %0 = load ppc_fp128, ppc_fp128* @x, align 16
-  %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), ppc_fp128 %0, ppc_fp128 %0)
+  %0 = load ppc_fp128, ptr @x, align 16
+  %call = tail call i32 (ptr, ...) @printf(ptr @.str, ppc_fp128 %0, ppc_fp128 %0)
   ret void
 }
 ; Do not put second argument of function in r8 register, because there is no enough registers
@@ -21,6 +21,6 @@ entry:
 ; CHECK: stw 5, 12(1)
 ; CHECK: stw 4, 8(1)
 
-declare i32 @printf(i8* nocapture readonly, ...)
+declare i32 @printf(ptr nocapture readonly, ...)
 
 attributes #0 = { "use-soft-float"="true" }

diff  --git a/llvm/test/CodeGen/PowerPC/ppc32-vacopy.ll b/llvm/test/CodeGen/PowerPC/ppc32-vacopy.ll
index c68881cbb465..60f3852fa498 100644
--- a/llvm/test/CodeGen/PowerPC/ppc32-vacopy.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc32-vacopy.ll
@@ -1,17 +1,15 @@
 ; RUN: llc -verify-machineinstrs -mtriple="powerpc-unknown-linux-gnu" -mcpu=ppc64 < %s | FileCheck %s
 ; PR15286
 
-%va_list = type {i8, i8, i16, i8*, i8*}
-declare void @llvm.va_copy(i8*, i8*)
+%va_list = type {i8, i8, i16, ptr, ptr}
+declare void @llvm.va_copy(ptr, ptr)
 
 define void @test_vacopy() nounwind {
 entry:
 	%0 = alloca %va_list
 	%1 = alloca %va_list
-	%2 = bitcast %va_list* %0 to i8*
-	%3 = bitcast %va_list* %1 to i8*
 
-	call void @llvm.va_copy(i8* %3, i8* %2)
+	call void @llvm.va_copy(ptr %1, ptr %0)
 
 	ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/ppc440-fp-basic.ll b/llvm/test/CodeGen/PowerPC/ppc440-fp-basic.ll
index ed8553dde0ef..fb4985e8b5ee 100644
--- a/llvm/test/CodeGen/PowerPC/ppc440-fp-basic.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc440-fp-basic.ll
@@ -2,32 +2,28 @@
 
 %0 = type { double, double }
 
-define void @maybe_an_fma(%0* sret(%0) %agg.result, %0* byval(%0) %a, %0* byval(%0) %b, %0* byval(%0) %c) nounwind {
+define void @maybe_an_fma(ptr sret(%0) %agg.result, ptr byval(%0) %a, ptr byval(%0) %b, ptr byval(%0) %c) nounwind {
 entry:
-  %a.realp = getelementptr inbounds %0, %0* %a, i32 0, i32 0
-  %a.real = load double, double* %a.realp
-  %a.imagp = getelementptr inbounds %0, %0* %a, i32 0, i32 1
-  %a.imag = load double, double* %a.imagp
-  %b.realp = getelementptr inbounds %0, %0* %b, i32 0, i32 0
-  %b.real = load double, double* %b.realp
-  %b.imagp = getelementptr inbounds %0, %0* %b, i32 0, i32 1
-  %b.imag = load double, double* %b.imagp
+  %a.real = load double, ptr %a
+  %a.imagp = getelementptr inbounds %0, ptr %a, i32 0, i32 1
+  %a.imag = load double, ptr %a.imagp
+  %b.real = load double, ptr %b
+  %b.imagp = getelementptr inbounds %0, ptr %b, i32 0, i32 1
+  %b.imag = load double, ptr %b.imagp
   %mul.rl = fmul double %a.real, %b.real
   %mul.rr = fmul double %a.imag, %b.imag
   %mul.r = fsub double %mul.rl, %mul.rr
   %mul.il = fmul double %a.imag, %b.real
   %mul.ir = fmul double %a.real, %b.imag
   %mul.i = fadd double %mul.il, %mul.ir
-  %c.realp = getelementptr inbounds %0, %0* %c, i32 0, i32 0
-  %c.real = load double, double* %c.realp
-  %c.imagp = getelementptr inbounds %0, %0* %c, i32 0, i32 1
-  %c.imag = load double, double* %c.imagp
+  %c.real = load double, ptr %c
+  %c.imagp = getelementptr inbounds %0, ptr %c, i32 0, i32 1
+  %c.imag = load double, ptr %c.imagp
   %add.r = fadd double %mul.r, %c.real
   %add.i = fadd double %mul.i, %c.imag
-  %real = getelementptr inbounds %0, %0* %agg.result, i32 0, i32 0
-  %imag = getelementptr inbounds %0, %0* %agg.result, i32 0, i32 1
-  store double %add.r, double* %real
-  store double %add.i, double* %imag
+  %imag = getelementptr inbounds %0, ptr %agg.result, i32 0, i32 1
+  store double %add.r, ptr %agg.result
+  store double %add.i, ptr %imag
   ret void
 ; CHECK: fmadd
 }

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64-P9-mod.ll b/llvm/test/CodeGen/PowerPC/ppc64-P9-mod.ll
index e99074e7f90f..090ab9995623 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-P9-mod.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-P9-mod.ll
@@ -21,7 +21,7 @@
 define void @modulo_sw(i32 signext %a, i32 signext %b) local_unnamed_addr {
 entry:
   %rem = srem i32 %a, %b
-  store i32 %rem, i32* @mod_resultsw, align 4
+  store i32 %rem, ptr @mod_resultsw, align 4
   ret void
 ; CHECK-LABEL: modulo_sw
 ; CHECK: modsw {{[0-9]+}}, 3, 4
@@ -67,7 +67,7 @@ entry:
 define void @modulo_ud(i64 %a, i64 %b) local_unnamed_addr {
 entry:
   %rem = urem i64 %a, %b
-  store i64 %rem, i64* @mod_resultud, align 8
+  store i64 %rem, ptr @mod_resultud, align 8
   ret void
 ; CHECK-LABEL: modulo_ud
 ; CHECK: modud {{[0-9]+}}, 3, 4
@@ -83,9 +83,9 @@ entry:
 define void @modulo_div_sw(i32 signext %a, i32 signext %b) local_unnamed_addr {
 entry:
   %rem = srem i32 %a, %b
-  store i32 %rem, i32* @mod_resultsw, align 4
+  store i32 %rem, ptr @mod_resultsw, align 4
   %div = sdiv i32 %a, %b
-  store i32 %div, i32* @div_resultsw, align 4
+  store i32 %div, ptr @div_resultsw, align 4
   ret void
 ; CHECK-LABEL: modulo_div_sw
 ; CHECK: modsw {{[0-9]+}}, 3, 4
@@ -109,9 +109,9 @@ entry:
 define void @modulo_div_abc_sw(i32 signext %a, i32 signext %b, i32 signext %c) local_unnamed_addr {
 entry:
   %rem = srem i32 %a, %c
-  store i32 %rem, i32* @mod_resultsw, align 4
+  store i32 %rem, ptr @mod_resultsw, align 4
   %div = sdiv i32 %b, %c
-  store i32 %div, i32* @div_resultsw, align 4
+  store i32 %div, ptr @div_resultsw, align 4
   ret void
 ; CHECK-LABEL: modulo_div_abc_sw
 ; CHECK: modsw {{[0-9]+}}, 3, 5
@@ -127,9 +127,9 @@ entry:
 define void @modulo_div_uw(i32 zeroext %a, i32 zeroext %b) local_unnamed_addr {
 entry:
   %rem = urem i32 %a, %b
-  store i32 %rem, i32* @mod_resultuw, align 4
+  store i32 %rem, ptr @mod_resultuw, align 4
   %div = udiv i32 %a, %b
-  store i32 %div, i32* @div_resultuw, align 4
+  store i32 %div, ptr @div_resultuw, align 4
   ret void
 ; CHECK-LABEL: modulo_div_uw
 ; CHECK: moduw {{[0-9]+}}, 3, 4
@@ -153,9 +153,9 @@ entry:
 define void @modulo_div_swuw(i32 signext %a, i32 signext %b) local_unnamed_addr {
 entry:
   %rem = srem i32 %a, %b
-  store i32 %rem, i32* @mod_resultsw, align 4
+  store i32 %rem, ptr @mod_resultsw, align 4
   %div = udiv i32 %a, %b
-  store i32 %div, i32* @div_resultsw, align 4
+  store i32 %div, ptr @div_resultsw, align 4
   ret void
 ; CHECK-LABEL: modulo_div_swuw
 ; CHECK: modsw {{[0-9]+}}, 3, 4
@@ -171,9 +171,9 @@ entry:
 define void @modulo_div_udsd(i64 %a, i64 %b) local_unnamed_addr {
 entry:
   %rem = urem i64 %a, %b
-  store i64 %rem, i64* @mod_resultud, align 8
+  store i64 %rem, ptr @mod_resultud, align 8
   %div = sdiv i64 %a, %b
-  store i64 %div, i64* @div_resultsd, align 8
+  store i64 %div, ptr @div_resultsd, align 8
   ret void
 ; CHECK-LABEL: modulo_div_udsd
 ; CHECK: modud {{[0-9]+}}, 3, 4
@@ -189,7 +189,7 @@ entry:
 define void @modulo_const32_sw(i32 signext %a) local_unnamed_addr {
 entry:
   %rem = srem i32 %a, 32
-  store i32 %rem, i32* @mod_resultsw, align 4
+  store i32 %rem, ptr @mod_resultsw, align 4
   ret void
 ; CHECK-LABEL: modulo_const32_sw
 ; CHECK-NOT: modsw
@@ -247,13 +247,13 @@ entry:
 define void @blocks_modulo_div_sw(i32 signext %a, i32 signext %b, i32 signext %c) local_unnamed_addr {
 entry:
   %div = sdiv i32 %a, %b
-  store i32 %div, i32* @div_resultsw, align 4
+  store i32 %div, ptr @div_resultsw, align 4
   %cmp = icmp sgt i32 %c, 0
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
   %rem = srem i32 %a, %b
-  store i32 %rem, i32* @mod_resultsw, align 4
+  store i32 %rem, ptr @mod_resultsw, align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64-P9-setb.ll b/llvm/test/CodeGen/PowerPC/ppc64-P9-setb.ll
index 98856d097ea5..9cb61a3d457f 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-P9-setb.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-P9-setb.ll
@@ -1369,7 +1369,7 @@ define i64 @setbn3(float %a, float %b) {
 }
 
 ; Verify this case doesn't crash
-define void @setbn4(i128 %0, i32* %sel.out) {
+define void @setbn4(i128 %0, ptr %sel.out) {
 ; CHECK-LABEL: setbn4:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li r6, 1
@@ -1408,6 +1408,6 @@ entry:
   %c2 = icmp ugt i128 %0, 5192296858534827628530496329220096
   %ext = zext i1 %c2 to i32
   %sel = select i1 %c1, i32 -1, i32 %ext
-  store i32 %sel, i32* %sel.out, align 4
+  store i32 %sel, ptr %sel.out, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64-abi-extend.ll b/llvm/test/CodeGen/PowerPC/ppc64-abi-extend.ll
index 38fa2ee5cd0d..35fcc2756d07 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-abi-extend.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-abi-extend.ll
@@ -15,7 +15,7 @@ declare zeroext i32 @ret_ui()
 
 define void @pass_arg_si() nounwind {
 entry:
-  %0 = load i32, i32* @si, align 4
+  %0 = load i32, ptr @si, align 4
   tail call void @arg_si(i32 signext %0) nounwind
   ret void
 }
@@ -25,7 +25,7 @@ entry:
 
 define void @pass_arg_ui() nounwind {
 entry:
-  %0 = load i32, i32* @ui, align 4
+  %0 = load i32, ptr @ui, align 4
   tail call void @arg_ui(i32 zeroext %0) nounwind
   ret void
 }
@@ -53,7 +53,7 @@ entry:
 
 define signext i32 @pass_ret_si() nounwind readonly {
 entry:
-  %0 = load i32, i32* @si, align 4
+  %0 = load i32, ptr @si, align 4
   ret i32 %0
 }
 ; CHECK: @pass_ret_si
@@ -62,7 +62,7 @@ entry:
 
 define zeroext i32 @pass_ret_ui() nounwind readonly {
 entry:
-  %0 = load i32, i32* @ui, align 4
+  %0 = load i32, ptr @ui, align 4
   ret i32 %0
 }
 ; CHECK: @pass_ret_ui

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64-acc-regalloc-bugfix.ll b/llvm/test/CodeGen/PowerPC/ppc64-acc-regalloc-bugfix.ll
index 154be01d9cc9..23e104f59ee2 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-acc-regalloc-bugfix.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-acc-regalloc-bugfix.ll
@@ -14,7 +14,7 @@ dmblvi_entry:
   %0 = tail call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> zeroinitializer, <16 x i8> undef, <16 x i8> undef, <16 x i8> zeroinitializer)
   %1 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.ppc.mma.disassemble.acc(<512 x i1> %0)
   %2 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %1, 2
-  store <16 x i8> %2, <16 x i8>* null, align 1
+  store <16 x i8> %2, ptr null, align 1
   unreachable
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64-acc-regalloc.ll b/llvm/test/CodeGen/PowerPC/ppc64-acc-regalloc.ll
index 49f8b43f7a82..940075d98673 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-acc-regalloc.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-acc-regalloc.ll
@@ -9,7 +9,7 @@
 %0 = type <{ double }>
 %1 = type <{ double }>
 
-define void @acc_regalloc(i32* %arg, [0 x %0]* %arg1, [0 x %1]* %arg2) local_unnamed_addr {
+define void @acc_regalloc(ptr %arg, ptr %arg1, ptr %arg2) local_unnamed_addr {
 ; CHECK-LABEL: acc_regalloc:
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    lwz r3, 0(r3)
@@ -203,38 +203,32 @@ define void @acc_regalloc(i32* %arg, [0 x %0]* %arg1, [0 x %1]* %arg2) local_unn
 ; TRACKLIVE-NEXT:    stxv vs12, 48(0)
 ; TRACKLIVE-NEXT:    b .LBB0_1
 bb:
-  %i = load i32, i32* %arg, align 4
+  %i = load i32, ptr %arg, align 4
   %i3 = sext i32 %i to i64
   %i4 = shl nsw i64 %i3, 3
-  %i5 = bitcast [0 x %0]* %arg1 to i8*
-  %i6 = getelementptr i8, i8* %i5, i64 undef
-  %i7 = getelementptr [0 x %1], [0 x %1]* %arg2, i64 0, i64 -8
-  %i8 = getelementptr i8, i8* %i6, i64 undef
+  %i6 = getelementptr i8, ptr %arg1, i64 undef
+  %i7 = getelementptr [0 x %1], ptr %arg2, i64 0, i64 -8
+  %i8 = getelementptr i8, ptr %i6, i64 undef
   br label %bb9
 
 bb9:                                              ; preds = %bb95, %bb
   %i10 = phi i64 [ 1, %bb ], [ 0, %bb95 ]
-  %i11 = getelementptr %1, %1* null, i64 2
-  %i12 = bitcast %1* %i11 to <2 x double>*
-  %i13 = load <2 x double>, <2 x double>* %i12, align 1
+  %i11 = getelementptr %1, ptr null, i64 2
+  %i13 = load <2 x double>, ptr %i11, align 1
   %i14 = add nuw nsw i64 %i10, 2
-  %i15 = getelementptr inbounds %1, %1* %i7, i64 undef
-  %i16 = bitcast %1* %i15 to <2 x double>*
-  %i17 = load <2 x double>, <2 x double>* %i16, align 1
-  %i18 = load <2 x double>, <2 x double>* null, align 1
-  %i19 = getelementptr %1, %1* %i15, i64 6
-  %i20 = bitcast %1* %i19 to <2 x double>*
-  %i21 = load <2 x double>, <2 x double>* %i20, align 1
-  %i22 = load i64, i64* undef, align 8
+  %i15 = getelementptr inbounds %1, ptr %i7, i64 undef
+  %i17 = load <2 x double>, ptr %i15, align 1
+  %i18 = load <2 x double>, ptr null, align 1
+  %i19 = getelementptr %1, ptr %i15, i64 6
+  %i21 = load <2 x double>, ptr %i19, align 1
+  %i22 = load i64, ptr undef, align 8
   %i23 = insertelement <2 x i64> poison, i64 %i22, i32 0
   %i24 = bitcast <2 x i64> %i23 to <2 x double>
   %i25 = shufflevector <2 x double> %i24, <2 x double> undef, <2 x i32> zeroinitializer
   %i26 = mul i64 %i14, %i4
-  %i27 = getelementptr i8, i8* null, i64 %i26
-  %i28 = getelementptr inbounds i8, i8* %i27, i64 0
-  %i29 = getelementptr i8, i8* %i28, i64 16
-  %i30 = bitcast i8* %i29 to i64*
-  %i31 = load i64, i64* %i30, align 8
+  %i27 = getelementptr i8, ptr null, i64 %i26
+  %i29 = getelementptr i8, ptr %i27, i64 16
+  %i31 = load i64, ptr %i29, align 8
   %i32 = insertelement <2 x i64> poison, i64 %i31, i32 0
   %i33 = bitcast <2 x i64> %i32 to <2 x double>
   %i34 = shufflevector <2 x double> %i33, <2 x double> undef, <2 x i32> zeroinitializer
@@ -311,18 +305,14 @@ bb95:                                             ; preds = %bb82
   %i101 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %i100, 2
   %i102 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.ppc.mma.disassemble.acc(<512 x i1> %i94)
   %i103 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %i102, 3
-  %i104 = getelementptr inbounds i8, i8* %i8, i64 undef
-  %i105 = bitcast i8* %i104 to <16 x i8>*
-  store <16 x i8> %i97, <16 x i8>* %i105, align 1
-  %i106 = getelementptr i8, i8* %i104, i64 32
-  %i107 = bitcast i8* %i106 to <16 x i8>*
-  store <16 x i8> %i101, <16 x i8>* %i107, align 1
-  %i108 = getelementptr i8, i8* null, i64 16
-  %i109 = bitcast i8* %i108 to <16 x i8>*
-  store <16 x i8> %i99, <16 x i8>* %i109, align 1
-  %i110 = getelementptr i8, i8* null, i64 48
-  %i111 = bitcast i8* %i110 to <16 x i8>*
-  store <16 x i8> %i103, <16 x i8>* %i111, align 1
+  %i104 = getelementptr inbounds i8, ptr %i8, i64 undef
+  store <16 x i8> %i97, ptr %i104, align 1
+  %i106 = getelementptr i8, ptr %i104, i64 32
+  store <16 x i8> %i101, ptr %i106, align 1
+  %i108 = getelementptr i8, ptr null, i64 16
+  store <16 x i8> %i99, ptr %i108, align 1
+  %i110 = getelementptr i8, ptr null, i64 48
+  store <16 x i8> %i103, ptr %i110, align 1
   br label %bb9
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64-align-long-double.ll b/llvm/test/CodeGen/PowerPC/ppc64-align-long-double.ll
index 3d5fef656eb9..f6941e1d7398 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-align-long-double.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-align-long-double.ll
@@ -18,7 +18,7 @@ target triple = "powerpc64-unknown-linux-gnu"
 ; value. Since the target does bitcast through memory and we no longer
 ; remember the address we need to do the store in a fresh local
 ; address.
-define ppc_fp128 @test(%struct.S* byval(%struct.S) %x) nounwind {
+define ppc_fp128 @test(ptr byval(%struct.S) %x) nounwind {
 ; CHECK-LABEL: test:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    std 5, -16(1)
@@ -53,8 +53,8 @@ define ppc_fp128 @test(%struct.S* byval(%struct.S) %x) nounwind {
 ; CHECK-P9-NEXT:    std 4, 56(1)
 ; CHECK-P9-NEXT:    blr
 entry:
-  %b = getelementptr inbounds %struct.S, %struct.S* %x, i32 0, i32 1
-  %0 = load ppc_fp128, ppc_fp128* %b, align 16
+  %b = getelementptr inbounds %struct.S, ptr %x, i32 0, i32 1
+  %0 = load ppc_fp128, ptr %b, align 16
   ret ppc_fp128 %0
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64-blnop.ll b/llvm/test/CodeGen/PowerPC/ppc64-blnop.ll
index da08390e0748..fdcf1bb8c7f1 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-blnop.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-blnop.ll
@@ -10,9 +10,9 @@
 
 %class.T = type { [2 x i8] }
 
-define void @e_callee(%class.T* %this, i8* %c) { ret void }
-define void @e_caller(%class.T* %this, i8* %c) {
-  call void @e_callee(%class.T* %this, i8* %c)
+define void @e_callee(ptr %this, ptr %c) { ret void }
+define void @e_caller(ptr %this, ptr %c) {
+  call void @e_callee(ptr %this, ptr %c)
   ret void
 
 ; CHECK-LABEL: e_caller:
@@ -24,9 +24,9 @@ define void @e_caller(%class.T* %this, i8* %c) {
 ; CHECK-FS-NEXT: nop
 }
 
-define void @e_scallee(%class.T* %this, i8* %c) section "different" { ret void }
-define void @e_scaller(%class.T* %this, i8* %c) {
-  call void @e_scallee(%class.T* %this, i8* %c)
+define void @e_scallee(ptr %this, ptr %c) section "different" { ret void }
+define void @e_scaller(ptr %this, ptr %c) {
+  call void @e_scallee(ptr %this, ptr %c)
   ret void
 
 ; CHECK-LABEL: e_scaller:
@@ -34,9 +34,9 @@ define void @e_scaller(%class.T* %this, i8* %c) {
 ; CHECK-NEXT: nop
 }
 
-define void @e_s2callee(%class.T* %this, i8* %c) { ret void }
-define void @e_s2caller(%class.T* %this, i8* %c) section "different" {
-  call void @e_s2callee(%class.T* %this, i8* %c)
+define void @e_s2callee(ptr %this, ptr %c) { ret void }
+define void @e_s2caller(ptr %this, ptr %c) section "different" {
+  call void @e_s2callee(ptr %this, ptr %c)
   ret void
 
 ; CHECK-LABEL: e_s2caller:
@@ -47,9 +47,9 @@ define void @e_s2caller(%class.T* %this, i8* %c) section "different" {
 $cd1 = comdat any
 $cd2 = comdat any
 
-define void @e_ccallee(%class.T* %this, i8* %c) comdat($cd1) { ret void }
-define void @e_ccaller(%class.T* %this, i8* %c) comdat($cd2) {
-  call void @e_ccallee(%class.T* %this, i8* %c)
+define void @e_ccallee(ptr %this, ptr %c) comdat($cd1) { ret void }
+define void @e_ccaller(ptr %this, ptr %c) comdat($cd2) {
+  call void @e_ccallee(ptr %this, ptr %c)
   ret void
 
 ; CHECK-LABEL: e_ccaller:
@@ -59,9 +59,9 @@ define void @e_ccaller(%class.T* %this, i8* %c) comdat($cd2) {
 
 $cd = comdat any
 
-define void @e_c1callee(%class.T* %this, i8* %c) comdat($cd) { ret void }
-define void @e_c1caller(%class.T* %this, i8* %c) comdat($cd) {
-  call void @e_c1callee(%class.T* %this, i8* %c)
+define void @e_c1callee(ptr %this, ptr %c) comdat($cd) { ret void }
+define void @e_c1caller(ptr %this, ptr %c) comdat($cd) {
+  call void @e_c1callee(ptr %this, ptr %c)
   ret void
 
 ; CHECK-LABEL: e_c1caller:
@@ -69,9 +69,9 @@ define void @e_c1caller(%class.T* %this, i8* %c) comdat($cd) {
 ; CHECK-NEXT: nop
 }
 
-define weak_odr hidden void @wo_hcallee(%class.T* %this, i8* %c) { ret void }
-define void @wo_hcaller(%class.T* %this, i8* %c) {
-  call void @wo_hcallee(%class.T* %this, i8* %c)
+define weak_odr hidden void @wo_hcallee(ptr %this, ptr %c) { ret void }
+define void @wo_hcaller(ptr %this, ptr %c) {
+  call void @wo_hcallee(ptr %this, ptr %c)
   ret void
 
 ; CHECK-LABEL: wo_hcaller:
@@ -83,9 +83,9 @@ define void @wo_hcaller(%class.T* %this, i8* %c) {
 ; SCM-NEXT:  nop
 }
 
-define weak_odr protected void @wo_pcallee(%class.T* %this, i8* %c) { ret void }
-define void @wo_pcaller(%class.T* %this, i8* %c) {
-  call void @wo_pcallee(%class.T* %this, i8* %c)
+define weak_odr protected void @wo_pcallee(ptr %this, ptr %c) { ret void }
+define void @wo_pcaller(ptr %this, ptr %c) {
+  call void @wo_pcallee(ptr %this, ptr %c)
   ret void
 
 ; CHECK-LABEL: wo_pcaller:
@@ -97,9 +97,9 @@ define void @wo_pcaller(%class.T* %this, i8* %c) {
 ; SCM-NEXT:    nop
 }
 
-define weak_odr void @wo_callee(%class.T* %this, i8* %c) { ret void }
-define void @wo_caller(%class.T* %this, i8* %c) {
-  call void @wo_callee(%class.T* %this, i8* %c)
+define weak_odr void @wo_callee(ptr %this, ptr %c) { ret void }
+define void @wo_caller(ptr %this, ptr %c) {
+  call void @wo_callee(ptr %this, ptr %c)
   ret void
 
 ; CHECK-LABEL: wo_caller:
@@ -107,9 +107,9 @@ define void @wo_caller(%class.T* %this, i8* %c) {
 ; CHECK-NEXT: nop
 }
 
-define weak protected void @w_pcallee(i8* %ptr) { ret void }
-define void @w_pcaller(i8* %ptr) {
-  call void @w_pcallee(i8* %ptr)
+define weak protected void @w_pcallee(ptr %ptr) { ret void }
+define void @w_pcaller(ptr %ptr) {
+  call void @w_pcallee(ptr %ptr)
   ret void
 
 ; CHECK-LABEL: w_pcaller:
@@ -121,9 +121,9 @@ define void @w_pcaller(i8* %ptr) {
 ; SCM-NEXT:  nop
 }
 
-define weak hidden void @w_hcallee(i8* %ptr) { ret void }
-define void @w_hcaller(i8* %ptr) {
-  call void @w_hcallee(i8* %ptr)
+define weak hidden void @w_hcallee(ptr %ptr) { ret void }
+define void @w_hcaller(ptr %ptr) {
+  call void @w_hcallee(ptr %ptr)
   ret void
 
 ; CHECK-LABEL: w_hcaller:
@@ -135,9 +135,9 @@ define void @w_hcaller(i8* %ptr) {
 ; SCM-NEXT:  nop
 }
 
-define weak void @w_callee(i8* %ptr) { ret void }
-define void @w_caller(i8* %ptr) {
-  call void @w_callee(i8* %ptr)
+define weak void @w_callee(ptr %ptr) { ret void }
+define void @w_caller(ptr %ptr) {
+  call void @w_callee(ptr %ptr)
   ret void
 
 ; CHECK-LABEL: w_caller:

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64-byval-align.ll b/llvm/test/CodeGen/PowerPC/ppc64-byval-align.ll
index 68ae7488e49b..46f1622a46d4 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-byval-align.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-byval-align.ll
@@ -9,7 +9,7 @@ target triple = "powerpc64-unknown-linux-gnu"
 @gt = common global %struct.test zeroinitializer, align 16
 @gp = common global %struct.pad zeroinitializer, align 8
 
-define signext i32 @callee1(i32 signext %x, %struct.test* byval(%struct.test) align 16 nocapture readnone %y, i32 signext %z) {
+define signext i32 @callee1(i32 signext %x, ptr byval(%struct.test) align 16 nocapture readnone %y, i32 signext %z) {
 entry:
   ret i32 %z
 }
@@ -17,33 +17,31 @@ entry:
 ; CHECK: mr 3, 7
 ; CHECK: blr
 
-declare signext i32 @test1(i32 signext, %struct.test* byval(%struct.test) align 16, i32 signext)
+declare signext i32 @test1(i32 signext, ptr byval(%struct.test) align 16, i32 signext)
 define void @caller1(i32 signext %z) {
 entry:
-  %call = tail call signext i32 @test1(i32 signext 0, %struct.test* byval(%struct.test) align 16 @gt, i32 signext %z)
+  %call = tail call signext i32 @test1(i32 signext 0, ptr byval(%struct.test) align 16 @gt, i32 signext %z)
   ret void
 }
 ; CHECK-LABEL: @caller1
 ; CHECK: mr 7, 3
 ; CHECK: bl test1
 
-define i64 @callee2(%struct.pad* byval(%struct.pad) nocapture readnone %x, i32 signext %y, %struct.test* byval(%struct.test) align 16 nocapture readonly %z) {
+define i64 @callee2(ptr byval(%struct.pad) nocapture readnone %x, i32 signext %y, ptr byval(%struct.test) align 16 nocapture readonly %z) {
 entry:
-  %x1 = getelementptr inbounds %struct.test, %struct.test* %z, i64 0, i32 0
-  %0 = load i64, i64* %x1, align 16
+  %0 = load i64, ptr %z, align 16
   ret i64 %0
 }
 ; CHECK-LABEL: @callee2
 ; CHECK: ld {{[0-9]+}}, 128(1)
 ; CHECK: blr
 
-declare i64 @test2(%struct.pad* byval(%struct.pad), i32 signext, %struct.test* byval(%struct.test) align 16)
+declare i64 @test2(ptr byval(%struct.pad), i32 signext, ptr byval(%struct.test) align 16)
 define void @caller2(i64 %z) {
 entry:
   %tmp = alloca %struct.test, align 16
-  %.compoundliteral.sroa.0.0..sroa_idx = getelementptr inbounds %struct.test, %struct.test* %tmp, i64 0, i32 0
-  store i64 %z, i64* %.compoundliteral.sroa.0.0..sroa_idx, align 16
-  %call = call i64 @test2(%struct.pad* byval(%struct.pad) @gp, i32 signext 0, %struct.test* byval(%struct.test) align 16 %tmp)
+  store i64 %z, ptr %tmp, align 16
+  %call = call i64 @test2(ptr byval(%struct.pad) @gp, i32 signext 0, ptr byval(%struct.test) align 16 %tmp)
   ret void
 }
 ; CHECK-LABEL: @caller2

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64-byval-larger-struct.ll b/llvm/test/CodeGen/PowerPC/ppc64-byval-larger-struct.ll
index 63e94923986f..4f79f9293bb9 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-byval-larger-struct.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-byval-larger-struct.ll
@@ -12,7 +12,7 @@
 ; RUN: llc -verify-machineinstrs --mtriple powerpc64-unknown-linux-gnu \
 ; RUN:   -mcpu=pwr10 -ppc-asm-full-reg-names < %s | FileCheck %s --check-prefix=P10BE
 
-define signext i8 @caller_9([9 x i8]* nocapture readonly byval([9 x i8]) %data) #0 {
+define signext i8 @caller_9(ptr nocapture readonly byval([9 x i8]) %data) #0 {
 ; P8LE-LABEL: caller_9:
 ; P8LE:       # %bb.0: # %entry
 ; P8LE-NEXT:    mflr r0
@@ -133,47 +133,45 @@ define signext i8 @caller_9([9 x i8]* nocapture readonly byval([9 x i8]) %data)
 ; P10BE-NEXT:    blr
 entry:
   %_param_data = alloca [9 x i8], align 1
-  %.elt0 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 0
-  %.unpack0 = load i8, i8* %.elt0, align 1
-  %.elt1 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 1
-  %.unpack1 = load i8, i8* %.elt1, align 1
-  %.elt2 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 2
-  %.unpack2 = load i8, i8* %.elt2, align 1
-  %.elt3 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 3
-  %.unpack3 = load i8, i8* %.elt3, align 1
-  %.elt4 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 4
-  %.unpack4 = load i8, i8* %.elt4, align 1
-  %.elt5 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 5
-  %.unpack5 = load i8, i8* %.elt5, align 1
-  %.elt6 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 6
-  %.unpack6 = load i8, i8* %.elt6, align 1
-  %.elt7 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 7
-  %.unpack7 = load i8, i8* %.elt7, align 1
-  %.elt8 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 8
-  %.unpack8 = load i8, i8* %.elt8, align 1
-  %.temp.0.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 0
-  store i8 %.unpack0, i8* %.temp.0.gep, align 1
-  %.temp.1.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 1
-  store i8 %.unpack1, i8* %.temp.1.gep, align 1
-  %.temp.2.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 2
-  store i8 %.unpack2, i8* %.temp.2.gep, align 1
-  %.temp.3.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 3
-  store i8 %.unpack3, i8* %.temp.3.gep, align 1
-  %.temp.4.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 4
-  store i8 %.unpack4, i8* %.temp.4.gep, align 1
-  %.temp.5.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 5
-  store i8 %.unpack5, i8* %.temp.5.gep, align 1
-  %.temp.6.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 6
-  store i8 %.unpack6, i8* %.temp.6.gep, align 1
-  %.temp.7.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 7
-  store i8 %.unpack7, i8* %.temp.7.gep, align 1
-  %.temp.8.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 8
-  store i8 %.unpack8, i8* %.temp.8.gep, align 1
-  call void @callee(i8* nonnull %.temp.0.gep)
+  %.unpack0 = load i8, ptr %data, align 1
+  %.elt1 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 1
+  %.unpack1 = load i8, ptr %.elt1, align 1
+  %.elt2 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 2
+  %.unpack2 = load i8, ptr %.elt2, align 1
+  %.elt3 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 3
+  %.unpack3 = load i8, ptr %.elt3, align 1
+  %.elt4 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 4
+  %.unpack4 = load i8, ptr %.elt4, align 1
+  %.elt5 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 5
+  %.unpack5 = load i8, ptr %.elt5, align 1
+  %.elt6 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 6
+  %.unpack6 = load i8, ptr %.elt6, align 1
+  %.elt7 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 7
+  %.unpack7 = load i8, ptr %.elt7, align 1
+  %.elt8 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 8
+  %.unpack8 = load i8, ptr %.elt8, align 1
+  store i8 %.unpack0, ptr %_param_data, align 1
+  %.temp.1.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 1
+  store i8 %.unpack1, ptr %.temp.1.gep, align 1
+  %.temp.2.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 2
+  store i8 %.unpack2, ptr %.temp.2.gep, align 1
+  %.temp.3.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 3
+  store i8 %.unpack3, ptr %.temp.3.gep, align 1
+  %.temp.4.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 4
+  store i8 %.unpack4, ptr %.temp.4.gep, align 1
+  %.temp.5.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 5
+  store i8 %.unpack5, ptr %.temp.5.gep, align 1
+  %.temp.6.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 6
+  store i8 %.unpack6, ptr %.temp.6.gep, align 1
+  %.temp.7.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 7
+  store i8 %.unpack7, ptr %.temp.7.gep, align 1
+  %.temp.8.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 8
+  store i8 %.unpack8, ptr %.temp.8.gep, align 1
+  call void @callee(ptr nonnull %_param_data)
   ret i8 0
 }
 
-define signext i8 @caller_9_callee_9([9 x i8]* nocapture readonly byval([9 x i8]) %data) #0 {
+define signext i8 @caller_9_callee_9(ptr nocapture readonly byval([9 x i8]) %data) #0 {
 ; P8LE-LABEL: caller_9_callee_9:
 ; P8LE:       # %bb.0: # %entry
 ; P8LE-NEXT:    mflr r0
@@ -300,47 +298,45 @@ define signext i8 @caller_9_callee_9([9 x i8]* nocapture readonly byval([9 x i8]
 ; P10BE-NEXT:    blr
 entry:
   %_param_data = alloca [9 x i8], align 1
-  %.elt0 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 0
-  %.unpack0 = load i8, i8* %.elt0, align 1
-  %.elt1 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 1
-  %.unpack1 = load i8, i8* %.elt1, align 1
-  %.elt2 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 2
-  %.unpack2 = load i8, i8* %.elt2, align 1
-  %.elt3 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 3
-  %.unpack3 = load i8, i8* %.elt3, align 1
-  %.elt4 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 4
-  %.unpack4 = load i8, i8* %.elt4, align 1
-  %.elt5 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 5
-  %.unpack5 = load i8, i8* %.elt5, align 1
-  %.elt6 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 6
-  %.unpack6 = load i8, i8* %.elt6, align 1
-  %.elt7 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 7
-  %.unpack7 = load i8, i8* %.elt7, align 1
-  %.elt8 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 8
-  %.unpack8 = load i8, i8* %.elt8, align 1
-  %.temp.0.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 0
-  store i8 %.unpack0, i8* %.temp.0.gep, align 1
-  %.temp.1.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 1
-  store i8 %.unpack1, i8* %.temp.1.gep, align 1
-  %.temp.2.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 2
-  store i8 %.unpack2, i8* %.temp.2.gep, align 1
-  %.temp.3.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 3
-  store i8 %.unpack3, i8* %.temp.3.gep, align 1
-  %.temp.4.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 4
-  store i8 %.unpack4, i8* %.temp.4.gep, align 1
-  %.temp.5.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 5
-  store i8 %.unpack5, i8* %.temp.5.gep, align 1
-  %.temp.6.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 6
-  store i8 %.unpack6, i8* %.temp.6.gep, align 1
-  %.temp.7.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 7
-  store i8 %.unpack7, i8* %.temp.7.gep, align 1
-  %.temp.8.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 8
-  store i8 %.unpack8, i8* %.temp.8.gep, align 1
-  call void @callee_9([9 x i8]* nocapture readonly byval([9 x i8]) %data)
+  %.unpack0 = load i8, ptr %data, align 1
+  %.elt1 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 1
+  %.unpack1 = load i8, ptr %.elt1, align 1
+  %.elt2 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 2
+  %.unpack2 = load i8, ptr %.elt2, align 1
+  %.elt3 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 3
+  %.unpack3 = load i8, ptr %.elt3, align 1
+  %.elt4 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 4
+  %.unpack4 = load i8, ptr %.elt4, align 1
+  %.elt5 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 5
+  %.unpack5 = load i8, ptr %.elt5, align 1
+  %.elt6 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 6
+  %.unpack6 = load i8, ptr %.elt6, align 1
+  %.elt7 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 7
+  %.unpack7 = load i8, ptr %.elt7, align 1
+  %.elt8 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 8
+  %.unpack8 = load i8, ptr %.elt8, align 1
+  store i8 %.unpack0, ptr %_param_data, align 1
+  %.temp.1.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 1
+  store i8 %.unpack1, ptr %.temp.1.gep, align 1
+  %.temp.2.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 2
+  store i8 %.unpack2, ptr %.temp.2.gep, align 1
+  %.temp.3.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 3
+  store i8 %.unpack3, ptr %.temp.3.gep, align 1
+  %.temp.4.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 4
+  store i8 %.unpack4, ptr %.temp.4.gep, align 1
+  %.temp.5.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 5
+  store i8 %.unpack5, ptr %.temp.5.gep, align 1
+  %.temp.6.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 6
+  store i8 %.unpack6, ptr %.temp.6.gep, align 1
+  %.temp.7.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 7
+  store i8 %.unpack7, ptr %.temp.7.gep, align 1
+  %.temp.8.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 8
+  store i8 %.unpack8, ptr %.temp.8.gep, align 1
+  call void @callee_9(ptr nocapture readonly byval([9 x i8]) %data)
   ret i8 0
 }
 
-define signext i8 @caller_10([10 x i8]* nocapture readonly byval([10 x i8]) %data) #0 {
+define signext i8 @caller_10(ptr nocapture readonly byval([10 x i8]) %data) #0 {
 ; P8LE-LABEL: caller_10:
 ; P8LE:       # %bb.0: # %entry
 ; P8LE-NEXT:    mflr r0
@@ -461,51 +457,49 @@ define signext i8 @caller_10([10 x i8]* nocapture readonly byval([10 x i8]) %dat
 ; P10BE-NEXT:    blr
 entry:
   %_param_data = alloca [10 x i8], align 1
-  %.elt0 = getelementptr inbounds [10 x i8], [10 x i8]* %data, i64 0, i64 0
-  %.unpack0 = load i8, i8* %.elt0, align 1
-  %.elt1 = getelementptr inbounds [10 x i8], [10 x i8]* %data, i64 0, i64 1
-  %.unpack1 = load i8, i8* %.elt1, align 1
-  %.elt2 = getelementptr inbounds [10 x i8], [10 x i8]* %data, i64 0, i64 2
-  %.unpack2 = load i8, i8* %.elt2, align 1
-  %.elt3 = getelementptr inbounds [10 x i8], [10 x i8]* %data, i64 0, i64 3
-  %.unpack3 = load i8, i8* %.elt3, align 1
-  %.elt4 = getelementptr inbounds [10 x i8], [10 x i8]* %data, i64 0, i64 4
-  %.unpack4 = load i8, i8* %.elt4, align 1
-  %.elt5 = getelementptr inbounds [10 x i8], [10 x i8]* %data, i64 0, i64 5
-  %.unpack5 = load i8, i8* %.elt5, align 1
-  %.elt6 = getelementptr inbounds [10 x i8], [10 x i8]* %data, i64 0, i64 6
-  %.unpack6 = load i8, i8* %.elt6, align 1
-  %.elt7 = getelementptr inbounds [10 x i8], [10 x i8]* %data, i64 0, i64 7
-  %.unpack7 = load i8, i8* %.elt7, align 1
-  %.elt8 = getelementptr inbounds [10 x i8], [10 x i8]* %data, i64 0, i64 8
-  %.unpack8 = load i8, i8* %.elt8, align 1
-  %.elt9 = getelementptr inbounds [10 x i8], [10 x i8]* %data, i64 0, i64 9
-  %.unpack9 = load i8, i8* %.elt9, align 1
-  %.temp.0.gep = getelementptr inbounds [10 x i8], [10 x i8]* %_param_data, i64 0, i64 0
-  store i8 %.unpack0, i8* %.temp.0.gep, align 1
-  %.temp.1.gep = getelementptr inbounds [10 x i8], [10 x i8]* %_param_data, i64 0, i64 1
-  store i8 %.unpack1, i8* %.temp.1.gep, align 1
-  %.temp.2.gep = getelementptr inbounds [10 x i8], [10 x i8]* %_param_data, i64 0, i64 2
-  store i8 %.unpack2, i8* %.temp.2.gep, align 1
-  %.temp.3.gep = getelementptr inbounds [10 x i8], [10 x i8]* %_param_data, i64 0, i64 3
-  store i8 %.unpack3, i8* %.temp.3.gep, align 1
-  %.temp.4.gep = getelementptr inbounds [10 x i8], [10 x i8]* %_param_data, i64 0, i64 4
-  store i8 %.unpack4, i8* %.temp.4.gep, align 1
-  %.temp.5.gep = getelementptr inbounds [10 x i8], [10 x i8]* %_param_data, i64 0, i64 5
-  store i8 %.unpack5, i8* %.temp.5.gep, align 1
-  %.temp.6.gep = getelementptr inbounds [10 x i8], [10 x i8]* %_param_data, i64 0, i64 6
-  store i8 %.unpack6, i8* %.temp.6.gep, align 1
-  %.temp.7.gep = getelementptr inbounds [10 x i8], [10 x i8]* %_param_data, i64 0, i64 7
-  store i8 %.unpack7, i8* %.temp.7.gep, align 1
-  %.temp.8.gep = getelementptr inbounds [10 x i8], [10 x i8]* %_param_data, i64 0, i64 8
-  store i8 %.unpack8, i8* %.temp.8.gep, align 1
-  %.temp.9.gep = getelementptr inbounds [10 x i8], [10 x i8]* %_param_data, i64 0, i64 9
-  store i8 %.unpack9, i8* %.temp.9.gep, align 1
-  call void @callee(i8* nonnull %.temp.0.gep)
+  %.unpack0 = load i8, ptr %data, align 1
+  %.elt1 = getelementptr inbounds [10 x i8], ptr %data, i64 0, i64 1
+  %.unpack1 = load i8, ptr %.elt1, align 1
+  %.elt2 = getelementptr inbounds [10 x i8], ptr %data, i64 0, i64 2
+  %.unpack2 = load i8, ptr %.elt2, align 1
+  %.elt3 = getelementptr inbounds [10 x i8], ptr %data, i64 0, i64 3
+  %.unpack3 = load i8, ptr %.elt3, align 1
+  %.elt4 = getelementptr inbounds [10 x i8], ptr %data, i64 0, i64 4
+  %.unpack4 = load i8, ptr %.elt4, align 1
+  %.elt5 = getelementptr inbounds [10 x i8], ptr %data, i64 0, i64 5
+  %.unpack5 = load i8, ptr %.elt5, align 1
+  %.elt6 = getelementptr inbounds [10 x i8], ptr %data, i64 0, i64 6
+  %.unpack6 = load i8, ptr %.elt6, align 1
+  %.elt7 = getelementptr inbounds [10 x i8], ptr %data, i64 0, i64 7
+  %.unpack7 = load i8, ptr %.elt7, align 1
+  %.elt8 = getelementptr inbounds [10 x i8], ptr %data, i64 0, i64 8
+  %.unpack8 = load i8, ptr %.elt8, align 1
+  %.elt9 = getelementptr inbounds [10 x i8], ptr %data, i64 0, i64 9
+  %.unpack9 = load i8, ptr %.elt9, align 1
+  store i8 %.unpack0, ptr %_param_data, align 1
+  %.temp.1.gep = getelementptr inbounds [10 x i8], ptr %_param_data, i64 0, i64 1
+  store i8 %.unpack1, ptr %.temp.1.gep, align 1
+  %.temp.2.gep = getelementptr inbounds [10 x i8], ptr %_param_data, i64 0, i64 2
+  store i8 %.unpack2, ptr %.temp.2.gep, align 1
+  %.temp.3.gep = getelementptr inbounds [10 x i8], ptr %_param_data, i64 0, i64 3
+  store i8 %.unpack3, ptr %.temp.3.gep, align 1
+  %.temp.4.gep = getelementptr inbounds [10 x i8], ptr %_param_data, i64 0, i64 4
+  store i8 %.unpack4, ptr %.temp.4.gep, align 1
+  %.temp.5.gep = getelementptr inbounds [10 x i8], ptr %_param_data, i64 0, i64 5
+  store i8 %.unpack5, ptr %.temp.5.gep, align 1
+  %.temp.6.gep = getelementptr inbounds [10 x i8], ptr %_param_data, i64 0, i64 6
+  store i8 %.unpack6, ptr %.temp.6.gep, align 1
+  %.temp.7.gep = getelementptr inbounds [10 x i8], ptr %_param_data, i64 0, i64 7
+  store i8 %.unpack7, ptr %.temp.7.gep, align 1
+  %.temp.8.gep = getelementptr inbounds [10 x i8], ptr %_param_data, i64 0, i64 8
+  store i8 %.unpack8, ptr %.temp.8.gep, align 1
+  %.temp.9.gep = getelementptr inbounds [10 x i8], ptr %_param_data, i64 0, i64 9
+  store i8 %.unpack9, ptr %.temp.9.gep, align 1
+  call void @callee(ptr nonnull %_param_data)
   ret i8 0
 }
 
-define signext i8 @caller_12([12 x i8]* nocapture readonly byval([12 x i8]) %data) #0 {
+define signext i8 @caller_12(ptr nocapture readonly byval([12 x i8]) %data) #0 {
 ; P8LE-LABEL: caller_12:
 ; P8LE:       # %bb.0: # %entry
 ; P8LE-NEXT:    mflr r0
@@ -626,59 +620,57 @@ define signext i8 @caller_12([12 x i8]* nocapture readonly byval([12 x i8]) %dat
 ; P10BE-NEXT:    blr
 entry:
   %_param_data = alloca [12 x i8], align 1
-  %.elt0 = getelementptr inbounds [12 x i8], [12 x i8]* %data, i64 0, i64 0
-  %.unpack0 = load i8, i8* %.elt0, align 1
-  %.elt1 = getelementptr inbounds [12 x i8], [12 x i8]* %data, i64 0, i64 1
-  %.unpack1 = load i8, i8* %.elt1, align 1
-  %.elt2 = getelementptr inbounds [12 x i8], [12 x i8]* %data, i64 0, i64 2
-  %.unpack2 = load i8, i8* %.elt2, align 1
-  %.elt3 = getelementptr inbounds [12 x i8], [12 x i8]* %data, i64 0, i64 3
-  %.unpack3 = load i8, i8* %.elt3, align 1
-  %.elt4 = getelementptr inbounds [12 x i8], [12 x i8]* %data, i64 0, i64 4
-  %.unpack4 = load i8, i8* %.elt4, align 1
-  %.elt5 = getelementptr inbounds [12 x i8], [12 x i8]* %data, i64 0, i64 5
-  %.unpack5 = load i8, i8* %.elt5, align 1
-  %.elt6 = getelementptr inbounds [12 x i8], [12 x i8]* %data, i64 0, i64 6
-  %.unpack6 = load i8, i8* %.elt6, align 1
-  %.elt7 = getelementptr inbounds [12 x i8], [12 x i8]* %data, i64 0, i64 7
-  %.unpack7 = load i8, i8* %.elt7, align 1
-  %.elt8 = getelementptr inbounds [12 x i8], [12 x i8]* %data, i64 0, i64 8
-  %.unpack8 = load i8, i8* %.elt8, align 1
-  %.elt9 = getelementptr inbounds [12 x i8], [12 x i8]* %data, i64 0, i64 9
-  %.unpack9 = load i8, i8* %.elt9, align 1
-  %.elt10 = getelementptr inbounds [12 x i8], [12 x i8]* %data, i64 0, i64 10
-  %.unpack10 = load i8, i8* %.elt10, align 1
-  %.elt11 = getelementptr inbounds [12 x i8], [12 x i8]* %data, i64 0, i64 11
-  %.unpack11 = load i8, i8* %.elt11, align 1
-  %.temp.0.gep = getelementptr inbounds [12 x i8], [12 x i8]* %_param_data, i64 0, i64 0
-  store i8 %.unpack0, i8* %.temp.0.gep, align 1
-  %.temp.1.gep = getelementptr inbounds [12 x i8], [12 x i8]* %_param_data, i64 0, i64 1
-  store i8 %.unpack1, i8* %.temp.1.gep, align 1
-  %.temp.2.gep = getelementptr inbounds [12 x i8], [12 x i8]* %_param_data, i64 0, i64 2
-  store i8 %.unpack2, i8* %.temp.2.gep, align 1
-  %.temp.3.gep = getelementptr inbounds [12 x i8], [12 x i8]* %_param_data, i64 0, i64 3
-  store i8 %.unpack3, i8* %.temp.3.gep, align 1
-  %.temp.4.gep = getelementptr inbounds [12 x i8], [12 x i8]* %_param_data, i64 0, i64 4
-  store i8 %.unpack4, i8* %.temp.4.gep, align 1
-  %.temp.5.gep = getelementptr inbounds [12 x i8], [12 x i8]* %_param_data, i64 0, i64 5
-  store i8 %.unpack5, i8* %.temp.5.gep, align 1
-  %.temp.6.gep = getelementptr inbounds [12 x i8], [12 x i8]* %_param_data, i64 0, i64 6
-  store i8 %.unpack6, i8* %.temp.6.gep, align 1
-  %.temp.7.gep = getelementptr inbounds [12 x i8], [12 x i8]* %_param_data, i64 0, i64 7
-  store i8 %.unpack7, i8* %.temp.7.gep, align 1
-  %.temp.8.gep = getelementptr inbounds [12 x i8], [12 x i8]* %_param_data, i64 0, i64 8
-  store i8 %.unpack8, i8* %.temp.8.gep, align 1
-  %.temp.9.gep = getelementptr inbounds [12 x i8], [12 x i8]* %_param_data, i64 0, i64 9
-  store i8 %.unpack9, i8* %.temp.9.gep, align 1
-  %.temp.10.gep = getelementptr inbounds [12 x i8], [12 x i8]* %_param_data, i64 0, i64 10
-  store i8 %.unpack10, i8* %.temp.10.gep, align 1
-  %.temp.11.gep = getelementptr inbounds [12 x i8], [12 x i8]* %_param_data, i64 0, i64 11
-  store i8 %.unpack11, i8* %.temp.11.gep, align 1
-  call void @callee(i8* nonnull %.temp.0.gep)
+  %.unpack0 = load i8, ptr %data, align 1
+  %.elt1 = getelementptr inbounds [12 x i8], ptr %data, i64 0, i64 1
+  %.unpack1 = load i8, ptr %.elt1, align 1
+  %.elt2 = getelementptr inbounds [12 x i8], ptr %data, i64 0, i64 2
+  %.unpack2 = load i8, ptr %.elt2, align 1
+  %.elt3 = getelementptr inbounds [12 x i8], ptr %data, i64 0, i64 3
+  %.unpack3 = load i8, ptr %.elt3, align 1
+  %.elt4 = getelementptr inbounds [12 x i8], ptr %data, i64 0, i64 4
+  %.unpack4 = load i8, ptr %.elt4, align 1
+  %.elt5 = getelementptr inbounds [12 x i8], ptr %data, i64 0, i64 5
+  %.unpack5 = load i8, ptr %.elt5, align 1
+  %.elt6 = getelementptr inbounds [12 x i8], ptr %data, i64 0, i64 6
+  %.unpack6 = load i8, ptr %.elt6, align 1
+  %.elt7 = getelementptr inbounds [12 x i8], ptr %data, i64 0, i64 7
+  %.unpack7 = load i8, ptr %.elt7, align 1
+  %.elt8 = getelementptr inbounds [12 x i8], ptr %data, i64 0, i64 8
+  %.unpack8 = load i8, ptr %.elt8, align 1
+  %.elt9 = getelementptr inbounds [12 x i8], ptr %data, i64 0, i64 9
+  %.unpack9 = load i8, ptr %.elt9, align 1
+  %.elt10 = getelementptr inbounds [12 x i8], ptr %data, i64 0, i64 10
+  %.unpack10 = load i8, ptr %.elt10, align 1
+  %.elt11 = getelementptr inbounds [12 x i8], ptr %data, i64 0, i64 11
+  %.unpack11 = load i8, ptr %.elt11, align 1
+  store i8 %.unpack0, ptr %_param_data, align 1
+  %.temp.1.gep = getelementptr inbounds [12 x i8], ptr %_param_data, i64 0, i64 1
+  store i8 %.unpack1, ptr %.temp.1.gep, align 1
+  %.temp.2.gep = getelementptr inbounds [12 x i8], ptr %_param_data, i64 0, i64 2
+  store i8 %.unpack2, ptr %.temp.2.gep, align 1
+  %.temp.3.gep = getelementptr inbounds [12 x i8], ptr %_param_data, i64 0, i64 3
+  store i8 %.unpack3, ptr %.temp.3.gep, align 1
+  %.temp.4.gep = getelementptr inbounds [12 x i8], ptr %_param_data, i64 0, i64 4
+  store i8 %.unpack4, ptr %.temp.4.gep, align 1
+  %.temp.5.gep = getelementptr inbounds [12 x i8], ptr %_param_data, i64 0, i64 5
+  store i8 %.unpack5, ptr %.temp.5.gep, align 1
+  %.temp.6.gep = getelementptr inbounds [12 x i8], ptr %_param_data, i64 0, i64 6
+  store i8 %.unpack6, ptr %.temp.6.gep, align 1
+  %.temp.7.gep = getelementptr inbounds [12 x i8], ptr %_param_data, i64 0, i64 7
+  store i8 %.unpack7, ptr %.temp.7.gep, align 1
+  %.temp.8.gep = getelementptr inbounds [12 x i8], ptr %_param_data, i64 0, i64 8
+  store i8 %.unpack8, ptr %.temp.8.gep, align 1
+  %.temp.9.gep = getelementptr inbounds [12 x i8], ptr %_param_data, i64 0, i64 9
+  store i8 %.unpack9, ptr %.temp.9.gep, align 1
+  %.temp.10.gep = getelementptr inbounds [12 x i8], ptr %_param_data, i64 0, i64 10
+  store i8 %.unpack10, ptr %.temp.10.gep, align 1
+  %.temp.11.gep = getelementptr inbounds [12 x i8], ptr %_param_data, i64 0, i64 11
+  store i8 %.unpack11, ptr %.temp.11.gep, align 1
+  call void @callee(ptr nonnull %_param_data)
   ret i8 0
 }
 
-define signext i8 @caller_14([14 x i8]* nocapture readonly byval([14 x i8]) %data) #0 {
+define signext i8 @caller_14(ptr nocapture readonly byval([14 x i8]) %data) #0 {
 ; P8LE-LABEL: caller_14:
 ; P8LE:       # %bb.0: # %entry
 ; P8LE-NEXT:    mflr r0
@@ -811,59 +803,57 @@ define signext i8 @caller_14([14 x i8]* nocapture readonly byval([14 x i8]) %dat
 ; P10BE-NEXT:    blr
 entry:
   %_param_data = alloca [14 x i8], align 1
-  %.elt0 = getelementptr inbounds [14 x i8], [14 x i8]* %data, i64 0, i64 0
-  %.unpack0 = load i8, i8* %.elt0, align 1
-  %.elt1 = getelementptr inbounds [14 x i8], [14 x i8]* %data, i64 0, i64 1
-  %.unpack1 = load i8, i8* %.elt1, align 1
-  %.elt2 = getelementptr inbounds [14 x i8], [14 x i8]* %data, i64 0, i64 2
-  %.unpack2 = load i8, i8* %.elt2, align 1
-  %.elt3 = getelementptr inbounds [14 x i8], [14 x i8]* %data, i64 0, i64 3
-  %.unpack3 = load i8, i8* %.elt3, align 1
-  %.elt4 = getelementptr inbounds [14 x i8], [14 x i8]* %data, i64 0, i64 4
-  %.unpack4 = load i8, i8* %.elt4, align 1
-  %.elt5 = getelementptr inbounds [14 x i8], [14 x i8]* %data, i64 0, i64 5
-  %.unpack5 = load i8, i8* %.elt5, align 1
-  %.elt6 = getelementptr inbounds [14 x i8], [14 x i8]* %data, i64 0, i64 6
-  %.unpack6 = load i8, i8* %.elt6, align 1
-  %.elt7 = getelementptr inbounds [14 x i8], [14 x i8]* %data, i64 0, i64 7
-  %.unpack7 = load i8, i8* %.elt7, align 1
-  %.elt8 = getelementptr inbounds [14 x i8], [14 x i8]* %data, i64 0, i64 8
-  %.unpack8 = load i8, i8* %.elt8, align 1
-  %.elt9 = getelementptr inbounds [14 x i8], [14 x i8]* %data, i64 0, i64 9
-  %.unpack9 = load i8, i8* %.elt9, align 1
-  %.elt10 = getelementptr inbounds [14 x i8], [14 x i8]* %data, i64 0, i64 10
-  %.unpack10 = load i8, i8* %.elt10, align 1
-  %.elt11 = getelementptr inbounds [14 x i8], [14 x i8]* %data, i64 0, i64 11
-  %.unpack11 = load i8, i8* %.elt11, align 1
-  %.temp.0.gep = getelementptr inbounds [14 x i8], [14 x i8]* %_param_data, i64 0, i64 0
-  store i8 %.unpack0, i8* %.temp.0.gep, align 1
-  %.temp.1.gep = getelementptr inbounds [14 x i8], [14 x i8]* %_param_data, i64 0, i64 1
-  store i8 %.unpack1, i8* %.temp.1.gep, align 1
-  %.temp.2.gep = getelementptr inbounds [14 x i8], [14 x i8]* %_param_data, i64 0, i64 2
-  store i8 %.unpack2, i8* %.temp.2.gep, align 1
-  %.temp.3.gep = getelementptr inbounds [14 x i8], [14 x i8]* %_param_data, i64 0, i64 3
-  store i8 %.unpack3, i8* %.temp.3.gep, align 1
-  %.temp.4.gep = getelementptr inbounds [14 x i8], [14 x i8]* %_param_data, i64 0, i64 4
-  store i8 %.unpack4, i8* %.temp.4.gep, align 1
-  %.temp.5.gep = getelementptr inbounds [14 x i8], [14 x i8]* %_param_data, i64 0, i64 5
-  store i8 %.unpack5, i8* %.temp.5.gep, align 1
-  %.temp.6.gep = getelementptr inbounds [14 x i8], [14 x i8]* %_param_data, i64 0, i64 6
-  store i8 %.unpack6, i8* %.temp.6.gep, align 1
-  %.temp.7.gep = getelementptr inbounds [14 x i8], [14 x i8]* %_param_data, i64 0, i64 7
-  store i8 %.unpack7, i8* %.temp.7.gep, align 1
-  %.temp.8.gep = getelementptr inbounds [14 x i8], [14 x i8]* %_param_data, i64 0, i64 8
-  store i8 %.unpack8, i8* %.temp.8.gep, align 1
-  %.temp.9.gep = getelementptr inbounds [14 x i8], [14 x i8]* %_param_data, i64 0, i64 9
-  store i8 %.unpack9, i8* %.temp.9.gep, align 1
-  %.temp.10.gep = getelementptr inbounds [14 x i8], [14 x i8]* %_param_data, i64 0, i64 10
-  store i8 %.unpack10, i8* %.temp.10.gep, align 1
-  %.temp.11.gep = getelementptr inbounds [14 x i8], [14 x i8]* %_param_data, i64 0, i64 11
-  store i8 %.unpack11, i8* %.temp.11.gep, align 1
-  call void @callee(i8* nonnull %.temp.0.gep)
+  %.unpack0 = load i8, ptr %data, align 1
+  %.elt1 = getelementptr inbounds [14 x i8], ptr %data, i64 0, i64 1
+  %.unpack1 = load i8, ptr %.elt1, align 1
+  %.elt2 = getelementptr inbounds [14 x i8], ptr %data, i64 0, i64 2
+  %.unpack2 = load i8, ptr %.elt2, align 1
+  %.elt3 = getelementptr inbounds [14 x i8], ptr %data, i64 0, i64 3
+  %.unpack3 = load i8, ptr %.elt3, align 1
+  %.elt4 = getelementptr inbounds [14 x i8], ptr %data, i64 0, i64 4
+  %.unpack4 = load i8, ptr %.elt4, align 1
+  %.elt5 = getelementptr inbounds [14 x i8], ptr %data, i64 0, i64 5
+  %.unpack5 = load i8, ptr %.elt5, align 1
+  %.elt6 = getelementptr inbounds [14 x i8], ptr %data, i64 0, i64 6
+  %.unpack6 = load i8, ptr %.elt6, align 1
+  %.elt7 = getelementptr inbounds [14 x i8], ptr %data, i64 0, i64 7
+  %.unpack7 = load i8, ptr %.elt7, align 1
+  %.elt8 = getelementptr inbounds [14 x i8], ptr %data, i64 0, i64 8
+  %.unpack8 = load i8, ptr %.elt8, align 1
+  %.elt9 = getelementptr inbounds [14 x i8], ptr %data, i64 0, i64 9
+  %.unpack9 = load i8, ptr %.elt9, align 1
+  %.elt10 = getelementptr inbounds [14 x i8], ptr %data, i64 0, i64 10
+  %.unpack10 = load i8, ptr %.elt10, align 1
+  %.elt11 = getelementptr inbounds [14 x i8], ptr %data, i64 0, i64 11
+  %.unpack11 = load i8, ptr %.elt11, align 1
+  store i8 %.unpack0, ptr %_param_data, align 1
+  %.temp.1.gep = getelementptr inbounds [14 x i8], ptr %_param_data, i64 0, i64 1
+  store i8 %.unpack1, ptr %.temp.1.gep, align 1
+  %.temp.2.gep = getelementptr inbounds [14 x i8], ptr %_param_data, i64 0, i64 2
+  store i8 %.unpack2, ptr %.temp.2.gep, align 1
+  %.temp.3.gep = getelementptr inbounds [14 x i8], ptr %_param_data, i64 0, i64 3
+  store i8 %.unpack3, ptr %.temp.3.gep, align 1
+  %.temp.4.gep = getelementptr inbounds [14 x i8], ptr %_param_data, i64 0, i64 4
+  store i8 %.unpack4, ptr %.temp.4.gep, align 1
+  %.temp.5.gep = getelementptr inbounds [14 x i8], ptr %_param_data, i64 0, i64 5
+  store i8 %.unpack5, ptr %.temp.5.gep, align 1
+  %.temp.6.gep = getelementptr inbounds [14 x i8], ptr %_param_data, i64 0, i64 6
+  store i8 %.unpack6, ptr %.temp.6.gep, align 1
+  %.temp.7.gep = getelementptr inbounds [14 x i8], ptr %_param_data, i64 0, i64 7
+  store i8 %.unpack7, ptr %.temp.7.gep, align 1
+  %.temp.8.gep = getelementptr inbounds [14 x i8], ptr %_param_data, i64 0, i64 8
+  store i8 %.unpack8, ptr %.temp.8.gep, align 1
+  %.temp.9.gep = getelementptr inbounds [14 x i8], ptr %_param_data, i64 0, i64 9
+  store i8 %.unpack9, ptr %.temp.9.gep, align 1
+  %.temp.10.gep = getelementptr inbounds [14 x i8], ptr %_param_data, i64 0, i64 10
+  store i8 %.unpack10, ptr %.temp.10.gep, align 1
+  %.temp.11.gep = getelementptr inbounds [14 x i8], ptr %_param_data, i64 0, i64 11
+  store i8 %.unpack11, ptr %.temp.11.gep, align 1
+  call void @callee(ptr nonnull %_param_data)
   ret i8 0
 }
 
-define signext i8 @caller_16([16 x i8]* nocapture readonly byval([16 x i8]) %data) #0 {
+define signext i8 @caller_16(ptr nocapture readonly byval([16 x i8]) %data) #0 {
 ; P8LE-LABEL: caller_16:
 ; P8LE:       # %bb.0: # %entry
 ; P8LE-NEXT:    mflr r0
@@ -981,59 +971,57 @@ define signext i8 @caller_16([16 x i8]* nocapture readonly byval([16 x i8]) %dat
 ; P10BE-NEXT:    blr
 entry:
   %_param_data = alloca [16 x i8], align 1
-  %.elt0 = getelementptr inbounds [16 x i8], [16 x i8]* %data, i64 0, i64 0
-  %.unpack0 = load i8, i8* %.elt0, align 1
-  %.elt1 = getelementptr inbounds [16 x i8], [16 x i8]* %data, i64 0, i64 1
-  %.unpack1 = load i8, i8* %.elt1, align 1
-  %.elt2 = getelementptr inbounds [16 x i8], [16 x i8]* %data, i64 0, i64 2
-  %.unpack2 = load i8, i8* %.elt2, align 1
-  %.elt3 = getelementptr inbounds [16 x i8], [16 x i8]* %data, i64 0, i64 3
-  %.unpack3 = load i8, i8* %.elt3, align 1
-  %.elt4 = getelementptr inbounds [16 x i8], [16 x i8]* %data, i64 0, i64 4
-  %.unpack4 = load i8, i8* %.elt4, align 1
-  %.elt5 = getelementptr inbounds [16 x i8], [16 x i8]* %data, i64 0, i64 5
-  %.unpack5 = load i8, i8* %.elt5, align 1
-  %.elt6 = getelementptr inbounds [16 x i8], [16 x i8]* %data, i64 0, i64 6
-  %.unpack6 = load i8, i8* %.elt6, align 1
-  %.elt7 = getelementptr inbounds [16 x i8], [16 x i8]* %data, i64 0, i64 7
-  %.unpack7 = load i8, i8* %.elt7, align 1
-  %.elt8 = getelementptr inbounds [16 x i8], [16 x i8]* %data, i64 0, i64 8
-  %.unpack8 = load i8, i8* %.elt8, align 1
-  %.elt9 = getelementptr inbounds [16 x i8], [16 x i8]* %data, i64 0, i64 9
-  %.unpack9 = load i8, i8* %.elt9, align 1
-  %.elt10 = getelementptr inbounds [16 x i8], [16 x i8]* %data, i64 0, i64 10
-  %.unpack10 = load i8, i8* %.elt10, align 1
-  %.elt11 = getelementptr inbounds [16 x i8], [16 x i8]* %data, i64 0, i64 11
-  %.unpack11 = load i8, i8* %.elt11, align 1
-  %.temp.0.gep = getelementptr inbounds [16 x i8], [16 x i8]* %_param_data, i64 0, i64 0
-  store i8 %.unpack0, i8* %.temp.0.gep, align 1
-  %.temp.1.gep = getelementptr inbounds [16 x i8], [16 x i8]* %_param_data, i64 0, i64 1
-  store i8 %.unpack1, i8* %.temp.1.gep, align 1
-  %.temp.2.gep = getelementptr inbounds [16 x i8], [16 x i8]* %_param_data, i64 0, i64 2
-  store i8 %.unpack2, i8* %.temp.2.gep, align 1
-  %.temp.3.gep = getelementptr inbounds [16 x i8], [16 x i8]* %_param_data, i64 0, i64 3
-  store i8 %.unpack3, i8* %.temp.3.gep, align 1
-  %.temp.4.gep = getelementptr inbounds [16 x i8], [16 x i8]* %_param_data, i64 0, i64 4
-  store i8 %.unpack4, i8* %.temp.4.gep, align 1
-  %.temp.5.gep = getelementptr inbounds [16 x i8], [16 x i8]* %_param_data, i64 0, i64 5
-  store i8 %.unpack5, i8* %.temp.5.gep, align 1
-  %.temp.6.gep = getelementptr inbounds [16 x i8], [16 x i8]* %_param_data, i64 0, i64 6
-  store i8 %.unpack6, i8* %.temp.6.gep, align 1
-  %.temp.7.gep = getelementptr inbounds [16 x i8], [16 x i8]* %_param_data, i64 0, i64 7
-  store i8 %.unpack7, i8* %.temp.7.gep, align 1
-  %.temp.8.gep = getelementptr inbounds [16 x i8], [16 x i8]* %_param_data, i64 0, i64 8
-  store i8 %.unpack8, i8* %.temp.8.gep, align 1
-  %.temp.9.gep = getelementptr inbounds [16 x i8], [16 x i8]* %_param_data, i64 0, i64 9
-  store i8 %.unpack9, i8* %.temp.9.gep, align 1
-  %.temp.10.gep = getelementptr inbounds [16 x i8], [16 x i8]* %_param_data, i64 0, i64 10
-  store i8 %.unpack10, i8* %.temp.10.gep, align 1
-  %.temp.11.gep = getelementptr inbounds [16 x i8], [16 x i8]* %_param_data, i64 0, i64 11
-  store i8 %.unpack11, i8* %.temp.11.gep, align 1
-  call void @callee(i8* nonnull %.temp.0.gep)
+  %.unpack0 = load i8, ptr %data, align 1
+  %.elt1 = getelementptr inbounds [16 x i8], ptr %data, i64 0, i64 1
+  %.unpack1 = load i8, ptr %.elt1, align 1
+  %.elt2 = getelementptr inbounds [16 x i8], ptr %data, i64 0, i64 2
+  %.unpack2 = load i8, ptr %.elt2, align 1
+  %.elt3 = getelementptr inbounds [16 x i8], ptr %data, i64 0, i64 3
+  %.unpack3 = load i8, ptr %.elt3, align 1
+  %.elt4 = getelementptr inbounds [16 x i8], ptr %data, i64 0, i64 4
+  %.unpack4 = load i8, ptr %.elt4, align 1
+  %.elt5 = getelementptr inbounds [16 x i8], ptr %data, i64 0, i64 5
+  %.unpack5 = load i8, ptr %.elt5, align 1
+  %.elt6 = getelementptr inbounds [16 x i8], ptr %data, i64 0, i64 6
+  %.unpack6 = load i8, ptr %.elt6, align 1
+  %.elt7 = getelementptr inbounds [16 x i8], ptr %data, i64 0, i64 7
+  %.unpack7 = load i8, ptr %.elt7, align 1
+  %.elt8 = getelementptr inbounds [16 x i8], ptr %data, i64 0, i64 8
+  %.unpack8 = load i8, ptr %.elt8, align 1
+  %.elt9 = getelementptr inbounds [16 x i8], ptr %data, i64 0, i64 9
+  %.unpack9 = load i8, ptr %.elt9, align 1
+  %.elt10 = getelementptr inbounds [16 x i8], ptr %data, i64 0, i64 10
+  %.unpack10 = load i8, ptr %.elt10, align 1
+  %.elt11 = getelementptr inbounds [16 x i8], ptr %data, i64 0, i64 11
+  %.unpack11 = load i8, ptr %.elt11, align 1
+  store i8 %.unpack0, ptr %_param_data, align 1
+  %.temp.1.gep = getelementptr inbounds [16 x i8], ptr %_param_data, i64 0, i64 1
+  store i8 %.unpack1, ptr %.temp.1.gep, align 1
+  %.temp.2.gep = getelementptr inbounds [16 x i8], ptr %_param_data, i64 0, i64 2
+  store i8 %.unpack2, ptr %.temp.2.gep, align 1
+  %.temp.3.gep = getelementptr inbounds [16 x i8], ptr %_param_data, i64 0, i64 3
+  store i8 %.unpack3, ptr %.temp.3.gep, align 1
+  %.temp.4.gep = getelementptr inbounds [16 x i8], ptr %_param_data, i64 0, i64 4
+  store i8 %.unpack4, ptr %.temp.4.gep, align 1
+  %.temp.5.gep = getelementptr inbounds [16 x i8], ptr %_param_data, i64 0, i64 5
+  store i8 %.unpack5, ptr %.temp.5.gep, align 1
+  %.temp.6.gep = getelementptr inbounds [16 x i8], ptr %_param_data, i64 0, i64 6
+  store i8 %.unpack6, ptr %.temp.6.gep, align 1
+  %.temp.7.gep = getelementptr inbounds [16 x i8], ptr %_param_data, i64 0, i64 7
+  store i8 %.unpack7, ptr %.temp.7.gep, align 1
+  %.temp.8.gep = getelementptr inbounds [16 x i8], ptr %_param_data, i64 0, i64 8
+  store i8 %.unpack8, ptr %.temp.8.gep, align 1
+  %.temp.9.gep = getelementptr inbounds [16 x i8], ptr %_param_data, i64 0, i64 9
+  store i8 %.unpack9, ptr %.temp.9.gep, align 1
+  %.temp.10.gep = getelementptr inbounds [16 x i8], ptr %_param_data, i64 0, i64 10
+  store i8 %.unpack10, ptr %.temp.10.gep, align 1
+  %.temp.11.gep = getelementptr inbounds [16 x i8], ptr %_param_data, i64 0, i64 11
+  store i8 %.unpack11, ptr %.temp.11.gep, align 1
+  call void @callee(ptr nonnull %_param_data)
   ret i8 0
 }
 
-define signext i8 @caller_18([18 x i8]* nocapture readonly byval([18 x i8]) %data) #0 {
+define signext i8 @caller_18(ptr nocapture readonly byval([18 x i8]) %data) #0 {
 ; P8LE-LABEL: caller_18:
 ; P8LE:       # %bb.0: # %entry
 ; P8LE-NEXT:    mflr r0
@@ -1157,62 +1145,60 @@ define signext i8 @caller_18([18 x i8]* nocapture readonly byval([18 x i8]) %dat
 ; P10BE-NEXT:    blr
 entry:
   %_param_data = alloca [18 x i8], align 1
-  %.elt0 = getelementptr inbounds [18 x i8], [18 x i8]* %data, i64 0, i64 0
-  %.unpack0 = load i8, i8* %.elt0, align 1
-  %.elt1 = getelementptr inbounds [18 x i8], [18 x i8]* %data, i64 0, i64 1
-  %.unpack1 = load i8, i8* %.elt1, align 1
-  %.elt2 = getelementptr inbounds [18 x i8], [18 x i8]* %data, i64 0, i64 2
-  %.unpack2 = load i8, i8* %.elt2, align 1
-  %.elt3 = getelementptr inbounds [18 x i8], [18 x i8]* %data, i64 0, i64 3
-  %.unpack3 = load i8, i8* %.elt3, align 1
-  %.elt4 = getelementptr inbounds [18 x i8], [18 x i8]* %data, i64 0, i64 4
-  %.unpack4 = load i8, i8* %.elt4, align 1
-  %.elt5 = getelementptr inbounds [18 x i8], [18 x i8]* %data, i64 0, i64 5
-  %.unpack5 = load i8, i8* %.elt5, align 1
-  %.elt6 = getelementptr inbounds [18 x i8], [18 x i8]* %data, i64 0, i64 6
-  %.unpack6 = load i8, i8* %.elt6, align 1
-  %.elt7 = getelementptr inbounds [18 x i8], [18 x i8]* %data, i64 0, i64 7
-  %.unpack7 = load i8, i8* %.elt7, align 1
-  %.elt8 = getelementptr inbounds [18 x i8], [18 x i8]* %data, i64 0, i64 8
-  %.unpack8 = load i8, i8* %.elt8, align 1
-  %.elt9 = getelementptr inbounds [18 x i8], [18 x i8]* %data, i64 0, i64 9
-  %.unpack9 = load i8, i8* %.elt9, align 1
-  %.elt10 = getelementptr inbounds [18 x i8], [18 x i8]* %data, i64 0, i64 10
-  %.unpack10 = load i8, i8* %.elt10, align 1
-  %.elt11 = getelementptr inbounds [18 x i8], [18 x i8]* %data, i64 0, i64 11
-  %.unpack11 = load i8, i8* %.elt11, align 1
-  %.temp.0.gep = getelementptr inbounds [18 x i8], [18 x i8]* %_param_data, i64 0, i64 0
-  store i8 %.unpack0, i8* %.temp.0.gep, align 1
-  %.temp.1.gep = getelementptr inbounds [18 x i8], [18 x i8]* %_param_data, i64 0, i64 1
-  store i8 %.unpack1, i8* %.temp.1.gep, align 1
-  %.temp.2.gep = getelementptr inbounds [18 x i8], [18 x i8]* %_param_data, i64 0, i64 2
-  store i8 %.unpack2, i8* %.temp.2.gep, align 1
-  %.temp.3.gep = getelementptr inbounds [18 x i8], [18 x i8]* %_param_data, i64 0, i64 3
-  store i8 %.unpack3, i8* %.temp.3.gep, align 1
-  %.temp.4.gep = getelementptr inbounds [18 x i8], [18 x i8]* %_param_data, i64 0, i64 4
-  store i8 %.unpack4, i8* %.temp.4.gep, align 1
-  %.temp.5.gep = getelementptr inbounds [18 x i8], [18 x i8]* %_param_data, i64 0, i64 5
-  store i8 %.unpack5, i8* %.temp.5.gep, align 1
-  %.temp.6.gep = getelementptr inbounds [18 x i8], [18 x i8]* %_param_data, i64 0, i64 6
-  store i8 %.unpack6, i8* %.temp.6.gep, align 1
-  %.temp.7.gep = getelementptr inbounds [18 x i8], [18 x i8]* %_param_data, i64 0, i64 7
-  store i8 %.unpack7, i8* %.temp.7.gep, align 1
-  %.temp.8.gep = getelementptr inbounds [18 x i8], [18 x i8]* %_param_data, i64 0, i64 8
-  store i8 %.unpack8, i8* %.temp.8.gep, align 1
-  %.temp.9.gep = getelementptr inbounds [18 x i8], [18 x i8]* %_param_data, i64 0, i64 9
-  store i8 %.unpack9, i8* %.temp.9.gep, align 1
-  %.temp.10.gep = getelementptr inbounds [18 x i8], [18 x i8]* %_param_data, i64 0, i64 10
-  store i8 %.unpack10, i8* %.temp.10.gep, align 1
-  %.temp.11.gep = getelementptr inbounds [18 x i8], [18 x i8]* %_param_data, i64 0, i64 11
-  store i8 %.unpack11, i8* %.temp.11.gep, align 1
-  call void @callee(i8* nonnull %.temp.0.gep)
+  %.unpack0 = load i8, ptr %data, align 1
+  %.elt1 = getelementptr inbounds [18 x i8], ptr %data, i64 0, i64 1
+  %.unpack1 = load i8, ptr %.elt1, align 1
+  %.elt2 = getelementptr inbounds [18 x i8], ptr %data, i64 0, i64 2
+  %.unpack2 = load i8, ptr %.elt2, align 1
+  %.elt3 = getelementptr inbounds [18 x i8], ptr %data, i64 0, i64 3
+  %.unpack3 = load i8, ptr %.elt3, align 1
+  %.elt4 = getelementptr inbounds [18 x i8], ptr %data, i64 0, i64 4
+  %.unpack4 = load i8, ptr %.elt4, align 1
+  %.elt5 = getelementptr inbounds [18 x i8], ptr %data, i64 0, i64 5
+  %.unpack5 = load i8, ptr %.elt5, align 1
+  %.elt6 = getelementptr inbounds [18 x i8], ptr %data, i64 0, i64 6
+  %.unpack6 = load i8, ptr %.elt6, align 1
+  %.elt7 = getelementptr inbounds [18 x i8], ptr %data, i64 0, i64 7
+  %.unpack7 = load i8, ptr %.elt7, align 1
+  %.elt8 = getelementptr inbounds [18 x i8], ptr %data, i64 0, i64 8
+  %.unpack8 = load i8, ptr %.elt8, align 1
+  %.elt9 = getelementptr inbounds [18 x i8], ptr %data, i64 0, i64 9
+  %.unpack9 = load i8, ptr %.elt9, align 1
+  %.elt10 = getelementptr inbounds [18 x i8], ptr %data, i64 0, i64 10
+  %.unpack10 = load i8, ptr %.elt10, align 1
+  %.elt11 = getelementptr inbounds [18 x i8], ptr %data, i64 0, i64 11
+  %.unpack11 = load i8, ptr %.elt11, align 1
+  store i8 %.unpack0, ptr %_param_data, align 1
+  %.temp.1.gep = getelementptr inbounds [18 x i8], ptr %_param_data, i64 0, i64 1
+  store i8 %.unpack1, ptr %.temp.1.gep, align 1
+  %.temp.2.gep = getelementptr inbounds [18 x i8], ptr %_param_data, i64 0, i64 2
+  store i8 %.unpack2, ptr %.temp.2.gep, align 1
+  %.temp.3.gep = getelementptr inbounds [18 x i8], ptr %_param_data, i64 0, i64 3
+  store i8 %.unpack3, ptr %.temp.3.gep, align 1
+  %.temp.4.gep = getelementptr inbounds [18 x i8], ptr %_param_data, i64 0, i64 4
+  store i8 %.unpack4, ptr %.temp.4.gep, align 1
+  %.temp.5.gep = getelementptr inbounds [18 x i8], ptr %_param_data, i64 0, i64 5
+  store i8 %.unpack5, ptr %.temp.5.gep, align 1
+  %.temp.6.gep = getelementptr inbounds [18 x i8], ptr %_param_data, i64 0, i64 6
+  store i8 %.unpack6, ptr %.temp.6.gep, align 1
+  %.temp.7.gep = getelementptr inbounds [18 x i8], ptr %_param_data, i64 0, i64 7
+  store i8 %.unpack7, ptr %.temp.7.gep, align 1
+  %.temp.8.gep = getelementptr inbounds [18 x i8], ptr %_param_data, i64 0, i64 8
+  store i8 %.unpack8, ptr %.temp.8.gep, align 1
+  %.temp.9.gep = getelementptr inbounds [18 x i8], ptr %_param_data, i64 0, i64 9
+  store i8 %.unpack9, ptr %.temp.9.gep, align 1
+  %.temp.10.gep = getelementptr inbounds [18 x i8], ptr %_param_data, i64 0, i64 10
+  store i8 %.unpack10, ptr %.temp.10.gep, align 1
+  %.temp.11.gep = getelementptr inbounds [18 x i8], ptr %_param_data, i64 0, i64 11
+  store i8 %.unpack11, ptr %.temp.11.gep, align 1
+  call void @callee(ptr nonnull %_param_data)
   ret i8 0
 }
 
 
 
-declare void @callee(i8*) local_unnamed_addr #0
-declare void @callee_9([9 x i8]* nocapture readonly byval([9 x i8]) %data) local_unnamed_addr #0
+declare void @callee(ptr) local_unnamed_addr #0
+declare void @callee_9(ptr nocapture readonly byval([9 x i8]) %data) local_unnamed_addr #0
 
 attributes #0 = { nounwind }
 

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64-byval-multi-store.ll b/llvm/test/CodeGen/PowerPC/ppc64-byval-multi-store.ll
index 7bba52da174a..8a258c413703 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-byval-multi-store.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-byval-multi-store.ll
@@ -12,7 +12,7 @@
 ; RUN: llc -verify-machineinstrs --mtriple powerpc64-unknown-linux-gnu \
 ; RUN:   -mcpu=pwr10 -ppc-asm-full-reg-names < %s | FileCheck %s --check-prefix=P10BE
 
-define signext i8 @caller_1([1 x i8]* nocapture readonly byval([1 x i8]) %data) #0 {
+define signext i8 @caller_1(ptr nocapture readonly byval([1 x i8]) %data) #0 {
 ; P8LE-LABEL: caller_1:
 ; P8LE:       # %bb.0: # %entry
 ; P8LE-NEXT:    mflr r0
@@ -115,15 +115,13 @@ define signext i8 @caller_1([1 x i8]* nocapture readonly byval([1 x i8]) %data)
 ; P10BE-NEXT:    blr
 entry:
   %_param_data = alloca [1 x i8], align 1
-  %.elt = getelementptr inbounds [1 x i8], [1 x i8]* %data, i64 0, i64 0
-  %.unpack = load i8, i8* %.elt, align 1
-  %.temp.0.gep = getelementptr inbounds [1 x i8], [1 x i8]* %_param_data, i64 0, i64 0
-  store i8 %.unpack, i8* %.temp.0.gep, align 1
-  call void @callee(i8* nonnull %.temp.0.gep)
+  %.unpack = load i8, ptr %data, align 1
+  store i8 %.unpack, ptr %_param_data, align 1
+  call void @callee(ptr nonnull %_param_data)
   ret i8 0
 }
 
-define signext i8 @caller_2([2 x i8]* nocapture readonly byval([2 x i8]) %data) #0 {
+define signext i8 @caller_2(ptr nocapture readonly byval([2 x i8]) %data) #0 {
 ; P8LE-LABEL: caller_2:
 ; P8LE:       # %bb.0: # %entry
 ; P8LE-NEXT:    mflr r0
@@ -226,19 +224,17 @@ define signext i8 @caller_2([2 x i8]* nocapture readonly byval([2 x i8]) %data)
 ; P10BE-NEXT:    blr
 entry:
   %_param_data = alloca [2 x i8], align 1
-  %.elt = getelementptr inbounds [2 x i8], [2 x i8]* %data, i64 0, i64 0
-  %.unpack = load i8, i8* %.elt, align 1
-  %.elt1 = getelementptr inbounds [2 x i8], [2 x i8]* %data, i64 0, i64 1
-  %.unpack2 = load i8, i8* %.elt1, align 1
-  %.temp.0.gep = getelementptr inbounds [2 x i8], [2 x i8]* %_param_data, i64 0, i64 0
-  store i8 %.unpack, i8* %.temp.0.gep, align 1
-  %.temp.1.gep = getelementptr inbounds [2 x i8], [2 x i8]* %_param_data, i64 0, i64 1
-  store i8 %.unpack2, i8* %.temp.1.gep, align 1
-  call void @callee(i8* nonnull %.temp.0.gep)
+  %.unpack = load i8, ptr %data, align 1
+  %.elt1 = getelementptr inbounds [2 x i8], ptr %data, i64 0, i64 1
+  %.unpack2 = load i8, ptr %.elt1, align 1
+  store i8 %.unpack, ptr %_param_data, align 1
+  %.temp.1.gep = getelementptr inbounds [2 x i8], ptr %_param_data, i64 0, i64 1
+  store i8 %.unpack2, ptr %.temp.1.gep, align 1
+  call void @callee(ptr nonnull %_param_data)
   ret i8 0
 }
 
-define signext i8 @caller_3([3 x i8]* nocapture readonly byval([3 x i8]) %data) #0 {
+define signext i8 @caller_3(ptr nocapture readonly byval([3 x i8]) %data) #0 {
 ; P8LE-LABEL: caller_3:
 ; P8LE:       # %bb.0: # %entry
 ; P8LE-NEXT:    mflr r0
@@ -365,23 +361,21 @@ define signext i8 @caller_3([3 x i8]* nocapture readonly byval([3 x i8]) %data)
 ; P10BE-NEXT:    blr
 entry:
   %_param_data = alloca [3 x i8], align 1
-  %.elt = getelementptr inbounds [3 x i8], [3 x i8]* %data, i64 0, i64 0
-  %.unpack = load i8, i8* %.elt, align 1
-  %.elt1 = getelementptr inbounds [3 x i8], [3 x i8]* %data, i64 0, i64 1
-  %.unpack2 = load i8, i8* %.elt1, align 1
-  %.elt3 = getelementptr inbounds [3 x i8], [3 x i8]* %data, i64 0, i64 2
-  %.unpack4 = load i8, i8* %.elt3, align 1
-  %.temp.0.gep = getelementptr inbounds [3 x i8], [3 x i8]* %_param_data, i64 0, i64 0
-  store i8 %.unpack, i8* %.temp.0.gep, align 1
-  %.temp.1.gep = getelementptr inbounds [3 x i8], [3 x i8]* %_param_data, i64 0, i64 1
-  store i8 %.unpack2, i8* %.temp.1.gep, align 1
-  %.temp.2.gep = getelementptr inbounds [3 x i8], [3 x i8]* %_param_data, i64 0, i64 2
-  store i8 %.unpack4, i8* %.temp.2.gep, align 1
-  call void @callee(i8* nonnull %.temp.0.gep)
+  %.unpack = load i8, ptr %data, align 1
+  %.elt1 = getelementptr inbounds [3 x i8], ptr %data, i64 0, i64 1
+  %.unpack2 = load i8, ptr %.elt1, align 1
+  %.elt3 = getelementptr inbounds [3 x i8], ptr %data, i64 0, i64 2
+  %.unpack4 = load i8, ptr %.elt3, align 1
+  store i8 %.unpack, ptr %_param_data, align 1
+  %.temp.1.gep = getelementptr inbounds [3 x i8], ptr %_param_data, i64 0, i64 1
+  store i8 %.unpack2, ptr %.temp.1.gep, align 1
+  %.temp.2.gep = getelementptr inbounds [3 x i8], ptr %_param_data, i64 0, i64 2
+  store i8 %.unpack4, ptr %.temp.2.gep, align 1
+  call void @callee(ptr nonnull %_param_data)
   ret i8 0
 }
 
-define signext i8 @caller_4([4 x i8]* nocapture readonly byval([4 x i8]) %data) #0 {
+define signext i8 @caller_4(ptr nocapture readonly byval([4 x i8]) %data) #0 {
 ; P8LE-LABEL: caller_4:
 ; P8LE:       # %bb.0: # %entry
 ; P8LE-NEXT:    mflr r0
@@ -484,27 +478,25 @@ define signext i8 @caller_4([4 x i8]* nocapture readonly byval([4 x i8]) %data)
 ; P10BE-NEXT:    blr
 entry:
   %_param_data = alloca [4 x i8], align 1
-  %.elt = getelementptr inbounds [4 x i8], [4 x i8]* %data, i64 0, i64 0
-  %.unpack = load i8, i8* %.elt, align 1
-  %.elt1 = getelementptr inbounds [4 x i8], [4 x i8]* %data, i64 0, i64 1
-  %.unpack2 = load i8, i8* %.elt1, align 1
-  %.elt3 = getelementptr inbounds [4 x i8], [4 x i8]* %data, i64 0, i64 2
-  %.unpack4 = load i8, i8* %.elt3, align 1
-  %.elt5 = getelementptr inbounds [4 x i8], [4 x i8]* %data, i64 0, i64 3
-  %.unpack6 = load i8, i8* %.elt5, align 1
-  %.temp.0.gep = getelementptr inbounds [4 x i8], [4 x i8]* %_param_data, i64 0, i64 0
-  store i8 %.unpack, i8* %.temp.0.gep, align 1
-  %.temp.1.gep = getelementptr inbounds [4 x i8], [4 x i8]* %_param_data, i64 0, i64 1
-  store i8 %.unpack2, i8* %.temp.1.gep, align 1
-  %.temp.2.gep = getelementptr inbounds [4 x i8], [4 x i8]* %_param_data, i64 0, i64 2
-  store i8 %.unpack4, i8* %.temp.2.gep, align 1
-  %.temp.3.gep = getelementptr inbounds [4 x i8], [4 x i8]* %_param_data, i64 0, i64 3
-  store i8 %.unpack6, i8* %.temp.3.gep, align 1
-  call void @callee(i8* nonnull %.temp.0.gep)
+  %.unpack = load i8, ptr %data, align 1
+  %.elt1 = getelementptr inbounds [4 x i8], ptr %data, i64 0, i64 1
+  %.unpack2 = load i8, ptr %.elt1, align 1
+  %.elt3 = getelementptr inbounds [4 x i8], ptr %data, i64 0, i64 2
+  %.unpack4 = load i8, ptr %.elt3, align 1
+  %.elt5 = getelementptr inbounds [4 x i8], ptr %data, i64 0, i64 3
+  %.unpack6 = load i8, ptr %.elt5, align 1
+  store i8 %.unpack, ptr %_param_data, align 1
+  %.temp.1.gep = getelementptr inbounds [4 x i8], ptr %_param_data, i64 0, i64 1
+  store i8 %.unpack2, ptr %.temp.1.gep, align 1
+  %.temp.2.gep = getelementptr inbounds [4 x i8], ptr %_param_data, i64 0, i64 2
+  store i8 %.unpack4, ptr %.temp.2.gep, align 1
+  %.temp.3.gep = getelementptr inbounds [4 x i8], ptr %_param_data, i64 0, i64 3
+  store i8 %.unpack6, ptr %.temp.3.gep, align 1
+  call void @callee(ptr nonnull %_param_data)
   ret i8 0
 }
 
-define signext i8 @caller_5([5 x i8]* nocapture readonly byval([5 x i8]) %data) #0 {
+define signext i8 @caller_5(ptr nocapture readonly byval([5 x i8]) %data) #0 {
 ; P8LE-LABEL: caller_5:
 ; P8LE:       # %bb.0: # %entry
 ; P8LE-NEXT:    mflr r0
@@ -625,31 +617,29 @@ define signext i8 @caller_5([5 x i8]* nocapture readonly byval([5 x i8]) %data)
 ; P10BE-NEXT:    blr
 entry:
   %_param_data = alloca [5 x i8], align 1
-  %.elt = getelementptr inbounds [5 x i8], [5 x i8]* %data, i64 0, i64 0
-  %.unpack = load i8, i8* %.elt, align 1
-  %.elt1 = getelementptr inbounds [5 x i8], [5 x i8]* %data, i64 0, i64 1
-  %.unpack2 = load i8, i8* %.elt1, align 1
-  %.elt3 = getelementptr inbounds [5 x i8], [5 x i8]* %data, i64 0, i64 2
-  %.unpack4 = load i8, i8* %.elt3, align 1
-  %.elt5 = getelementptr inbounds [5 x i8], [5 x i8]* %data, i64 0, i64 3
-  %.unpack6 = load i8, i8* %.elt5, align 1
-  %.elt7 = getelementptr inbounds [5 x i8], [5 x i8]* %data, i64 0, i64 4
-  %.unpack8 = load i8, i8* %.elt7, align 1
-  %.temp.0.gep = getelementptr inbounds [5 x i8], [5 x i8]* %_param_data, i64 0, i64 0
-  store i8 %.unpack, i8* %.temp.0.gep, align 1
-  %.temp.1.gep = getelementptr inbounds [5 x i8], [5 x i8]* %_param_data, i64 0, i64 1
-  store i8 %.unpack2, i8* %.temp.1.gep, align 1
-  %.temp.2.gep = getelementptr inbounds [5 x i8], [5 x i8]* %_param_data, i64 0, i64 2
-  store i8 %.unpack4, i8* %.temp.2.gep, align 1
-  %.temp.3.gep = getelementptr inbounds [5 x i8], [5 x i8]* %_param_data, i64 0, i64 3
-  store i8 %.unpack6, i8* %.temp.3.gep, align 1
-  %.temp.4.gep = getelementptr inbounds [5 x i8], [5 x i8]* %_param_data, i64 0, i64 4
-  store i8 %.unpack8, i8* %.temp.4.gep, align 1
-  call void @callee(i8* nonnull %.temp.0.gep)
+  %.unpack = load i8, ptr %data, align 1
+  %.elt1 = getelementptr inbounds [5 x i8], ptr %data, i64 0, i64 1
+  %.unpack2 = load i8, ptr %.elt1, align 1
+  %.elt3 = getelementptr inbounds [5 x i8], ptr %data, i64 0, i64 2
+  %.unpack4 = load i8, ptr %.elt3, align 1
+  %.elt5 = getelementptr inbounds [5 x i8], ptr %data, i64 0, i64 3
+  %.unpack6 = load i8, ptr %.elt5, align 1
+  %.elt7 = getelementptr inbounds [5 x i8], ptr %data, i64 0, i64 4
+  %.unpack8 = load i8, ptr %.elt7, align 1
+  store i8 %.unpack, ptr %_param_data, align 1
+  %.temp.1.gep = getelementptr inbounds [5 x i8], ptr %_param_data, i64 0, i64 1
+  store i8 %.unpack2, ptr %.temp.1.gep, align 1
+  %.temp.2.gep = getelementptr inbounds [5 x i8], ptr %_param_data, i64 0, i64 2
+  store i8 %.unpack4, ptr %.temp.2.gep, align 1
+  %.temp.3.gep = getelementptr inbounds [5 x i8], ptr %_param_data, i64 0, i64 3
+  store i8 %.unpack6, ptr %.temp.3.gep, align 1
+  %.temp.4.gep = getelementptr inbounds [5 x i8], ptr %_param_data, i64 0, i64 4
+  store i8 %.unpack8, ptr %.temp.4.gep, align 1
+  call void @callee(ptr nonnull %_param_data)
   ret i8 0
 }
 
-define signext i8 @caller_6([6 x i8]* nocapture readonly byval([6 x i8]) %data) #0 {
+define signext i8 @caller_6(ptr nocapture readonly byval([6 x i8]) %data) #0 {
 ; P8LE-LABEL: caller_6:
 ; P8LE:       # %bb.0: # %entry
 ; P8LE-NEXT:    mflr r0
@@ -776,35 +766,33 @@ define signext i8 @caller_6([6 x i8]* nocapture readonly byval([6 x i8]) %data)
 ; P10BE-NEXT:    blr
 entry:
   %_param_data = alloca [6 x i8], align 1
-  %.elt = getelementptr inbounds [6 x i8], [6 x i8]* %data, i64 0, i64 0
-  %.unpack = load i8, i8* %.elt, align 1
-  %.elt1 = getelementptr inbounds [6 x i8], [6 x i8]* %data, i64 0, i64 1
-  %.unpack2 = load i8, i8* %.elt1, align 1
-  %.elt3 = getelementptr inbounds [6 x i8], [6 x i8]* %data, i64 0, i64 2
-  %.unpack4 = load i8, i8* %.elt3, align 1
-  %.elt5 = getelementptr inbounds [6 x i8], [6 x i8]* %data, i64 0, i64 3
-  %.unpack6 = load i8, i8* %.elt5, align 1
-  %.elt7 = getelementptr inbounds [6 x i8], [6 x i8]* %data, i64 0, i64 4
-  %.unpack8 = load i8, i8* %.elt7, align 1
-  %.elt9 = getelementptr inbounds [6 x i8], [6 x i8]* %data, i64 0, i64 5
-  %.unpack10 = load i8, i8* %.elt9, align 1
-  %.temp.0.gep = getelementptr inbounds [6 x i8], [6 x i8]* %_param_data, i64 0, i64 0
-  store i8 %.unpack, i8* %.temp.0.gep, align 1
-  %.temp.1.gep = getelementptr inbounds [6 x i8], [6 x i8]* %_param_data, i64 0, i64 1
-  store i8 %.unpack2, i8* %.temp.1.gep, align 1
-  %.temp.2.gep = getelementptr inbounds [6 x i8], [6 x i8]* %_param_data, i64 0, i64 2
-  store i8 %.unpack4, i8* %.temp.2.gep, align 1
-  %.temp.3.gep = getelementptr inbounds [6 x i8], [6 x i8]* %_param_data, i64 0, i64 3
-  store i8 %.unpack6, i8* %.temp.3.gep, align 1
-  %.temp.4.gep = getelementptr inbounds [6 x i8], [6 x i8]* %_param_data, i64 0, i64 4
-  store i8 %.unpack8, i8* %.temp.4.gep, align 1
-  %.temp.5.gep = getelementptr inbounds [6 x i8], [6 x i8]* %_param_data, i64 0, i64 5
-  store i8 %.unpack10, i8* %.temp.5.gep, align 1
-  call void @callee(i8* nonnull %.temp.0.gep)
+  %.unpack = load i8, ptr %data, align 1
+  %.elt1 = getelementptr inbounds [6 x i8], ptr %data, i64 0, i64 1
+  %.unpack2 = load i8, ptr %.elt1, align 1
+  %.elt3 = getelementptr inbounds [6 x i8], ptr %data, i64 0, i64 2
+  %.unpack4 = load i8, ptr %.elt3, align 1
+  %.elt5 = getelementptr inbounds [6 x i8], ptr %data, i64 0, i64 3
+  %.unpack6 = load i8, ptr %.elt5, align 1
+  %.elt7 = getelementptr inbounds [6 x i8], ptr %data, i64 0, i64 4
+  %.unpack8 = load i8, ptr %.elt7, align 1
+  %.elt9 = getelementptr inbounds [6 x i8], ptr %data, i64 0, i64 5
+  %.unpack10 = load i8, ptr %.elt9, align 1
+  store i8 %.unpack, ptr %_param_data, align 1
+  %.temp.1.gep = getelementptr inbounds [6 x i8], ptr %_param_data, i64 0, i64 1
+  store i8 %.unpack2, ptr %.temp.1.gep, align 1
+  %.temp.2.gep = getelementptr inbounds [6 x i8], ptr %_param_data, i64 0, i64 2
+  store i8 %.unpack4, ptr %.temp.2.gep, align 1
+  %.temp.3.gep = getelementptr inbounds [6 x i8], ptr %_param_data, i64 0, i64 3
+  store i8 %.unpack6, ptr %.temp.3.gep, align 1
+  %.temp.4.gep = getelementptr inbounds [6 x i8], ptr %_param_data, i64 0, i64 4
+  store i8 %.unpack8, ptr %.temp.4.gep, align 1
+  %.temp.5.gep = getelementptr inbounds [6 x i8], ptr %_param_data, i64 0, i64 5
+  store i8 %.unpack10, ptr %.temp.5.gep, align 1
+  call void @callee(ptr nonnull %_param_data)
   ret i8 0
 }
 
-define signext i8 @caller_7([7 x i8]* nocapture readonly byval([7 x i8]) %data) #0 {
+define signext i8 @caller_7(ptr nocapture readonly byval([7 x i8]) %data) #0 {
 ; P8LE-LABEL: caller_7:
 ; P8LE:       # %bb.0: # %entry
 ; P8LE-NEXT:    mflr r0
@@ -949,38 +937,36 @@ define signext i8 @caller_7([7 x i8]* nocapture readonly byval([7 x i8]) %data)
 ; P10BE-NEXT:    blr
 entry:
   %_param_data = alloca [7 x i8], align 1
-  %.elt = getelementptr inbounds [7 x i8], [7 x i8]* %data, i64 0, i64 0
-  %.unpack = load i8, i8* %.elt, align 1
-  %.elt1 = getelementptr inbounds [7 x i8], [7 x i8]* %data, i64 0, i64 1
-  %.unpack2 = load i8, i8* %.elt1, align 1
-  %.elt3 = getelementptr inbounds [7 x i8], [7 x i8]* %data, i64 0, i64 2
-  %.unpack4 = load i8, i8* %.elt3, align 1
-  %.elt5 = getelementptr inbounds [7 x i8], [7 x i8]* %data, i64 0, i64 3
-  %.unpack6 = load i8, i8* %.elt5, align 1
-  %.elt7 = getelementptr inbounds [7 x i8], [7 x i8]* %data, i64 0, i64 4
-  %.unpack8 = load i8, i8* %.elt7, align 1
-  %.elt9 = getelementptr inbounds [7 x i8], [7 x i8]* %data, i64 0, i64 5
-  %.unpack10 = load i8, i8* %.elt9, align 1
-  %.elt11 = getelementptr inbounds [7 x i8], [7 x i8]* %data, i64 0, i64 6
-  %.unpack12 = load i8, i8* %.elt11, align 1
-  %.temp.0.gep = getelementptr inbounds [7 x i8], [7 x i8]* %_param_data, i64 0, i64 0
-  store i8 %.unpack, i8* %.temp.0.gep, align 1
-  %.temp.1.gep = getelementptr inbounds [7 x i8], [7 x i8]* %_param_data, i64 0, i64 1
-  store i8 %.unpack2, i8* %.temp.1.gep, align 1
-  %.temp.2.gep = getelementptr inbounds [7 x i8], [7 x i8]* %_param_data, i64 0, i64 2
-  store i8 %.unpack4, i8* %.temp.2.gep, align 1
-  %.temp.3.gep = getelementptr inbounds [7 x i8], [7 x i8]* %_param_data, i64 0, i64 3
-  store i8 %.unpack6, i8* %.temp.3.gep, align 1
-  %.temp.4.gep = getelementptr inbounds [7 x i8], [7 x i8]* %_param_data, i64 0, i64 4
-  store i8 %.unpack8, i8* %.temp.4.gep, align 1
-  %.temp.5.gep = getelementptr inbounds [7 x i8], [7 x i8]* %_param_data, i64 0, i64 5
-  store i8 %.unpack10, i8* %.temp.5.gep, align 1
-  %.temp.6.gep = getelementptr inbounds [7 x i8], [7 x i8]* %_param_data, i64 0, i64 6
-  store i8 %.unpack12, i8* %.temp.6.gep, align 1
-  call void @callee(i8* nonnull %.temp.0.gep)
+  %.unpack = load i8, ptr %data, align 1
+  %.elt1 = getelementptr inbounds [7 x i8], ptr %data, i64 0, i64 1
+  %.unpack2 = load i8, ptr %.elt1, align 1
+  %.elt3 = getelementptr inbounds [7 x i8], ptr %data, i64 0, i64 2
+  %.unpack4 = load i8, ptr %.elt3, align 1
+  %.elt5 = getelementptr inbounds [7 x i8], ptr %data, i64 0, i64 3
+  %.unpack6 = load i8, ptr %.elt5, align 1
+  %.elt7 = getelementptr inbounds [7 x i8], ptr %data, i64 0, i64 4
+  %.unpack8 = load i8, ptr %.elt7, align 1
+  %.elt9 = getelementptr inbounds [7 x i8], ptr %data, i64 0, i64 5
+  %.unpack10 = load i8, ptr %.elt9, align 1
+  %.elt11 = getelementptr inbounds [7 x i8], ptr %data, i64 0, i64 6
+  %.unpack12 = load i8, ptr %.elt11, align 1
+  store i8 %.unpack, ptr %_param_data, align 1
+  %.temp.1.gep = getelementptr inbounds [7 x i8], ptr %_param_data, i64 0, i64 1
+  store i8 %.unpack2, ptr %.temp.1.gep, align 1
+  %.temp.2.gep = getelementptr inbounds [7 x i8], ptr %_param_data, i64 0, i64 2
+  store i8 %.unpack4, ptr %.temp.2.gep, align 1
+  %.temp.3.gep = getelementptr inbounds [7 x i8], ptr %_param_data, i64 0, i64 3
+  store i8 %.unpack6, ptr %.temp.3.gep, align 1
+  %.temp.4.gep = getelementptr inbounds [7 x i8], ptr %_param_data, i64 0, i64 4
+  store i8 %.unpack8, ptr %.temp.4.gep, align 1
+  %.temp.5.gep = getelementptr inbounds [7 x i8], ptr %_param_data, i64 0, i64 5
+  store i8 %.unpack10, ptr %.temp.5.gep, align 1
+  %.temp.6.gep = getelementptr inbounds [7 x i8], ptr %_param_data, i64 0, i64 6
+  store i8 %.unpack12, ptr %.temp.6.gep, align 1
+  call void @callee(ptr nonnull %_param_data)
   ret i8 0
 }
 
-declare void @callee(i8*) local_unnamed_addr #0
+declare void @callee(ptr) local_unnamed_addr #0
 
 attributes #0 = { nounwind }

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64-calls.ll b/llvm/test/CodeGen/PowerPC/ppc64-calls.ll
index 8975c7296650..321b5bddaae6 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-calls.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-calls.ll
@@ -39,7 +39,7 @@ define dso_local void @test_weak() nounwind readnone {
 }
 
 ; Indirect calls requires a full stub creation
-define dso_local void @test_indirect(void ()* nocapture %fp) nounwind {
+define dso_local void @test_indirect(ptr nocapture %fp) nounwind {
 ; CHECK-LABEL: test_indirect:
   tail call void %fp() nounwind
 ; CHECK: ld [[FP:[0-9]+]], 0(3)
@@ -56,7 +56,7 @@ define dso_local void @test_indirect(void ()* nocapture %fp) nounwind {
 ; used on 64-bit SVR4 (as e.g. on Darwin).
 define dso_local void @test_abs() nounwind {
 ; CHECK-LABEL: test_abs:
-  tail call void inttoptr (i64 1024 to void ()*)() nounwind
+  tail call void inttoptr (i64 1024 to ptr)() nounwind
 ; CHECK: ld [[FP:[0-9]+]], 1024(0)
 ; CHECK: ld 11, 1040(0)
 ; CHECK: ld 2, 1032(0)
@@ -79,10 +79,10 @@ define double @test_external(double %x) nounwind {
 
 ; The 'ld 2, 40(1)' really must always come directly after the bctrl to make
 ; the unwinding code in libgcc happy.
- at g = external global void ()*
+ at g = external global ptr
 declare void @h(i64)
 define dso_local void @test_indir_toc_reload(i64 %x) {
-  %1 = load void ()*, void ()** @g
+  %1 = load ptr, ptr @g
   call void %1()
   call void @h(i64 %x)
   ret void

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64-crash.ll b/llvm/test/CodeGen/PowerPC/ppc64-crash.ll
index 4431f0c15ece..b5d925b067f3 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-crash.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-crash.ll
@@ -8,7 +8,7 @@ target triple = "powerpc64-unknown-freebsd"
 %struct.pos_T = type { i64 }
 
 ; check that we're not copying stuff between R and X registers
-define internal void @serialize_pos(%struct.pos_T* byval(%struct.pos_T) %pos, %struct.__sFILE* %fp) nounwind {
+define internal void @serialize_pos(ptr byval(%struct.pos_T) %pos, ptr %fp) nounwind {
 entry:
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64-func-desc-hoist.ll b/llvm/test/CodeGen/PowerPC/ppc64-func-desc-hoist.ll
index 2e40664c8f91..0df66ffec7a0 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-func-desc-hoist.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-func-desc-hoist.ll
@@ -4,9 +4,8 @@ target datalayout = "E-m:e-i64:64-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
 ; Function Attrs: nounwind
-define void @bar(void (...)* nocapture %x) #0 {
+define void @bar(ptr nocapture %x) #0 {
 entry:
-  %callee.knr.cast = bitcast void (...)* %x to void ()*
   br label %for.body
 
 ; INVFUNCDESC-LABEL: @bar
@@ -32,7 +31,7 @@ entry:
 
 for.body:                                         ; preds = %for.body, %entry
   %i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
-  tail call void %callee.knr.cast() #0
+  tail call void %x() #0
   %inc = add nuw nsw i32 %i.02, 1
   %exitcond = icmp eq i32 %inc, 1600000000
   br i1 %exitcond, label %for.end, label %for.body

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64-get-cache-line-size.ll b/llvm/test/CodeGen/PowerPC/ppc64-get-cache-line-size.ll
index 0a3dc4eb94fb..1a0060000187 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-get-cache-line-size.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-get-cache-line-size.ll
@@ -9,8 +9,8 @@
 ; Function Attrs: nounwind
 define signext i32 @check_cache_line() local_unnamed_addr {
 entry:
-  %call = tail call i32* bitcast (i32* (...)* @magici to i32* ()*)()
-  %call115 = tail call signext i32 bitcast (i32 (...)* @iter to i32 ()*)()
+  %call = tail call ptr @magici()
+  %call115 = tail call signext i32 @iter()
   %cmp16 = icmp sgt i32 %call115, 0
   br i1 %cmp16, label %for.body, label %for.cond.cleanup
 
@@ -21,15 +21,15 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
 for.body:                                         ; preds = %entry, %for.body
   %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
   %res.017 = phi i32 [ %add5, %for.body ], [ 0, %entry ]
-  %arrayidx = getelementptr inbounds i32, i32* %call, i64 %indvars.iv
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %call, i64 %indvars.iv
+  %0 = load i32, ptr %arrayidx, align 4
   %add = add nsw i32 %0, %res.017
   %1 = add nuw nsw i64 %indvars.iv, 16
-  %arrayidx4 = getelementptr inbounds i32, i32* %call, i64 %1
-  %2 = load i32, i32* %arrayidx4, align 4
+  %arrayidx4 = getelementptr inbounds i32, ptr %call, i64 %1
+  %2 = load i32, ptr %arrayidx4, align 4
   %add5 = add nsw i32 %add, %2
   %indvars.iv.next = add nuw i64 %indvars.iv, 1
-  %call1 = tail call signext i32 bitcast (i32 (...)* @iter to i32 ()*)()
+  %call1 = tail call signext i32 @iter()
   %3 = sext i32 %call1 to i64
   %cmp = icmp slt i64 %indvars.iv.next, %3
   br i1 %cmp, label %for.body, label %for.cond.cleanup
@@ -43,7 +43,7 @@ for.body:                                         ; preds = %entry, %for.body
 ; CHECK-DCBT: blr
 }
 
-declare i32* @magici(...) local_unnamed_addr
+declare ptr @magici(...) local_unnamed_addr
 
 declare signext i32 @iter(...) local_unnamed_addr
 

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64-i128-abi.ll b/llvm/test/CodeGen/PowerPC/ppc64-i128-abi.ll
index b5333550977a..d456284db67b 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-i128-abi.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-i128-abi.ll
@@ -212,7 +212,7 @@ define i128 @i128_increment_by_val(i128 %a, i128 %b) nounwind {
 ; callee. See comments for individual functions above for details on registers
 ; used for parameters.
 define <1 x i128> @call_v1i128_increment_by_one() nounwind {
-       %tmp = load <1 x i128>, <1 x i128>* @x, align 16
+       %tmp = load <1 x i128>, ptr @x, align 16
        %ret = call <1 x i128> @v1i128_increment_by_one(<1 x i128> %tmp)
        ret <1 x i128> %ret
 
@@ -241,8 +241,8 @@ define <1 x i128> @call_v1i128_increment_by_one() nounwind {
 }
 
 define <1 x i128> @call_v1i128_increment_by_val() nounwind {
-       %tmp = load <1 x i128>, <1 x i128>* @x, align 16
-       %tmp2 = load <1 x i128>, <1 x i128>* @y, align 16
+       %tmp = load <1 x i128>, ptr @x, align 16
+       %tmp2 = load <1 x i128>, ptr @y, align 16
        %ret = call <1 x i128> @v1i128_increment_by_val(<1 x i128> %tmp, <1 x i128> %tmp2)
        ret <1 x i128> %ret
 
@@ -280,7 +280,7 @@ define <1 x i128> @call_v1i128_increment_by_val() nounwind {
 }
 
 define i128 @call_i128_increment_by_one() nounwind {
-       %tmp = load i128, i128* @a, align 16
+       %tmp = load i128, ptr @a, align 16
        %ret = call i128 @i128_increment_by_one(i128 %tmp)
        ret i128 %ret
 ;       %ret4 = call i128 @i128_increment_by_val(i128 %tmp2, i128 %tmp2)
@@ -304,8 +304,8 @@ define i128 @call_i128_increment_by_one() nounwind {
 }
 
 define i128 @call_i128_increment_by_val() nounwind {
-       %tmp = load i128, i128* @a, align 16
-       %tmp2 = load i128, i128* @b, align 16
+       %tmp = load i128, ptr @a, align 16
+       %tmp2 = load i128, ptr @b, align 16
        %ret = call i128 @i128_increment_by_val(i128 %tmp, i128 %tmp2)
        ret i128 %ret
 ; CHECK-LE-LABEL: @call_i128_increment_by_val
@@ -363,8 +363,8 @@ entry:
 
 define i128 @i128_split() {
 entry:
-  %0 = load i128, i128* @a, align 16
-  %1 = load i128, i128* @b, align 16
+  %0 = load i128, ptr @a, align 16
+  %1 = load i128, ptr @b, align 16
   %call = tail call i128 @callee_i128_split(i32 1, i128 %0, i32 4, i32 5,
                                            i32 6, i32 7, i128 %1, i32 8, i128 9)
   ret i128 %call

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64-icbt-pwr7.ll b/llvm/test/CodeGen/PowerPC/ppc64-icbt-pwr7.ll
index e0ff14e60bb0..760f08c7166f 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-icbt-pwr7.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-icbt-pwr7.ll
@@ -2,11 +2,11 @@
 ; Based on the ppc64-prefetch.ll test
 ; RUN: not --crash llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 < %s 2>&1 | FileCheck %s
  
-declare void @llvm.prefetch(i8*, i32, i32, i32)
+declare void @llvm.prefetch(ptr, i32, i32, i32)
 
-define void @test(i8* %a, ...) nounwind {
+define void @test(ptr %a, ...) nounwind {
 entry:
-  call void @llvm.prefetch(i8* %a, i32 0, i32 3, i32 0)
+  call void @llvm.prefetch(ptr %a, i32 0, i32 3, i32 0)
   ret void
 
 ; FIXME: Crashing is not really the correct behavior here, we really should just emit nothing

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64-icbt-pwr8.ll b/llvm/test/CodeGen/PowerPC/ppc64-icbt-pwr8.ll
index 3441b58a3db9..f9320563f618 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-icbt-pwr8.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-icbt-pwr8.ll
@@ -2,11 +2,11 @@
 ; Copied from the ppc64-prefetch.ll test
 ; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
 
-declare void @llvm.prefetch(i8*, i32, i32, i32)
+declare void @llvm.prefetch(ptr, i32, i32, i32)
 
-define void @test(i8* %a, ...) nounwind {
+define void @test(ptr %a, ...) nounwind {
 entry:
-  call void @llvm.prefetch(i8* %a, i32 0, i32 3, i32 0)
+  call void @llvm.prefetch(ptr %a, i32 0, i32 3, i32 0)
   ret void
 
 ; CHECK-LABEL: @test

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64-nest.ll b/llvm/test/CodeGen/PowerPC/ppc64-nest.ll
index cd2366cfa450..32b85f50f4a5 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-nest.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-nest.ll
@@ -5,26 +5,26 @@ target triple = "powerpc64-unknown-linux-gnu"
 ; Tests that the 'nest' parameter attribute causes the relevant parameter to be
 ; passed in the right register (r11 for PPC).
 
-define i8* @nest_receiver(i8* nest %arg) nounwind {
+define ptr @nest_receiver(ptr nest %arg) nounwind {
 ; CHECK-LABEL: nest_receiver:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: mr 3, 11
 ; CHECK-NEXT: blr
 
-  ret i8* %arg
+  ret ptr %arg
 }
 
-define i8* @nest_caller(i8* %arg) nounwind {
+define ptr @nest_caller(ptr %arg) nounwind {
 ; CHECK-LABEL: nest_caller:
 ; CHECK: mr 11, 3
 ; CHECK-NEXT: bl nest_receiver
 ; CHECK: blr
 
-  %result = call i8* @nest_receiver(i8* nest %arg)
-  ret i8* %result
+  %result = call ptr @nest_receiver(ptr nest %arg)
+  ret ptr %result
 }
 
-define void @test_indirect(i32 ()* nocapture %f, i8* %p) {
+define void @test_indirect(ptr nocapture %f, ptr %p) {
 entry:
 
 ; CHECK-LABEL: test_indirect
@@ -35,8 +35,7 @@ entry:
 ; CHECK: bctrl
 ; CHECK: blr
 
-  %callee.knr.cast = bitcast i32 ()* %f to i32 (i8*)*
-  %call = tail call signext i32 %callee.knr.cast(i8* nest %p)
+  %call = tail call signext i32 %f(ptr nest %p)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64-nonfunc-calls.ll b/llvm/test/CodeGen/PowerPC/ppc64-nonfunc-calls.ll
index 7d1c23536a56..8962e55623a5 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-nonfunc-calls.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-nonfunc-calls.ll
@@ -11,7 +11,7 @@ target triple = "powerpc64-unknown-linux-gnu"
 ; Function Attrs: nounwind
 define dso_local void @foo() #0 {
 entry:
-  tail call void bitcast ([33 x i8]* @something to void ()*)() #0
+  tail call void @something() #0
   ret void
 
 ; CHECK-LABEL: @foo
@@ -30,7 +30,7 @@ entry:
 ; Function Attrs: nounwind
 define dso_local void @bar() #0 {
 entry:
-  tail call void bitcast (%struct.cd* @tls_something to void ()*)() #0
+  tail call void @tls_something() #0
   ret void
 
 ; CHECK-LABEL: @bar
@@ -49,7 +49,7 @@ entry:
 ; Function Attrs: nounwind
 define dso_local void @ext() #0 {
 entry:
-  tail call void bitcast (%struct.cd* @extern_something to void ()*)() #0
+  tail call void @extern_something() #0
   ret void
 
 ; CHECK-LABEL: @ext

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64-pre-inc-no-extra-phi.ll b/llvm/test/CodeGen/PowerPC/ppc64-pre-inc-no-extra-phi.ll
index ab84707ce46e..a866cb8fa6dc 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-pre-inc-no-extra-phi.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-pre-inc-no-extra-phi.ll
@@ -11,14 +11,14 @@ entry:
 
 while.cond:
   %l.0 = phi i64 [ 0, %entry ], [ %inc, %while.cond ]
-  %arrayidx = getelementptr inbounds [100 x i64], [100 x i64]* @perm, i64 0, i64 %l.0
-  %0 = load i64, i64* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds [100 x i64], ptr @perm, i64 0, i64 %l.0
+  %0 = load i64, ptr %arrayidx, align 8
   %cmp = icmp sgt i64 %0, 0
   %inc = add nuw nsw i64 %l.0, 1
   br i1 %cmp, label %while.cond, label %while.end
 
 while.end:
-  store i64 0, i64* %arrayidx, align 8
+  store i64 0, ptr %arrayidx, align 8
   ret void
 ; CHECK-LABEL: sort_basket
 ; CHECK: addi {{[0-9]+}}, {{[0-9]+}}, -8

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64-prefetch.ll b/llvm/test/CodeGen/PowerPC/ppc64-prefetch.ll
index 044a2e4a7494..7d5f59120467 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-prefetch.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-prefetch.ll
@@ -2,29 +2,29 @@ target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
 target triple = "powerpc64-unknown-linux-gnu"
 ; RUN: llc -verify-machineinstrs -mcpu=a2 < %s | FileCheck %s
 
-define void @test1(i8* %a, ...) nounwind {
+define void @test1(ptr %a, ...) nounwind {
 entry:
-  call void @llvm.prefetch(i8* %a, i32 0, i32 3, i32 1)
+  call void @llvm.prefetch(ptr %a, i32 0, i32 3, i32 1)
   ret void
 
 ; CHECK-LABEL: @test1
 ; CHECK: dcbt
 }
 
-declare void @llvm.prefetch(i8*, i32, i32, i32)
+declare void @llvm.prefetch(ptr, i32, i32, i32)
 
-define void @test2(i8* %a, ...) nounwind {
+define void @test2(ptr %a, ...) nounwind {
 entry:
-  call void @llvm.prefetch(i8* %a, i32 1, i32 3, i32 1)
+  call void @llvm.prefetch(ptr %a, i32 1, i32 3, i32 1)
   ret void
 
 ; CHECK-LABEL: @test2
 ; CHECK: dcbtst
 }
 
-define void @test3(i8* %a, ...) nounwind {
+define void @test3(ptr %a, ...) nounwind {
 entry:
-  call void @llvm.prefetch(i8* %a, i32 0, i32 3, i32 0)
+  call void @llvm.prefetch(ptr %a, i32 0, i32 3, i32 0)
   ret void
 
 ; CHECK-LABEL: @test3

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64-rop-protection-aix.ll b/llvm/test/CodeGen/PowerPC/ppc64-rop-protection-aix.ll
index 5bae2d7c5436..a56636ad1d22 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-rop-protection-aix.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-rop-protection-aix.ll
@@ -276,7 +276,7 @@ entry:
 ;; outside of the initial 288 byte volatile program storage region in the
 ;; Protected Zone. However, this restriction will be removed in an upcoming
 ;; revision of the ABI.
-define dso_local zeroext i32 @spill(i32* nocapture readonly %in) #0 {
+define dso_local zeroext i32 @spill(ptr nocapture readonly %in) #0 {
 ; BE-P10-LABEL: spill:
 ; BE-P10:       # %bb.0: # %entry
 ; BE-P10-NEXT:    mflr r0
@@ -1904,21 +1904,20 @@ define dso_local zeroext i32 @spill(i32* nocapture readonly %in) #0 {
 ; BE-32BIT-P8-PRIV-NEXT:    blr
 entry:
   %local = alloca i32, align 4
-  %0 = bitcast i32* %local to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
-  %arrayidx = getelementptr inbounds i32, i32* %in, i64 3
-  %1 = load i32, i32* %arrayidx, align 4
-  store i32 %1, i32* %local, align 4
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %local)
+  %arrayidx = getelementptr inbounds i32, ptr %in, i64 3
+  %0 = load i32, ptr %arrayidx, align 4
+  store i32 %0, ptr %local, align 4
   tail call void asm sideeffect "nop", "~{cr2},~{cr3},~{cr4},~{r0},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{r16},~{r17},~{r18},~{r19},~{r20},~{r21},~{r22},~{r23},~{r24},~{r25},~{r26},~{r27},~{r28},~{r29},~{r30},~{r31},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
-  %call = call zeroext i32 @callee2(i32* nonnull %local)
-  %arrayidx1 = getelementptr inbounds i32, i32* %in, i64 4
-  %2 = load i32, i32* %arrayidx1, align 4
-  %add = add i32 %2, %call
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
+  %call = call zeroext i32 @callee2(ptr nonnull %local)
+  %arrayidx1 = getelementptr inbounds i32, ptr %in, i64 4
+  %1 = load i32, ptr %arrayidx1, align 4
+  %add = add i32 %1, %call
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %local)
   ret i32 %add
 }
 
-define dso_local zeroext i32 @shrinkwrap(i32* readonly %in) #0 {
+define dso_local zeroext i32 @shrinkwrap(ptr readonly %in) #0 {
 ; BE-P10-LABEL: shrinkwrap:
 ; BE-P10:       # %bb.0: # %entry
 ; BE-P10-NEXT:    cmpldi r3, 0
@@ -2262,20 +2261,19 @@ define dso_local zeroext i32 @shrinkwrap(i32* readonly %in) #0 {
 ; BE-32BIT-P8-PRIV-NEXT:    blr
 entry:
   %local = alloca i32, align 4
-  %tobool.not = icmp eq i32* %in, null
+  %tobool.not = icmp eq ptr %in, null
   br i1 %tobool.not, label %return, label %if.end
 
 if.end:                                           ; preds = %entry
-  %0 = bitcast i32* %local to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
-  %arrayidx = getelementptr inbounds i32, i32* %in, i64 3
-  %1 = load i32, i32* %arrayidx, align 4
-  store i32 %1, i32* %local, align 4
-  %call = call zeroext i32 @callee2(i32* nonnull %local)
-  %arrayidx1 = getelementptr inbounds i32, i32* %in, i64 4
-  %2 = load i32, i32* %arrayidx1, align 4
-  %add = add i32 %2, %call
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %local)
+  %arrayidx = getelementptr inbounds i32, ptr %in, i64 3
+  %0 = load i32, ptr %arrayidx, align 4
+  store i32 %0, ptr %local, align 4
+  %call = call zeroext i32 @callee2(ptr nonnull %local)
+  %arrayidx1 = getelementptr inbounds i32, ptr %in, i64 4
+  %1 = load i32, ptr %arrayidx1, align 4
+  %add = add i32 %1, %call
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %local)
   br label %return
 
 return:                                           ; preds = %entry, %if.end
@@ -2283,7 +2281,7 @@ return:                                           ; preds = %entry, %if.end
   ret i32 %retval.0
 }
 
-define dso_local zeroext i32 @aligned(i32* nocapture readonly %in) #0 {
+define dso_local zeroext i32 @aligned(ptr nocapture readonly %in) #0 {
 ; BE-P10-LABEL: aligned:
 ; BE-P10:       # %bb.0: # %entry
 ; BE-P10-NEXT:    mflr r0
@@ -2797,35 +2795,32 @@ entry:
   %beforeLocal = alloca i32, align 4
   %local = alloca i32, align 32768
   %afterLocal = alloca i32, align 4
-  %0 = bitcast i32* %beforeLocal to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
-  %arrayidx = getelementptr inbounds i32, i32* %in, i64 1
-  %1 = load i32, i32* %arrayidx, align 4
-  store i32 %1, i32* %beforeLocal, align 4
-  %2 = bitcast i32* %local to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %2)
-  %arrayidx1 = getelementptr inbounds i32, i32* %in, i64 3
-  %3 = load i32, i32* %arrayidx1, align 4
-  store i32 %3, i32* %local, align 32768
-  %4 = bitcast i32* %afterLocal to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %4)
-  %arrayidx2 = getelementptr inbounds i32, i32* %in, i64 5
-  %5 = load i32, i32* %arrayidx2, align 4
-  store i32 %5, i32* %afterLocal, align 4
-  %call = call zeroext i32 @callee3(i32* nonnull %local, i32* nonnull %beforeLocal, i32* nonnull %afterLocal)
-  %arrayidx3 = getelementptr inbounds i32, i32* %in, i64 4
-  %6 = load i32, i32* %arrayidx3, align 4
-  %add = add i32 %6, %call
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %4)
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %2)
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %beforeLocal)
+  %arrayidx = getelementptr inbounds i32, ptr %in, i64 1
+  %0 = load i32, ptr %arrayidx, align 4
+  store i32 %0, ptr %beforeLocal, align 4
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %local)
+  %arrayidx1 = getelementptr inbounds i32, ptr %in, i64 3
+  %1 = load i32, ptr %arrayidx1, align 4
+  store i32 %1, ptr %local, align 32768
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %afterLocal)
+  %arrayidx2 = getelementptr inbounds i32, ptr %in, i64 5
+  %2 = load i32, ptr %arrayidx2, align 4
+  store i32 %2, ptr %afterLocal, align 4
+  %call = call zeroext i32 @callee3(ptr nonnull %local, ptr nonnull %beforeLocal, ptr nonnull %afterLocal)
+  %arrayidx3 = getelementptr inbounds i32, ptr %in, i64 4
+  %3 = load i32, ptr %arrayidx3, align 4
+  %add = add i32 %3, %call
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %afterLocal)
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %local)
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %beforeLocal)
   ret i32 %add
 }
 
 declare zeroext i32 @callee(i32 zeroext) local_unnamed_addr
-declare zeroext i32 @callee2(i32*) local_unnamed_addr
-declare zeroext i32 @callee3(i32*, i32*, i32*) local_unnamed_addr
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare zeroext i32 @callee2(ptr) local_unnamed_addr
+declare zeroext i32 @callee3(ptr, ptr, ptr) local_unnamed_addr
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
 attributes #0 = { nounwind }

diff --git a/llvm/test/CodeGen/PowerPC/ppc64-rop-protection.ll b/llvm/test/CodeGen/PowerPC/ppc64-rop-protection.ll
index 958347c9c952..f7f2f4b1c186 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-rop-protection.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-rop-protection.ll
@@ -413,7 +413,7 @@ entry:
 ;; outside of the initial 288 byte volatile program storage region in the
 ;; Protected Zone. However, this restriction will be removed in an upcoming
 ;; revision of the ABI.
-define dso_local zeroext i32 @spill(i32* nocapture readonly %in) #0 {
+define dso_local zeroext i32 @spill(ptr nocapture readonly %in) #0 {
 ; LE-P10-LABEL: spill:
 ; LE-P10:       # %bb.0: # %entry
 ; LE-P10-NEXT:    mflr r0
@@ -2840,21 +2840,20 @@ define dso_local zeroext i32 @spill(i32* nocapture readonly %in) #0 {
 ; BE-P8-PRIV-NEXT:    blr
 entry:
   %local = alloca i32, align 4
-  %0 = bitcast i32* %local to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
-  %arrayidx = getelementptr inbounds i32, i32* %in, i64 3
-  %1 = load i32, i32* %arrayidx, align 4
-  store i32 %1, i32* %local, align 4
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %local)
+  %arrayidx = getelementptr inbounds i32, ptr %in, i64 3
+  %0 = load i32, ptr %arrayidx, align 4
+  store i32 %0, ptr %local, align 4
   tail call void asm sideeffect "nop", "~{cr2},~{cr3},~{cr4},~{r0},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{r16},~{r17},~{r18},~{r19},~{r20},~{r21},~{r22},~{r23},~{r24},~{r25},~{r26},~{r27},~{r28},~{r29},~{r30},~{r31},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
-  %call = call zeroext i32 @callee2(i32* nonnull %local)
-  %arrayidx1 = getelementptr inbounds i32, i32* %in, i64 4
-  %2 = load i32, i32* %arrayidx1, align 4
-  %add = add i32 %2, %call
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
+  %call = call zeroext i32 @callee2(ptr nonnull %local)
+  %arrayidx1 = getelementptr inbounds i32, ptr %in, i64 4
+  %1 = load i32, ptr %arrayidx1, align 4
+  %add = add i32 %1, %call
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %local)
   ret i32 %add
 }
 
-define dso_local zeroext i32 @shrinkwrap(i32* readonly %in) #0 {
+define dso_local zeroext i32 @shrinkwrap(ptr readonly %in) #0 {
 ; LE-P10-LABEL: shrinkwrap:
 ; LE-P10:       # %bb.0: # %entry
 ; LE-P10-NEXT:    cmpldi r3, 0
@@ -3385,20 +3384,19 @@ define dso_local zeroext i32 @shrinkwrap(i32* readonly %in) #0 {
 ; BE-P8-PRIV-NEXT:    blr
 entry:
   %local = alloca i32, align 4
-  %tobool.not = icmp eq i32* %in, null
+  %tobool.not = icmp eq ptr %in, null
   br i1 %tobool.not, label %return, label %if.end
 
 if.end:                                           ; preds = %entry
-  %0 = bitcast i32* %local to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
-  %arrayidx = getelementptr inbounds i32, i32* %in, i64 3
-  %1 = load i32, i32* %arrayidx, align 4
-  store i32 %1, i32* %local, align 4
-  %call = call zeroext i32 @callee2(i32* nonnull %local)
-  %arrayidx1 = getelementptr inbounds i32, i32* %in, i64 4
-  %2 = load i32, i32* %arrayidx1, align 4
-  %add = add i32 %2, %call
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %local)
+  %arrayidx = getelementptr inbounds i32, ptr %in, i64 3
+  %0 = load i32, ptr %arrayidx, align 4
+  store i32 %0, ptr %local, align 4
+  %call = call zeroext i32 @callee2(ptr nonnull %local)
+  %arrayidx1 = getelementptr inbounds i32, ptr %in, i64 4
+  %1 = load i32, ptr %arrayidx1, align 4
+  %add = add i32 %1, %call
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %local)
   br label %return
 
 return:                                           ; preds = %entry, %if.end
@@ -3406,7 +3404,7 @@ return:                                           ; preds = %entry, %if.end
   ret i32 %retval.0
 }
 
-define dso_local zeroext i32 @aligned(i32* nocapture readonly %in) #0 {
+define dso_local zeroext i32 @aligned(ptr nocapture readonly %in) #0 {
 ; LE-P10-LABEL: aligned:
 ; LE-P10:       # %bb.0: # %entry
 ; LE-P10-NEXT:    mflr r0
@@ -4191,35 +4189,32 @@ entry:
   %beforeLocal = alloca i32, align 4
   %local = alloca i32, align 32768
   %afterLocal = alloca i32, align 4
-  %0 = bitcast i32* %beforeLocal to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
-  %arrayidx = getelementptr inbounds i32, i32* %in, i64 1
-  %1 = load i32, i32* %arrayidx, align 4
-  store i32 %1, i32* %beforeLocal, align 4
-  %2 = bitcast i32* %local to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %2)
-  %arrayidx1 = getelementptr inbounds i32, i32* %in, i64 3
-  %3 = load i32, i32* %arrayidx1, align 4
-  store i32 %3, i32* %local, align 32768
-  %4 = bitcast i32* %afterLocal to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %4)
-  %arrayidx2 = getelementptr inbounds i32, i32* %in, i64 5
-  %5 = load i32, i32* %arrayidx2, align 4
-  store i32 %5, i32* %afterLocal, align 4
-  %call = call zeroext i32 @callee3(i32* nonnull %local, i32* nonnull %beforeLocal, i32* nonnull %afterLocal)
-  %arrayidx3 = getelementptr inbounds i32, i32* %in, i64 4
-  %6 = load i32, i32* %arrayidx3, align 4
-  %add = add i32 %6, %call
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %4)
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %2)
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %beforeLocal)
+  %arrayidx = getelementptr inbounds i32, ptr %in, i64 1
+  %0 = load i32, ptr %arrayidx, align 4
+  store i32 %0, ptr %beforeLocal, align 4
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %local)
+  %arrayidx1 = getelementptr inbounds i32, ptr %in, i64 3
+  %1 = load i32, ptr %arrayidx1, align 4
+  store i32 %1, ptr %local, align 32768
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %afterLocal)
+  %arrayidx2 = getelementptr inbounds i32, ptr %in, i64 5
+  %2 = load i32, ptr %arrayidx2, align 4
+  store i32 %2, ptr %afterLocal, align 4
+  %call = call zeroext i32 @callee3(ptr nonnull %local, ptr nonnull %beforeLocal, ptr nonnull %afterLocal)
+  %arrayidx3 = getelementptr inbounds i32, ptr %in, i64 4
+  %3 = load i32, ptr %arrayidx3, align 4
+  %add = add i32 %3, %call
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %afterLocal)
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %local)
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %beforeLocal)
   ret i32 %add
 }
 
 declare zeroext i32 @callee(i32 zeroext) local_unnamed_addr
-declare zeroext i32 @callee2(i32*) local_unnamed_addr
-declare zeroext i32 @callee3(i32*, i32*, i32*) local_unnamed_addr
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare zeroext i32 @callee2(ptr) local_unnamed_addr
+declare zeroext i32 @callee3(ptr, ptr, ptr) local_unnamed_addr
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
 attributes #0 = { nounwind }

diff --git a/llvm/test/CodeGen/PowerPC/ppc64-sibcall-shrinkwrap.ll b/llvm/test/CodeGen/PowerPC/ppc64-sibcall-shrinkwrap.ll
index 1c083c38dc12..d67f6a029a0d 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-sibcall-shrinkwrap.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-sibcall-shrinkwrap.ll
@@ -9,9 +9,9 @@
 declare void @__assert_fail();
 
 define dso_local i8 @_ZNK5clang9NamedDecl23getLinkageAndVisibilityEv(
-    %"class.clang::NamedDecl"* %this) {
+    ptr %this) {
 entry:
-  %tobool = icmp eq %"class.clang::NamedDecl"* %this, null
+  %tobool = icmp eq ptr %this, null
   br i1 %tobool, label %cond.false, label %exit
 
 cond.false:
@@ -19,12 +19,9 @@ cond.false:
   unreachable
 
 exit:
-  %DeclKind = getelementptr inbounds
-                            %"class.clang::NamedDecl",
-                            %"class.clang::NamedDecl"* %this, i64 0, i32 0
-  %bf.load = load i32, i32* %DeclKind, align 4
+  %bf.load = load i32, ptr %this, align 4
   %call.i = tail call i8 @LVComputationKind(
-    %"class.clang::NamedDecl"* %this,
+    ptr %this,
     i32 %bf.load)
   ret i8 %call.i
 
@@ -38,7 +35,7 @@ exit:
 }
 
 define dso_local fastcc i8 @LVComputationKind(
-    %"class.clang::NamedDecl"* %D,
+    ptr %D,
     i32 %computation) {
   ret i8 0
 }

diff --git a/llvm/test/CodeGen/PowerPC/ppc64-sibcall.ll b/llvm/test/CodeGen/PowerPC/ppc64-sibcall.ll
index 6671f7939c0d..8268495e9b8b 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-sibcall.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-sibcall.ll
@@ -11,12 +11,12 @@
 %S_32 = type { [7 x i32], i32 }
 
 ; Function Attrs: noinline nounwind
-define dso_local void @callee_56_copy([7 x i64] %a, %S_56* %b) #0 { ret void }
-define dso_local void @callee_64_copy([8 x i64] %a, %S_64* %b) #0 { ret void }
+define dso_local void @callee_56_copy([7 x i64] %a, ptr %b) #0 { ret void }
+define dso_local void @callee_64_copy([8 x i64] %a, ptr %b) #0 { ret void }
 
 ; Function Attrs: nounwind
-define dso_local void @caller_56_reorder_copy(%S_56* %b, [7 x i64] %a) #1 {
-  tail call void @callee_56_copy([7 x i64] %a, %S_56* %b)
+define dso_local void @caller_56_reorder_copy(ptr %b, [7 x i64] %a) #1 {
+  tail call void @callee_56_copy([7 x i64] %a, ptr %b)
   ret void
 
 ; CHECK-SCO-LABEL: caller_56_reorder_copy:
@@ -24,8 +24,8 @@ define dso_local void @caller_56_reorder_copy(%S_56* %b, [7 x i64] %a) #1 {
 ; CHECK-SCO: TC_RETURNd8 callee_56_copy
 }
 
-define dso_local void @caller_64_reorder_copy(%S_64* %b, [8 x i64] %a) #1 {
-  tail call void @callee_64_copy([8 x i64] %a, %S_64* %b)
+define dso_local void @caller_64_reorder_copy(ptr %b, [8 x i64] %a) #1 {
+  tail call void @callee_64_copy([8 x i64] %a, ptr %b)
   ret void
 
 ; CHECK-SCO-LABEL: caller_64_reorder_copy:
@@ -67,17 +67,17 @@ define dso_local void @caller_64_64_undef_copy([8 x i64] %a, [8 x i64] %b) #1 {
 }
 
 define dso_local void @arg8_callee(
-  float %a, i32 signext %b, float %c, i32* %d,
-  i8 zeroext %e, float %f, i32* %g, i32 signext %h)
+  float %a, i32 signext %b, float %c, ptr %d,
+  i8 zeroext %e, float %f, ptr %g, i32 signext %h)
 {
   ret void
 }
 
-define dso_local void @arg8_caller(float %a, i32 signext %b, i8 zeroext %c, i32* %d) {
+define dso_local void @arg8_caller(float %a, i32 signext %b, i8 zeroext %c, ptr %d) {
 entry:
   tail call void @arg8_callee(float undef, i32 signext undef, float undef,
-                              i32* %d, i8 zeroext undef, float undef,
-                              i32* undef, i32 signext undef)
+                              ptr %d, i8 zeroext undef, float undef,
+                              ptr undef, i32 signext undef)
   ret void
 
 ; CHECK-SCO-LABEL: arg8_caller:
@@ -87,15 +87,14 @@ entry:
 ; Struct return test
 
 ; Function Attrs: noinline nounwind
-define dso_local void @callee_sret_56(%S_56* noalias sret(%S_56) %agg.result) #0 { ret void }
-define dso_local void @callee_sret_32(%S_32* noalias sret(%S_32) %agg.result) #0 { ret void }
+define dso_local void @callee_sret_56(ptr noalias sret(%S_56) %agg.result) #0 { ret void }
+define dso_local void @callee_sret_32(ptr noalias sret(%S_32) %agg.result) #0 { ret void }
 
 ; Function Attrs: nounwind
-define dso_local void @caller_do_something_sret_32(%S_32* noalias sret(%S_32) %agg.result) #1 {
+define dso_local void @caller_do_something_sret_32(ptr noalias sret(%S_32) %agg.result) #1 {
   %1 = alloca %S_56, align 4
-  %2 = bitcast %S_56* %1 to i8*
-  call void @callee_sret_56(%S_56* nonnull sret(%S_56) %1)
-  tail call void @callee_sret_32(%S_32* sret(%S_32) %agg.result)
+  call void @callee_sret_56(ptr nonnull sret(%S_56) %1)
+  tail call void @callee_sret_32(ptr sret(%S_32) %agg.result)
   ret void
 
 ; CHECK-SCO-LABEL: caller_do_something_sret_32:
@@ -105,9 +104,9 @@ define dso_local void @caller_do_something_sret_32(%S_32* noalias sret(%S_32) %a
 ; CHECK-SCO: TC_RETURNd8 callee_sret_32
 }
 
-define dso_local void @caller_local_sret_32(%S_32* %a) #1 {
+define dso_local void @caller_local_sret_32(ptr %a) #1 {
   %tmp = alloca %S_32, align 4
-  tail call void @callee_sret_32(%S_32* nonnull sret(%S_32) %tmp)
+  tail call void @callee_sret_32(ptr nonnull sret(%S_32) %tmp)
   ret void
 
 ; CHECK-SCO-LABEL: caller_local_sret_32:
@@ -117,9 +116,9 @@ define dso_local void @caller_local_sret_32(%S_32* %a) #1 {
 attributes #0 = { noinline nounwind  }
 attributes #1 = { nounwind }
 
-define dso_local void @f128_callee(i32* %ptr, ppc_fp128 %a, ppc_fp128 %b) { ret void }
-define dso_local void @f128_caller(i32* %ptr, ppc_fp128 %a, ppc_fp128 %b) {
-  tail call void @f128_callee(i32* %ptr, ppc_fp128 %a, ppc_fp128 %b)
+define dso_local void @f128_callee(ptr %ptr, ppc_fp128 %a, ppc_fp128 %b) { ret void }
+define dso_local void @f128_caller(ptr %ptr, ppc_fp128 %a, ppc_fp128 %b) {
+  tail call void @f128_callee(ptr %ptr, ppc_fp128 %a, ppc_fp128 %b)
   ret void
 
 ; CHECK-SCO-LABEL: f128_caller:
@@ -129,9 +128,9 @@ define dso_local void @f128_caller(i32* %ptr, ppc_fp128 %a, ppc_fp128 %b) {
 ; weak linkage test
 %class.T = type { [2 x i8] }
 
-define weak_odr hidden void @wo_hcallee(%class.T* %this, i8* %c) { ret void }
-define dso_local void @wo_hcaller(%class.T* %this, i8* %c) {
-  tail call void @wo_hcallee(%class.T* %this, i8* %c)
+define weak_odr hidden void @wo_hcallee(ptr %this, ptr %c) { ret void }
+define dso_local void @wo_hcaller(ptr %this, ptr %c) {
+  tail call void @wo_hcallee(ptr %this, ptr %c)
   ret void
 
 ; CHECK-SCO-LABEL: wo_hcaller:
@@ -141,9 +140,9 @@ define dso_local void @wo_hcaller(%class.T* %this, i8* %c) {
 ; SCM:       bl wo_hcallee
 }
 
-define weak_odr protected void @wo_pcallee(%class.T* %this, i8* %c) { ret void }
-define dso_local void @wo_pcaller(%class.T* %this, i8* %c) {
-  tail call void @wo_pcallee(%class.T* %this, i8* %c)
+define weak_odr protected void @wo_pcallee(ptr %this, ptr %c) { ret void }
+define dso_local void @wo_pcaller(ptr %this, ptr %c) {
+  tail call void @wo_pcallee(ptr %this, ptr %c)
   ret void
 
 ; CHECK-SCO-LABEL: wo_pcaller:
@@ -153,9 +152,9 @@ define dso_local void @wo_pcaller(%class.T* %this, i8* %c) {
 ; SCM:       bl wo_pcallee
 }
 
-define weak_odr void @wo_callee(%class.T* %this, i8* %c) { ret void }
-define dso_local void @wo_caller(%class.T* %this, i8* %c) {
-  tail call void @wo_callee(%class.T* %this, i8* %c)
+define weak_odr void @wo_callee(ptr %this, ptr %c) { ret void }
+define dso_local void @wo_caller(ptr %this, ptr %c) {
+  tail call void @wo_callee(ptr %this, ptr %c)
   ret void
 
 ; CHECK-SCO-LABEL: wo_caller:
@@ -165,9 +164,9 @@ define dso_local void @wo_caller(%class.T* %this, i8* %c) {
 ; SCM:       bl wo_callee
 }
 
-define weak protected void @w_pcallee(i8* %ptr) { ret void }
-define dso_local void @w_pcaller(i8* %ptr) {
-  tail call void @w_pcallee(i8* %ptr)
+define weak protected void @w_pcallee(ptr %ptr) { ret void }
+define dso_local void @w_pcaller(ptr %ptr) {
+  tail call void @w_pcallee(ptr %ptr)
   ret void
 
 ; CHECK-SCO-LABEL: w_pcaller:
@@ -177,9 +176,9 @@ define dso_local void @w_pcaller(i8* %ptr) {
 ; SCM:       bl w_pcallee
 }
 
-define weak hidden void @w_hcallee(i8* %ptr) { ret void }
-define dso_local void @w_hcaller(i8* %ptr) {
-  tail call void @w_hcallee(i8* %ptr)
+define weak hidden void @w_hcallee(ptr %ptr) { ret void }
+define dso_local void @w_hcaller(ptr %ptr) {
+  tail call void @w_hcallee(ptr %ptr)
   ret void
 
 ; CHECK-SCO-LABEL: w_hcaller:
@@ -189,9 +188,9 @@ define dso_local void @w_hcaller(i8* %ptr) {
 ; SCM:       bl w_hcallee
 }
 
-define weak void @w_callee(i8* %ptr) { ret void }
-define dso_local void @w_caller(i8* %ptr) {
-  tail call void @w_callee(i8* %ptr)
+define weak void @w_callee(ptr %ptr) { ret void }
+define dso_local void @w_caller(ptr %ptr) {
+  tail call void @w_callee(ptr %ptr)
   ret void
 
 ; CHECK-SCO-LABEL: w_caller:
@@ -204,9 +203,9 @@ define dso_local void @w_caller(i8* %ptr) {
 %struct.byvalTest = type { [8 x i8] }
 @byval = common global %struct.byvalTest zeroinitializer
 
-define dso_local void @byval_callee(%struct.byvalTest* byval(%struct.byvalTest) %ptr) { ret void }
+define dso_local void @byval_callee(ptr byval(%struct.byvalTest) %ptr) { ret void }
 define dso_local void @byval_caller() {
-  tail call void @byval_callee(%struct.byvalTest* byval(%struct.byvalTest) @byval)
+  tail call void @byval_callee(ptr byval(%struct.byvalTest) @byval)
   ret void
 
 ; CHECK-SCO-LABEL: bl byval_callee

diff --git a/llvm/test/CodeGen/PowerPC/ppc64-smallarg.ll b/llvm/test/CodeGen/PowerPC/ppc64-smallarg.ll
index cd6ccd8bb88d..af24164496ff 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-smallarg.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-smallarg.ll
@@ -13,12 +13,10 @@ target triple = "powerpc64-unknown-linux-gnu"
 @gs = common global %struct.small_arg zeroinitializer, align 2
 @gf = common global float 0.000000e+00, align 4
 
-define void @callee1(%struct.small_arg* noalias nocapture sret(%struct.small_arg) %agg.result, %struct.large_arg* byval(%struct.large_arg) nocapture readnone %pad, %struct.small_arg* byval(%struct.small_arg) nocapture readonly %x) {
+define void @callee1(ptr noalias nocapture sret(%struct.small_arg) %agg.result, ptr byval(%struct.large_arg) nocapture readnone %pad, ptr byval(%struct.small_arg) nocapture readonly %x) {
 entry:
-  %0 = bitcast %struct.small_arg* %x to i32*
-  %1 = bitcast %struct.small_arg* %agg.result to i32*
-  %2 = load i32, i32* %0, align 2
-  store i32 %2, i32* %1, align 2
+  %0 = load i32, ptr %x, align 2
+  store i32 %0, ptr %agg.result, align 2
   ret void
 }
 ; CHECK: @callee1
@@ -28,14 +26,14 @@ entry:
 define void @caller1() {
 entry:
   %tmp = alloca %struct.small_arg, align 2
-  call void @test1(%struct.small_arg* sret(%struct.small_arg) %tmp, %struct.large_arg* byval(%struct.large_arg) @gl, %struct.small_arg* byval(%struct.small_arg) @gs)
+  call void @test1(ptr sret(%struct.small_arg) %tmp, ptr byval(%struct.large_arg) @gl, ptr byval(%struct.small_arg) @gs)
   ret void
 }
 ; CHECK: @caller1
 ; CHECK: stw {{[0-9]+}}, 124(1)
 ; CHECK: bl test1
 
-declare void @test1(%struct.small_arg* sret(%struct.small_arg), %struct.large_arg* byval(%struct.large_arg), %struct.small_arg* byval(%struct.small_arg))
+declare void @test1(ptr sret(%struct.small_arg), ptr byval(%struct.large_arg), ptr byval(%struct.small_arg))
 
 define float @callee2(float %pad1, float %pad2, float %pad3, float %pad4, float %pad5, float %pad6, float %pad7, float %pad8, float %pad9, float %pad10, float %pad11, float %pad12, float %pad13, float %x) {
 entry:
@@ -47,7 +45,7 @@ entry:
 
 define void @caller2() {
 entry:
-  %0 = load float, float* @gf, align 4
+  %0 = load float, ptr @gf, align 4
   %call = tail call float @test2(float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float %0)
   ret void
 }

diff --git a/llvm/test/CodeGen/PowerPC/ppc64-toc.ll b/llvm/test/CodeGen/PowerPC/ppc64-toc.ll
index 121aa0975023..7e797e702f21 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-toc.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-toc.ll
@@ -17,7 +17,7 @@ entry:
 ; CHECK-NEXT: .quad   0
 ; CHECK-NEXT: .text
 ; CHECK-NEXT: .L[[BEGIN]]:
-  %0 = load i64, i64* @number64, align 8
+  %0 = load i64, ptr @number64, align 8
 ; CHECK: ld {{[0-9]+}}, .LC{{[0-9]+}}@toc(2)
   %cmp = icmp eq i64 %0, %a
   %conv1 = zext i1 %cmp to i64 
@@ -28,7 +28,7 @@ define i64 @internal_static_var(i64 %a) nounwind {
 entry:
 ; CHECK-LABEL: internal_static_var:
 ; CHECK: ld {{[0-9]+}}, .LC{{[0-9]+}}@toc(2)
-  %0 = load i64, i64* @internal_static_var.x, align 8
+  %0 = load i64, ptr @internal_static_var.x, align 8
   %cmp = icmp eq i64 %0, %a
   %conv1 = zext i1 %cmp to i64 
   ret i64 %conv1 
@@ -48,8 +48,8 @@ define i32 @access_double_array(double %a, i32 %i) nounwind readonly {
 entry:
 ; CHECK-LABEL: access_double_array:
   %idxprom = sext i32 %i to i64
-  %arrayidx = getelementptr inbounds [32 x double], [32 x double]* @double_array, i64 0, i64 %idxprom
-  %0 = load double, double* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds [32 x double], ptr @double_array, i64 0, i64 %idxprom
+  %0 = load double, ptr %arrayidx, align 8
 ; CHECK: ld {{[0-9]+}}, .LC{{[0-9]+}}@toc(2)
   %cmp = fcmp oeq double %0, %a
   %conv = zext i1 %cmp to i32

diff --git a/llvm/test/CodeGen/PowerPC/ppc64-vaarg-int.ll b/llvm/test/CodeGen/PowerPC/ppc64-vaarg-int.ll
index 96be8f4e6be7..c3ee953d4dbe 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-vaarg-int.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-vaarg-int.ll
@@ -4,15 +4,14 @@ target triple = "powerpc64-unknown-linux-gnu"
 
 define i32 @intvaarg(i32 %a, ...) nounwind {
 entry:
-  %va = alloca i8*, align 8
-  %va1 = bitcast i8** %va to i8*
-  call void @llvm.va_start(i8* %va1)
-  %0 = va_arg i8** %va, i32
+  %va = alloca ptr, align 8
+  call void @llvm.va_start(ptr %va)
+  %0 = va_arg ptr %va, i32
   %sub = sub nsw i32 %a, %0
   ret i32 %sub
 }
 
-declare void @llvm.va_start(i8*) nounwind
+declare void @llvm.va_start(ptr) nounwind
 
 ; CHECK: @intvaarg
 ; Make sure that the va pointer is incremented by 8 (not 4).

diff --git a/llvm/test/CodeGen/PowerPC/ppc64-varargs.ll b/llvm/test/CodeGen/PowerPC/ppc64-varargs.ll
index 5aeedbf13e0b..e22dfe9f0803 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-varargs.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-varargs.ll
@@ -50,10 +50,9 @@ define i32 @f1(...) nounwind {
 ; LE-NEXT:    std r4, -8(r1)
 ; LE-NEXT:    blr
 entry:
-  %va = alloca i8*, align 8
-  %va.cast = bitcast i8** %va to i8*
-  call void @llvm.va_start(i8* %va.cast)
+  %va = alloca ptr, align 8
+  call void @llvm.va_start(ptr %va)
   ret i32 0
 }
 
-declare void @llvm.va_start(i8*) nounwind
+declare void @llvm.va_start(ptr) nounwind

diff --git a/llvm/test/CodeGen/PowerPC/ppc64-xxsplti32dx-pattern-check.ll b/llvm/test/CodeGen/PowerPC/ppc64-xxsplti32dx-pattern-check.ll
index b7f5b4581f76..45ebcf3c18bb 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-xxsplti32dx-pattern-check.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-xxsplti32dx-pattern-check.ll
@@ -14,7 +14,7 @@ declare dso_local void @callee() local_unnamed_addr
 
 define dso_local void @test_xxsplti32dx() local_unnamed_addr {
 entry:
-  %i1 = load double, double* undef, align 8
+  %i1 = load double, ptr undef, align 8
   br label %for.body124
 
 for.body124:
@@ -22,8 +22,8 @@ for.body124:
   br i1 undef, label %for.body919.preheader, label %for.end1072
 
 for.body919.preheader:
-  %i4 = load double, double* null, align 8
-  %i5 = load double, double* null, align 8
+  %i4 = load double, ptr null, align 8
+  %i5 = load double, ptr null, align 8
   %i15 = insertelement <2 x double> poison, double %i5, i32 0
   %i23 = insertelement <2 x double> undef, double %i4, i32 1
   %i24 = insertelement <2 x double> %i15, double 0x3FC5555555555555, i32 1
@@ -36,6 +36,6 @@ for.end1072:
   %E1 = phi double [ %E0, %for.body124 ], [ %sub994, %for.body919.preheader ]
   %i28 = phi <2 x double> [ zeroinitializer, %for.body124 ], [ %i15, %for.body919.preheader ]
   tail call void @callee()
-  store <2 x double> %i28, <2 x double>* undef, align 8
+  store <2 x double> %i28, ptr undef, align 8
   br label %for.body124
 }

diff --git a/llvm/test/CodeGen/PowerPC/ppc64le-aggregates.ll b/llvm/test/CodeGen/PowerPC/ppc64le-aggregates.ll
index 3819e2654f2a..eba481e3345d 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64le-aggregates.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64le-aggregates.ll
@@ -259,9 +259,9 @@ entry:
 
 define void @caller2() {
 entry:
-  %0 = load [8 x float], [8 x float]* getelementptr inbounds (%struct.float8, %struct.float8* @g8, i64 0, i32 0), align 4
-  %1 = load [5 x float], [5 x float]* getelementptr inbounds (%struct.float5, %struct.float5* @g5, i64 0, i32 0), align 4
-  %2 = load [2 x float], [2 x float]* getelementptr inbounds (%struct.float2, %struct.float2* @g2, i64 0, i32 0), align 4
+  %0 = load [8 x float], ptr @g8, align 4
+  %1 = load [5 x float], ptr @g5, align 4
+  %2 = load [2 x float], ptr @g2, align 4
   tail call void @test2([8 x float] %0, [5 x float] %1, [2 x float] %2)
   ret void
 }
@@ -298,8 +298,8 @@ entry:
 
 define void @caller3(double %d) {
 entry:
-  %0 = load [8 x float], [8 x float]* getelementptr inbounds (%struct.float8, %struct.float8* @g8, i64 0, i32 0), align 4
-  %1 = load [5 x float], [5 x float]* getelementptr inbounds (%struct.float5, %struct.float5* @g5, i64 0, i32 0), align 4
+  %0 = load [8 x float], ptr @g8, align 4
+  %1 = load [5 x float], ptr @g5, align 4
   tail call void @test3([8 x float] %0, [5 x float] %1, double %d)
   ret void
 }
@@ -321,8 +321,8 @@ entry:
 
 define void @caller4(float %f) {
 entry:
-  %0 = load [8 x float], [8 x float]* getelementptr inbounds (%struct.float8, %struct.float8* @g8, i64 0, i32 0), align 4
-  %1 = load [5 x float], [5 x float]* getelementptr inbounds (%struct.float5, %struct.float5* @g5, i64 0, i32 0), align 4
+  %0 = load [8 x float], ptr @g8, align 4
+  %1 = load [5 x float], ptr @g5, align 4
   tail call void @test4([8 x float] %0, [5 x float] %1, float %f)
   ret void
 }

diff --git a/llvm/test/CodeGen/PowerPC/ppc64le-calls.ll b/llvm/test/CodeGen/PowerPC/ppc64le-calls.ll
index 3eab2dba5644..dcb133773f79 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64le-calls.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64le-calls.ll
@@ -8,7 +8,7 @@ target datalayout = "e-m:e-i64:64-n32:64"
 target triple = "powerpc64le-unknown-linux-gnu"
 
 ; Indirect calls requires a full stub creation
-define void @test_indirect(void ()* nocapture %fp) {
+define void @test_indirect(ptr nocapture %fp) {
 ; CHECK-LABEL: @test_indirect
   tail call void %fp()
 ; CHECK-DAG: std 2, 24(1)

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64le-crsave.ll b/llvm/test/CodeGen/PowerPC/ppc64le-crsave.ll
index f32b3e7dfc92..53329582360f 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64le-crsave.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64le-crsave.ll
@@ -2,9 +2,9 @@
 target datalayout = "e-m:e-i64:64-n32:64"
 target triple = "powerpc64le-unknown-linux-gnu"
 
- at _ZTIi = external constant i8*
-declare i8* @__cxa_allocate_exception(i64)
-declare void @__cxa_throw(i8*, i8*, i8*)
+ at _ZTIi = external constant ptr
+declare ptr @__cxa_allocate_exception(i64)
+declare void @__cxa_throw(ptr, ptr, ptr)
 
 define void @crsave() {
 entry:
@@ -12,10 +12,9 @@ entry:
   call void asm sideeffect "", "~{cr3}"()
   call void asm sideeffect "", "~{cr4}"()
 
-  %exception = call i8* @__cxa_allocate_exception(i64 4)
-  %0 = bitcast i8* %exception to i32*
-  store i32 0, i32* %0
-  call void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null)
+  %exception = call ptr @__cxa_allocate_exception(i64 4)
+  store i32 0, ptr %exception
+  call void @__cxa_throw(ptr %exception, ptr @_ZTIi, ptr null)
   unreachable
 
 return:                                           ; No predecessors!

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64le-localentry-large.ll b/llvm/test/CodeGen/PowerPC/ppc64le-localentry-large.ll
index ec51f91a3059..749f4f040c85 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64le-localentry-large.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64le-localentry-large.ll
@@ -19,7 +19,7 @@ entry:
 ; CHECK-NEXT: .Lfunc_lep[[FN]]:
 ; CHECK-NEXT: .localentry use_toc, .Lfunc_lep[[FN]]-.Lfunc_gep[[FN]]
 ; CHECK-NEXT: %entry
-  %0 = load i64, i64* @number64, align 8
+  %0 = load i64, ptr @number64, align 8
   %cmp = icmp eq i64 %0, %a
   %conv1 = zext i1 %cmp to i64
   ret i64 %conv1

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64le-localentry.ll b/llvm/test/CodeGen/PowerPC/ppc64le-localentry.ll
index e894a545a1bf..d2b7cfb55169 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64le-localentry.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64le-localentry.ll
@@ -23,7 +23,7 @@ entry:
 ; CHECK-NEXT: .Lfunc_lep[[FN]]:
 ; CHECK-NEXT: .localentry use_toc, .Lfunc_lep[[FN]]-.Lfunc_gep[[FN]]
 ; CHECK-NEXT: %entry
-  %0 = load i64, i64* @number64, align 8
+  %0 = load i64, ptr @number64, align 8
   %cmp = icmp eq i64 %0, %a
   %conv1 = zext i1 %cmp to i64
   ret i64 %conv1

diff  --git a/llvm/test/CodeGen/PowerPC/ppc64le-smallarg.ll b/llvm/test/CodeGen/PowerPC/ppc64le-smallarg.ll
index 4b2982401ba8..fa2e09a8e4b6 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64le-smallarg.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64le-smallarg.ll
@@ -13,12 +13,10 @@ target triple = "powerpc64le-unknown-linux-gnu"
 @gs = common global %struct.small_arg zeroinitializer, align 2
 @gf = common global float 0.000000e+00, align 4
 
-define void @callee1(%struct.small_arg* noalias nocapture sret(%struct.small_arg) %agg.result, %struct.large_arg* byval(%struct.large_arg) nocapture readnone %pad, %struct.small_arg* byval(%struct.small_arg) nocapture readonly %x) {
+define void @callee1(ptr noalias nocapture sret(%struct.small_arg) %agg.result, ptr byval(%struct.large_arg) nocapture readnone %pad, ptr byval(%struct.small_arg) nocapture readonly %x) {
 entry:
-  %0 = bitcast %struct.small_arg* %x to i32*
-  %1 = bitcast %struct.small_arg* %agg.result to i32*
-  %2 = load i32, i32* %0, align 2
-  store i32 %2, i32* %1, align 2
+  %0 = load i32, ptr %x, align 2
+  store i32 %0, ptr %agg.result, align 2
   ret void
 }
 ; CHECK: @callee1
@@ -28,14 +26,14 @@ entry:
 define void @caller1() {
 entry:
   %tmp = alloca %struct.small_arg, align 2
-  call void @test1(%struct.small_arg* sret(%struct.small_arg) %tmp, %struct.large_arg* byval(%struct.large_arg) @gl, %struct.small_arg* byval(%struct.small_arg) @gs)
+  call void @test1(ptr sret(%struct.small_arg) %tmp, ptr byval(%struct.large_arg) @gl, ptr byval(%struct.small_arg) @gs)
   ret void
 }
 ; CHECK: @caller1
 ; CHECK: stw {{[0-9]+}}, 104(1)
 ; CHECK: bl test1
 
-declare void @test1(%struct.small_arg* sret(%struct.small_arg), %struct.large_arg* byval(%struct.large_arg), %struct.small_arg* byval(%struct.small_arg))
+declare void @test1(ptr sret(%struct.small_arg), ptr byval(%struct.large_arg), ptr byval(%struct.small_arg))
 
 define float @callee2(float %pad1, float %pad2, float %pad3, float %pad4, float %pad5, float %pad6, float %pad7, float %pad8, float %pad9, float %pad10, float %pad11, float %pad12, float %pad13, float %x) {
 entry:
@@ -47,7 +45,7 @@ entry:
 
 define void @caller2() {
 entry:
-  %0 = load float, float* @gf, align 4
+  %0 = load float, ptr @gf, align 4
   %call = tail call float @test2(float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float %0)
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/ppc_fp128-bcwriter.ll b/llvm/test/CodeGen/PowerPC/ppc_fp128-bcwriter.ll
index 101a0e49abde..3966f85cc866 100644
--- a/llvm/test/CodeGen/PowerPC/ppc_fp128-bcwriter.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc_fp128-bcwriter.ll
@@ -6,10 +6,10 @@
 define i32 @main() local_unnamed_addr {
 _main_entry:
   %e3 = alloca ppc_fp128, align 16
-  store ppc_fp128 0xM0000000000000000FFFFFFFFFFFFFFFF, ppc_fp128* %e3, align 16
-  %0 = call i64 @foo( ppc_fp128* nonnull %e3)
+  store ppc_fp128 0xM0000000000000000FFFFFFFFFFFFFFFF, ptr %e3, align 16
+  %0 = call i64 @foo( ptr nonnull %e3)
   ret i32 undef
 }
 
-declare i64 @foo(ppc_fp128 *)
+declare i64 @foo(ptr)
 

diff  --git a/llvm/test/CodeGen/PowerPC/ppcf128-1.ll b/llvm/test/CodeGen/PowerPC/ppcf128-1.ll
index 462faf5a8db2..4bee8ffa65f4 100644
--- a/llvm/test/CodeGen/PowerPC/ppcf128-1.ll
+++ b/llvm/test/CodeGen/PowerPC/ppcf128-1.ll
@@ -5,88 +5,88 @@ target triple = "powerpc-unknown-linux-gnu"
 
 define ppc_fp128 @plus(ppc_fp128 %x, ppc_fp128 %y) {
 entry:
-	%x_addr = alloca ppc_fp128		; <ppc_fp128*> [#uses=2]
-	%y_addr = alloca ppc_fp128		; <ppc_fp128*> [#uses=2]
-	%retval = alloca ppc_fp128, align 16		; <ppc_fp128*> [#uses=2]
-	%tmp = alloca ppc_fp128, align 16		; <ppc_fp128*> [#uses=2]
+	%x_addr = alloca ppc_fp128		; <ptr> [#uses=2]
+	%y_addr = alloca ppc_fp128		; <ptr> [#uses=2]
+	%retval = alloca ppc_fp128, align 16		; <ptr> [#uses=2]
+	%tmp = alloca ppc_fp128, align 16		; <ptr> [#uses=2]
 	%"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
-	store ppc_fp128 %x, ppc_fp128* %x_addr
-	store ppc_fp128 %y, ppc_fp128* %y_addr
-	%tmp1 = load ppc_fp128, ppc_fp128* %x_addr, align 16		; <ppc_fp128> [#uses=1]
-	%tmp2 = load ppc_fp128, ppc_fp128* %y_addr, align 16		; <ppc_fp128> [#uses=1]
+	store ppc_fp128 %x, ptr %x_addr
+	store ppc_fp128 %y, ptr %y_addr
+	%tmp1 = load ppc_fp128, ptr %x_addr, align 16		; <ppc_fp128> [#uses=1]
+	%tmp2 = load ppc_fp128, ptr %y_addr, align 16		; <ppc_fp128> [#uses=1]
 	%tmp3 = fadd ppc_fp128 %tmp1, %tmp2		; <ppc_fp128> [#uses=1]
-	store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16
-	%tmp4 = load ppc_fp128, ppc_fp128* %tmp, align 16		; <ppc_fp128> [#uses=1]
-	store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
+	store ppc_fp128 %tmp3, ptr %tmp, align 16
+	%tmp4 = load ppc_fp128, ptr %tmp, align 16		; <ppc_fp128> [#uses=1]
+	store ppc_fp128 %tmp4, ptr %retval, align 16
 	br label %return
 
 return:		; preds = %entry
-	%retval5 = load ppc_fp128, ppc_fp128* %retval		; <ppc_fp128> [#uses=1]
+	%retval5 = load ppc_fp128, ptr %retval		; <ppc_fp128> [#uses=1]
 	ret ppc_fp128 %retval5
 }
 
 define ppc_fp128 @minus(ppc_fp128 %x, ppc_fp128 %y) {
 entry:
-	%x_addr = alloca ppc_fp128		; <ppc_fp128*> [#uses=2]
-	%y_addr = alloca ppc_fp128		; <ppc_fp128*> [#uses=2]
-	%retval = alloca ppc_fp128, align 16		; <ppc_fp128*> [#uses=2]
-	%tmp = alloca ppc_fp128, align 16		; <ppc_fp128*> [#uses=2]
+	%x_addr = alloca ppc_fp128		; <ptr> [#uses=2]
+	%y_addr = alloca ppc_fp128		; <ptr> [#uses=2]
+	%retval = alloca ppc_fp128, align 16		; <ptr> [#uses=2]
+	%tmp = alloca ppc_fp128, align 16		; <ptr> [#uses=2]
 	%"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
-	store ppc_fp128 %x, ppc_fp128* %x_addr
-	store ppc_fp128 %y, ppc_fp128* %y_addr
-	%tmp1 = load ppc_fp128, ppc_fp128* %x_addr, align 16		; <ppc_fp128> [#uses=1]
-	%tmp2 = load ppc_fp128, ppc_fp128* %y_addr, align 16		; <ppc_fp128> [#uses=1]
+	store ppc_fp128 %x, ptr %x_addr
+	store ppc_fp128 %y, ptr %y_addr
+	%tmp1 = load ppc_fp128, ptr %x_addr, align 16		; <ppc_fp128> [#uses=1]
+	%tmp2 = load ppc_fp128, ptr %y_addr, align 16		; <ppc_fp128> [#uses=1]
 	%tmp3 = fsub ppc_fp128 %tmp1, %tmp2		; <ppc_fp128> [#uses=1]
-	store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16
-	%tmp4 = load ppc_fp128, ppc_fp128* %tmp, align 16		; <ppc_fp128> [#uses=1]
-	store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
+	store ppc_fp128 %tmp3, ptr %tmp, align 16
+	%tmp4 = load ppc_fp128, ptr %tmp, align 16		; <ppc_fp128> [#uses=1]
+	store ppc_fp128 %tmp4, ptr %retval, align 16
 	br label %return
 
 return:		; preds = %entry
-	%retval5 = load ppc_fp128, ppc_fp128* %retval		; <ppc_fp128> [#uses=1]
+	%retval5 = load ppc_fp128, ptr %retval		; <ppc_fp128> [#uses=1]
 	ret ppc_fp128 %retval5
 }
 
 define ppc_fp128 @times(ppc_fp128 %x, ppc_fp128 %y) {
 entry:
-	%x_addr = alloca ppc_fp128		; <ppc_fp128*> [#uses=2]
-	%y_addr = alloca ppc_fp128		; <ppc_fp128*> [#uses=2]
-	%retval = alloca ppc_fp128, align 16		; <ppc_fp128*> [#uses=2]
-	%tmp = alloca ppc_fp128, align 16		; <ppc_fp128*> [#uses=2]
+	%x_addr = alloca ppc_fp128		; <ptr> [#uses=2]
+	%y_addr = alloca ppc_fp128		; <ptr> [#uses=2]
+	%retval = alloca ppc_fp128, align 16		; <ptr> [#uses=2]
+	%tmp = alloca ppc_fp128, align 16		; <ptr> [#uses=2]
 	%"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
-	store ppc_fp128 %x, ppc_fp128* %x_addr
-	store ppc_fp128 %y, ppc_fp128* %y_addr
-	%tmp1 = load ppc_fp128, ppc_fp128* %x_addr, align 16		; <ppc_fp128> [#uses=1]
-	%tmp2 = load ppc_fp128, ppc_fp128* %y_addr, align 16		; <ppc_fp128> [#uses=1]
+	store ppc_fp128 %x, ptr %x_addr
+	store ppc_fp128 %y, ptr %y_addr
+	%tmp1 = load ppc_fp128, ptr %x_addr, align 16		; <ppc_fp128> [#uses=1]
+	%tmp2 = load ppc_fp128, ptr %y_addr, align 16		; <ppc_fp128> [#uses=1]
 	%tmp3 = fmul ppc_fp128 %tmp1, %tmp2		; <ppc_fp128> [#uses=1]
-	store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16
-	%tmp4 = load ppc_fp128, ppc_fp128* %tmp, align 16		; <ppc_fp128> [#uses=1]
-	store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
+	store ppc_fp128 %tmp3, ptr %tmp, align 16
+	%tmp4 = load ppc_fp128, ptr %tmp, align 16		; <ppc_fp128> [#uses=1]
+	store ppc_fp128 %tmp4, ptr %retval, align 16
 	br label %return
 
 return:		; preds = %entry
-	%retval5 = load ppc_fp128, ppc_fp128* %retval		; <ppc_fp128> [#uses=1]
+	%retval5 = load ppc_fp128, ptr %retval		; <ppc_fp128> [#uses=1]
 	ret ppc_fp128 %retval5
 }
 
 define ppc_fp128 @divide(ppc_fp128 %x, ppc_fp128 %y) {
 entry:
-	%x_addr = alloca ppc_fp128		; <ppc_fp128*> [#uses=2]
-	%y_addr = alloca ppc_fp128		; <ppc_fp128*> [#uses=2]
-	%retval = alloca ppc_fp128, align 16		; <ppc_fp128*> [#uses=2]
-	%tmp = alloca ppc_fp128, align 16		; <ppc_fp128*> [#uses=2]
+	%x_addr = alloca ppc_fp128		; <ptr> [#uses=2]
+	%y_addr = alloca ppc_fp128		; <ptr> [#uses=2]
+	%retval = alloca ppc_fp128, align 16		; <ptr> [#uses=2]
+	%tmp = alloca ppc_fp128, align 16		; <ptr> [#uses=2]
 	%"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
-	store ppc_fp128 %x, ppc_fp128* %x_addr
-	store ppc_fp128 %y, ppc_fp128* %y_addr
-	%tmp1 = load ppc_fp128, ppc_fp128* %x_addr, align 16		; <ppc_fp128> [#uses=1]
-	%tmp2 = load ppc_fp128, ppc_fp128* %y_addr, align 16		; <ppc_fp128> [#uses=1]
+	store ppc_fp128 %x, ptr %x_addr
+	store ppc_fp128 %y, ptr %y_addr
+	%tmp1 = load ppc_fp128, ptr %x_addr, align 16		; <ppc_fp128> [#uses=1]
+	%tmp2 = load ppc_fp128, ptr %y_addr, align 16		; <ppc_fp128> [#uses=1]
 	%tmp3 = fdiv ppc_fp128 %tmp1, %tmp2		; <ppc_fp128> [#uses=1]
-	store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16
-	%tmp4 = load ppc_fp128, ppc_fp128* %tmp, align 16		; <ppc_fp128> [#uses=1]
-	store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
+	store ppc_fp128 %tmp3, ptr %tmp, align 16
+	%tmp4 = load ppc_fp128, ptr %tmp, align 16		; <ppc_fp128> [#uses=1]
+	store ppc_fp128 %tmp4, ptr %retval, align 16
 	br label %return
 
 return:		; preds = %entry
-	%retval5 = load ppc_fp128, ppc_fp128* %retval		; <ppc_fp128> [#uses=1]
+	%retval5 = load ppc_fp128, ptr %retval		; <ppc_fp128> [#uses=1]
 	ret ppc_fp128 %retval5
 }

diff  --git a/llvm/test/CodeGen/PowerPC/ppcf128-3.ll b/llvm/test/CodeGen/PowerPC/ppcf128-3.ll
index b9a35e014923..3706417069a8 100644
--- a/llvm/test/CodeGen/PowerPC/ppcf128-3.ll
+++ b/llvm/test/CodeGen/PowerPC/ppcf128-3.ll
@@ -1,28 +1,28 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32--
 	%struct.stp_sequence = type { double, double }
 
-define i32 @stp_sequence_set_short_data(%struct.stp_sequence* %sequence, i32 %count, i16* %data) {
+define i32 @stp_sequence_set_short_data(ptr %sequence, i32 %count, ptr %data) {
 entry:
 	%tmp1112 = sitofp i16 0 to ppc_fp128		; <ppc_fp128> [#uses=1]
 	%tmp13 = call i32 (...) @__inline_isfinite( ppc_fp128 %tmp1112 ) nounwind 		; <i32> [#uses=0]
 	ret i32 0
 }
 
-define i32 @stp_sequence_set_short_data2(%struct.stp_sequence* %sequence, i32 %count, i16* %data) {
+define i32 @stp_sequence_set_short_data2(ptr %sequence, i32 %count, ptr %data) {
 entry:
 	%tmp1112 = sitofp i8 0 to ppc_fp128		; <ppc_fp128> [#uses=1]
 	%tmp13 = call i32 (...) @__inline_isfinite( ppc_fp128 %tmp1112 ) nounwind 		; <i32> [#uses=0]
 	ret i32 0
 }
 
-define i32 @stp_sequence_set_short_data3(%struct.stp_sequence* %sequence, i32 %count, i16* %data) {
+define i32 @stp_sequence_set_short_data3(ptr %sequence, i32 %count, ptr %data) {
 entry:
 	%tmp1112 = uitofp i16 0 to ppc_fp128		; <ppc_fp128> [#uses=1]
 	%tmp13 = call i32 (...) @__inline_isfinite( ppc_fp128 %tmp1112 ) nounwind 		; <i32> [#uses=0]
 	ret i32 0
 }
 
-define i32 @stp_sequence_set_short_data4(%struct.stp_sequence* %sequence, i32 %count, i16* %data) {
+define i32 @stp_sequence_set_short_data4(ptr %sequence, i32 %count, ptr %data) {
 entry:
 	%tmp1112 = uitofp i8 0 to ppc_fp128		; <ppc_fp128> [#uses=1]
 	%tmp13 = call i32 (...) @__inline_isfinite( ppc_fp128 %tmp1112 ) nounwind 		; <i32> [#uses=0]

diff  --git a/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll
index f32a3c504af6..22d29af9b73b 100644
--- a/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll
@@ -1413,7 +1413,7 @@ entry:
 
 ; Test that resultant libcalls retain order even when their non-strict FLOP form could be
 ; trivially optimized into 
diff ering sequences.
-define void @test_constrained_libcall_multichain(float* %firstptr, ppc_fp128* %result) #0 {
+define void @test_constrained_libcall_multichain(ptr %firstptr, ptr %result) #0 {
 ; PC64LE-LABEL: test_constrained_libcall_multichain:
 ; PC64LE:       # %bb.0:
 ; PC64LE-NEXT:    mflr 0
@@ -1570,11 +1570,11 @@ define void @test_constrained_libcall_multichain(float* %firstptr, ppc_fp128* %r
 ; PC64-NEXT:    ld 0, 16(1)
 ; PC64-NEXT:    mtlr 0
 ; PC64-NEXT:    blr
-  %load = load float, float* %firstptr
+  %load = load float, ptr %firstptr
   %first = call ppc_fp128 @llvm.experimental.constrained.fpext.f32.ppcf128(
                     float %load,
                     metadata !"fpexcept.strict") #1
-  store ppc_fp128 %first, ppc_fp128* %result
+  store ppc_fp128 %first, ptr %result
 
   ; For unconstrained FLOPs, these next two FP instructions would necessarily
   ; be executed in series with one another.
@@ -1583,15 +1583,15 @@ define void @test_constrained_libcall_multichain(float* %firstptr, ppc_fp128* %r
                     ppc_fp128 %first,
                     metadata !"round.dynamic",
                     metadata !"fpexcept.strict") #1
-  %stridx1 = getelementptr ppc_fp128, ppc_fp128* %result, i32 1
-  store ppc_fp128 %fadd, ppc_fp128* %stridx1
+  %stridx1 = getelementptr ppc_fp128, ptr %result, i32 1
+  store ppc_fp128 %fadd, ptr %stridx1
   %fmul = call ppc_fp128 @llvm.experimental.constrained.fmul.ppcf128(
                     ppc_fp128 %fadd,
                     ppc_fp128 %fadd,
                     metadata !"round.dynamic",
                     metadata !"fpexcept.strict") #1
-  %stridx2 = getelementptr ppc_fp128, ppc_fp128* %stridx1, i32 1
-  store ppc_fp128 %fadd, ppc_fp128* %stridx2
+  %stridx2 = getelementptr ppc_fp128, ptr %stridx1, i32 1
+  store ppc_fp128 %fadd, ptr %stridx2
 
   ; For unconstrained FLOPs, these next two FP instructions could be reordered
   ; or even executed in parallel with respect to the previous two instructions.
@@ -1605,9 +1605,9 @@ define void @test_constrained_libcall_multichain(float* %firstptr, ppc_fp128* %r
                     ppc_fp128 %powi,
                     metadata !"round.dynamic",
                     metadata !"fpexcept.strict") #1
-  store float %tinypow, float* %firstptr
-  %stridxn1 = getelementptr ppc_fp128, ppc_fp128* %result, i32 -1
-  store ppc_fp128 %powi, ppc_fp128* %stridxn1
+  store float %tinypow, ptr %firstptr
+  %stridxn1 = getelementptr ppc_fp128, ptr %result, i32 -1
+  store ppc_fp128 %powi, ptr %stridxn1
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/ppcf128-endian.ll b/llvm/test/CodeGen/PowerPC/ppcf128-endian.ll
index 666303406a7d..dc6b11160b8b 100644
--- a/llvm/test/CodeGen/PowerPC/ppcf128-endian.ll
+++ b/llvm/test/CodeGen/PowerPC/ppcf128-endian.ll
@@ -18,9 +18,9 @@ define void @callee(ppc_fp128 %x) {
 ; CHECK-NEXT:    blr
 entry:
   %x.addr = alloca ppc_fp128, align 16
-  store ppc_fp128 %x, ppc_fp128* %x.addr, align 16
-  %0 = load ppc_fp128, ppc_fp128* %x.addr, align 16
-  store ppc_fp128 %0, ppc_fp128* @g, align 16
+  store ppc_fp128 %x, ptr %x.addr, align 16
+  %0 = load ppc_fp128, ptr %x.addr, align 16
+  store ppc_fp128 %0, ptr @g, align 16
   ret void
 }
 
@@ -43,7 +43,7 @@ define void @caller() {
 ; CHECK-NEXT:    mtlr 0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load ppc_fp128, ppc_fp128* @g, align 16
+  %0 = load ppc_fp128, ptr @g, align 16
   call void @test(ppc_fp128 %0)
   ret void
 }
@@ -82,7 +82,7 @@ define ppc_fp128 @result() {
 ; CHECK-NEXT:    lfd 2, 8(3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load ppc_fp128, ppc_fp128* @g, align 16
+  %0 = load ppc_fp128, ptr @g, align 16
   ret ppc_fp128 %0
 }
 
@@ -106,7 +106,7 @@ define void @use_result() {
 ; CHECK-NEXT:    blr
 entry:
   %call = tail call ppc_fp128 @test_result() #3
-  store ppc_fp128 %call, ppc_fp128* @g, align 16
+  store ppc_fp128 %call, ptr @g, align 16
   ret void
 }
 
@@ -191,7 +191,7 @@ entry:
   ret double %conv
 }
 
-declare void @llvm.va_start(i8*)
+declare void @llvm.va_start(ptr)
 
 define double @vararg(i32 %a, ...) {
 ; CHECK-LABEL: vararg:
@@ -212,10 +212,9 @@ define double @vararg(i32 %a, ...) {
 ; CHECK-NEXT:    std 3, -8(1)
 ; CHECK-NEXT:    blr
 entry:
-  %va = alloca i8*, align 8
-  %va1 = bitcast i8** %va to i8*
-  call void @llvm.va_start(i8* %va1)
-  %arg = va_arg i8** %va, ppc_fp128
+  %va = alloca ptr, align 8
+  call void @llvm.va_start(ptr %va)
+  %arg = va_arg ptr %va, ppc_fp128
   %conv = fptrunc ppc_fp128 %arg to double
   ret double %conv
 }

diff  --git a/llvm/test/CodeGen/PowerPC/ppcf128sf.ll b/llvm/test/CodeGen/PowerPC/ppcf128sf.ll
index fde7d48da7c2..e9e718c86326 100644
--- a/llvm/test/CodeGen/PowerPC/ppcf128sf.ll
+++ b/llvm/test/CodeGen/PowerPC/ppcf128sf.ll
@@ -11,22 +11,22 @@
 define void @foo() #0 {
 entry:
   %c = alloca ppc_fp128, align 16
-  %0 = load ppc_fp128, ppc_fp128* @ld, align 16
-  %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
+  %0 = load ppc_fp128, ptr @ld, align 16
+  %1 = load ppc_fp128, ptr @ld2, align 16
   %add = fadd ppc_fp128 %0, %1
-  store volatile ppc_fp128 %add, ppc_fp128* %c, align 16
-  %2 = load ppc_fp128, ppc_fp128* @ld, align 16
-  %3 = load ppc_fp128, ppc_fp128* @ld2, align 16
+  store volatile ppc_fp128 %add, ptr %c, align 16
+  %2 = load ppc_fp128, ptr @ld, align 16
+  %3 = load ppc_fp128, ptr @ld2, align 16
   %sub = fsub ppc_fp128 %2, %3
-  store volatile ppc_fp128 %sub, ppc_fp128* %c, align 16
-  %4 = load ppc_fp128, ppc_fp128* @ld, align 16
-  %5 = load ppc_fp128, ppc_fp128* @ld2, align 16
+  store volatile ppc_fp128 %sub, ptr %c, align 16
+  %4 = load ppc_fp128, ptr @ld, align 16
+  %5 = load ppc_fp128, ptr @ld2, align 16
   %mul = fmul ppc_fp128 %4, %5
-  store volatile ppc_fp128 %mul, ppc_fp128* %c, align 16
-  %6 = load ppc_fp128, ppc_fp128* @ld, align 16
-  %7 = load ppc_fp128, ppc_fp128* @ld2, align 16
+  store volatile ppc_fp128 %mul, ptr %c, align 16
+  %6 = load ppc_fp128, ptr @ld, align 16
+  %7 = load ppc_fp128, ptr @ld2, align 16
   %div = fdiv ppc_fp128 %6, %7
-  store volatile ppc_fp128 %div, ppc_fp128* %c, align 16
+  store volatile ppc_fp128 %div, ptr %c, align 16
   ret void
 
   ; CHECK-LABEL:    __gcc_qadd
@@ -37,9 +37,9 @@ entry:
 
 define void @foo1() #0 {
 entry:
-  %0 = load double, double* @d, align 8
+  %0 = load double, ptr @d, align 8
   %conv = fpext double %0 to ppc_fp128
-  store ppc_fp128 %conv, ppc_fp128* @ld, align 16
+  store ppc_fp128 %conv, ptr @ld, align 16
   ret void
 
   ; CHECK-LABEL:    __gcc_dtoq
@@ -47,9 +47,9 @@ entry:
 
 define void @foo2() #0 {
 entry:
-  %0 = load ppc_fp128, ppc_fp128* @ld, align 16
+  %0 = load ppc_fp128, ptr @ld, align 16
   %conv = fptrunc ppc_fp128 %0 to double
-  store double %conv, double* @d, align 8
+  store double %conv, ptr @d, align 8
   ret void
 
   ; CHECK-LABEL:    __gcc_qtod
@@ -57,9 +57,9 @@ entry:
 
 define void @foo3() #0 {
 entry:
-  %0 = load ppc_fp128, ppc_fp128* @ld, align 16
+  %0 = load ppc_fp128, ptr @ld, align 16
   %conv = fptrunc ppc_fp128 %0 to float
-  store float %conv, float* @f, align 4
+  store float %conv, ptr @f, align 4
   ret void
 
   ; CHECK-LABEL:    __gcc_qtos
@@ -67,9 +67,9 @@ entry:
 
 define void @foo4() #0 {
 entry:
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   %conv = sitofp i32 %0 to ppc_fp128
-  store ppc_fp128 %conv, ppc_fp128* @ld, align 16
+  store ppc_fp128 %conv, ptr @ld, align 16
   ret void
 
   ; CHECK-LABEL:    __gcc_itoq
@@ -77,9 +77,9 @@ entry:
 
 define void @foo5() #0 {
 entry:
-  %0 = load i32, i32* @ui, align 4
+  %0 = load i32, ptr @ui, align 4
   %conv = uitofp i32 %0 to ppc_fp128
-  store ppc_fp128 %conv, ppc_fp128* @ld, align 16
+  store ppc_fp128 %conv, ptr @ld, align 16
   ret void
 
   ; CHECK-LABEL:    __gcc_utoq
@@ -87,12 +87,12 @@ entry:
 
 define void @foo6() #0 {
 entry:
-  %0 = load ppc_fp128, ppc_fp128* @ld, align 16
-  %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
+  %0 = load ppc_fp128, ptr @ld, align 16
+  %1 = load ppc_fp128, ptr @ld2, align 16
   %cmp = fcmp oeq ppc_fp128 %0, %1
   %conv = zext i1 %cmp to i32
   %conv1 = trunc i32 %conv to i8
-  store i8 %conv1, i8* @var, align 1
+  store i8 %conv1, ptr @var, align 1
   ret void
 
   ; CHECK-LABEL:    __gcc_qeq
@@ -100,12 +100,12 @@ entry:
 
 define void @foo7() #0 {
 entry:
-  %0 = load ppc_fp128, ppc_fp128* @ld, align 16
-  %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
+  %0 = load ppc_fp128, ptr @ld, align 16
+  %1 = load ppc_fp128, ptr @ld2, align 16
   %cmp = fcmp une ppc_fp128 %0, %1
   %conv = zext i1 %cmp to i32
   %conv1 = trunc i32 %conv to i8
-  store i8 %conv1, i8* @var, align 1
+  store i8 %conv1, ptr @var, align 1
   ret void
 
   ; CHECK-LABEL:    __gcc_qne
@@ -113,12 +113,12 @@ entry:
 
 define void @foo8() #0 {
 entry:
-  %0 = load ppc_fp128, ppc_fp128* @ld, align 16
-  %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
+  %0 = load ppc_fp128, ptr @ld, align 16
+  %1 = load ppc_fp128, ptr @ld2, align 16
   %cmp = fcmp ogt ppc_fp128 %0, %1
   %conv = zext i1 %cmp to i32
   %conv1 = trunc i32 %conv to i8
-  store i8 %conv1, i8* @var, align 1
+  store i8 %conv1, ptr @var, align 1
   ret void
 
   ; CHECK-LABEL:    __gcc_qgt
@@ -126,12 +126,12 @@ entry:
 
 define void @foo9() #0 {
 entry:
-  %0 = load ppc_fp128, ppc_fp128* @ld, align 16
-  %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
+  %0 = load ppc_fp128, ptr @ld, align 16
+  %1 = load ppc_fp128, ptr @ld2, align 16
   %cmp = fcmp olt ppc_fp128 %0, %1
   %conv = zext i1 %cmp to i32
   %conv1 = trunc i32 %conv to i8
-  store i8 %conv1, i8* @var, align 1
+  store i8 %conv1, ptr @var, align 1
   ret void
 
   ; CHECK-LABEL:    __gcc_qlt
@@ -139,12 +139,12 @@ entry:
 
 define void @foo10() #0 {
 entry:
-  %0 = load ppc_fp128, ppc_fp128* @ld, align 16
-  %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
+  %0 = load ppc_fp128, ptr @ld, align 16
+  %1 = load ppc_fp128, ptr @ld2, align 16
   %cmp = fcmp ole ppc_fp128 %0, %1
   %conv = zext i1 %cmp to i32
   %conv1 = trunc i32 %conv to i8
-  store i8 %conv1, i8* @var, align 1
+  store i8 %conv1, ptr @var, align 1
   ret void
 
   ; CHECK-LABEL:    __gcc_qle
@@ -152,12 +152,12 @@ entry:
 
 define void @foo11() #0 {
 entry:
-  %0 = load ppc_fp128, ppc_fp128* @ld, align 16
-  %1 = load ppc_fp128, ppc_fp128* @ld, align 16
+  %0 = load ppc_fp128, ptr @ld, align 16
+  %1 = load ppc_fp128, ptr @ld, align 16
   %cmp = fcmp une ppc_fp128 %0, %1
   %conv = zext i1 %cmp to i32
   %conv1 = trunc i32 %conv to i8
-  store i8 %conv1, i8* @var, align 1
+  store i8 %conv1, ptr @var, align 1
   ret void
 
   ; CHECK-LABEL:    __gcc_qunord
@@ -165,12 +165,12 @@ entry:
 
 define void @foo12() #0 {
 entry:
-  %0 = load ppc_fp128, ppc_fp128* @ld, align 16
-  %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
+  %0 = load ppc_fp128, ptr @ld, align 16
+  %1 = load ppc_fp128, ptr @ld2, align 16
   %cmp = fcmp oge ppc_fp128 %0, %1
   %conv = zext i1 %cmp to i32
   %conv1 = trunc i32 %conv to i8
-  store i8 %conv1, i8* @var, align 1
+  store i8 %conv1, ptr @var, align 1
   ret void
 
   ; CHECK-LABEL:    __gcc_qge

diff  --git a/llvm/test/CodeGen/PowerPC/ppcsoftops.ll b/llvm/test/CodeGen/PowerPC/ppcsoftops.ll
index 5ecfbb4414e9..2643f24426e8 100644
--- a/llvm/test/CodeGen/PowerPC/ppcsoftops.ll
+++ b/llvm/test/CodeGen/PowerPC/ppcsoftops.ll
@@ -7,8 +7,8 @@ define double @foo() #0 {
 entry:
   %a = alloca double, align 8
   %b = alloca double, align 8
-  %0 = load double, double* %a, align 8
-  %1 = load double, double* %b, align 8
+  %0 = load double, ptr %a, align 8
+  %1 = load double, ptr %b, align 8
   %add = fadd double %0, %1
   ret double %add
 
@@ -19,8 +19,8 @@ define double @foo1() #0 {
 entry:
   %a = alloca double, align 8
   %b = alloca double, align 8
-  %0 = load double, double* %a, align 8
-  %1 = load double, double* %b, align 8
+  %0 = load double, ptr %a, align 8
+  %1 = load double, ptr %b, align 8
   %mul = fmul double %0, %1
   ret double %mul
 
@@ -31,8 +31,8 @@ define double @foo2() #0 {
 entry:
   %a = alloca double, align 8
   %b = alloca double, align 8
-  %0 = load double, double* %a, align 8
-  %1 = load double, double* %b, align 8
+  %0 = load double, ptr %a, align 8
+  %1 = load double, ptr %b, align 8
   %sub = fsub double %0, %1
   ret double %sub
 
@@ -43,8 +43,8 @@ define double @foo3() #0 {
 entry:
   %a = alloca double, align 8
   %b = alloca double, align 8
-  %0 = load double, double* %a, align 8
-  %1 = load double, double* %b, align 8
+  %0 = load double, ptr %a, align 8
+  %1 = load double, ptr %b, align 8
   %div = fdiv double %0, %1
   ret double %div
 

diff  --git a/llvm/test/CodeGen/PowerPC/pr13891.ll b/llvm/test/CodeGen/PowerPC/pr13891.ll
index 816166a20fed..93a8a4231fa3 100644
--- a/llvm/test/CodeGen/PowerPC/pr13891.ll
+++ b/llvm/test/CodeGen/PowerPC/pr13891.ll
@@ -4,14 +4,13 @@ target triple = "powerpc64-unknown-linux-gnu"
 
 %struct.foo = type { i8, i8 }
 
-define void @_Z5check3foos(%struct.foo* nocapture byval(%struct.foo) %f, i16 signext %i) noinline {
+define void @_Z5check3foos(ptr nocapture byval(%struct.foo) %f, i16 signext %i) noinline {
 ; CHECK-LABEL: _Z5check3foos:
 ; CHECK: sth 3, {{[0-9]+}}(1)
 ; CHECK: lbz {{[0-9]+}}, {{[0-9]+}}(1)
 entry:
-  %0 = bitcast %struct.foo* %f to i16*
-  %1 = load i16, i16* %0, align 2
-  %bf.val.sext = ashr i16 %1, 8
+  %0 = load i16, ptr %f, align 2
+  %bf.val.sext = ashr i16 %0, 8
   %cmp = icmp eq i16 %bf.val.sext, %i
   br i1 %cmp, label %if.end, label %if.then
 

diff  --git a/llvm/test/CodeGen/PowerPC/pr15031.ll b/llvm/test/CodeGen/PowerPC/pr15031.ll
index 8c4c8f42d317..3c0010b00291 100644
--- a/llvm/test/CodeGen/PowerPC/pr15031.ll
+++ b/llvm/test/CodeGen/PowerPC/pr15031.ll
@@ -16,176 +16,176 @@
 target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
-%"class.llvm::MachineMemOperand" = type { %"struct.llvm::MachinePointerInfo", i64, i32, %"class.llvm::MDNode"*, %"class.llvm::MDNode"* }
-%"struct.llvm::MachinePointerInfo" = type { %"class.llvm::Value"*, i64 }
-%"class.llvm::Value" = type { i32 (...)**, i8, i8, i16, %"class.llvm::Type"*, %"class.llvm::Use"*, %"class.llvm::StringMapEntry"* }
-%"class.llvm::Type" = type { %"class.llvm::LLVMContext"*, i32, i32, %"class.llvm::Type"** }
-%"class.llvm::LLVMContext" = type { %"class.llvm::LLVMContextImpl"* }
+%"class.llvm::MachineMemOperand" = type { %"struct.llvm::MachinePointerInfo", i64, i32, ptr, ptr }
+%"struct.llvm::MachinePointerInfo" = type { ptr, i64 }
+%"class.llvm::Value" = type { ptr, i8, i8, i16, ptr, ptr, ptr }
+%"class.llvm::Type" = type { ptr, i32, i32, ptr }
+%"class.llvm::LLVMContext" = type { ptr }
 %"class.llvm::LLVMContextImpl" = type opaque
-%"class.llvm::Use" = type { %"class.llvm::Value"*, %"class.llvm::Use"*, %"class.llvm::PointerIntPair" }
+%"class.llvm::Use" = type { ptr, ptr, %"class.llvm::PointerIntPair" }
 %"class.llvm::PointerIntPair" = type { i64 }
 %"class.llvm::StringMapEntry" = type opaque
 %"class.llvm::MDNode" = type { %"class.llvm::Value", %"class.llvm::FoldingSetImpl::Node", i32, i32 }
-%"class.llvm::FoldingSetImpl::Node" = type { i8* }
-%"class.llvm::MachineInstr" = type { %"class.llvm::ilist_node", %"class.llvm::MCInstrDesc"*, %"class.llvm::MachineBasicBlock"*, %"class.llvm::MachineOperand"*, i32, %"class.llvm::ArrayRecycler<llvm::MachineOperand, 8>::Capacity", i8, i8, i8, %"class.llvm::MachineMemOperand"**, %"class.llvm::DebugLoc" }
-%"class.llvm::ilist_node" = type { %"class.llvm::ilist_half_node", %"class.llvm::MachineInstr"* }
-%"class.llvm::ilist_half_node" = type { %"class.llvm::MachineInstr"* }
-%"class.llvm::MCInstrDesc" = type { i16, i16, i16, i16, i16, i32, i64, i16*, i16*, %"class.llvm::MCOperandInfo"* }
+%"class.llvm::FoldingSetImpl::Node" = type { ptr }
+%"class.llvm::MachineInstr" = type { %"class.llvm::ilist_node", ptr, ptr, ptr, i32, %"class.llvm::ArrayRecycler<llvm::MachineOperand, 8>::Capacity", i8, i8, i8, ptr, %"class.llvm::DebugLoc" }
+%"class.llvm::ilist_node" = type { %"class.llvm::ilist_half_node", ptr }
+%"class.llvm::ilist_half_node" = type { ptr }
+%"class.llvm::MCInstrDesc" = type { i16, i16, i16, i16, i16, i32, i64, ptr, ptr, ptr }
 %"class.llvm::MCOperandInfo" = type { i16, i8, i8, i32 }
-%"class.llvm::MachineBasicBlock" = type { %"class.llvm::ilist_node.0", %"struct.llvm::ilist", %"class.llvm::BasicBlock"*, i32, %"class.llvm::MachineFunction"*, %"class.std::vector.163", %"class.std::vector.163", %"class.std::vector.123", %"class.std::vector.123", i32, i8, i8 }
-%"class.llvm::ilist_node.0" = type { %"class.llvm::ilist_half_node.1", %"class.llvm::MachineBasicBlock"* }
-%"class.llvm::ilist_half_node.1" = type { %"class.llvm::MachineBasicBlock"* }
+%"class.llvm::MachineBasicBlock" = type { %"class.llvm::ilist_node.0", %"struct.llvm::ilist", ptr, i32, ptr, %"class.std::vector.163", %"class.std::vector.163", %"class.std::vector.123", %"class.std::vector.123", i32, i8, i8 }
+%"class.llvm::ilist_node.0" = type { %"class.llvm::ilist_half_node.1", ptr }
+%"class.llvm::ilist_half_node.1" = type { ptr }
 %"struct.llvm::ilist" = type { %"class.llvm::iplist" }
-%"class.llvm::iplist" = type { %"struct.llvm::ilist_traits", %"class.llvm::MachineInstr"* }
-%"struct.llvm::ilist_traits" = type { %"class.llvm::ilist_half_node", %"class.llvm::MachineBasicBlock"* }
-%"class.llvm::BasicBlock" = type { %"class.llvm::Value", %"class.llvm::ilist_node.2", %"class.llvm::iplist.4", %"class.llvm::Function"* }
-%"class.llvm::ilist_node.2" = type { %"class.llvm::ilist_half_node.3", %"class.llvm::BasicBlock"* }
-%"class.llvm::ilist_half_node.3" = type { %"class.llvm::BasicBlock"* }
-%"class.llvm::iplist.4" = type { %"struct.llvm::ilist_traits.5", %"class.llvm::Instruction"* }
+%"class.llvm::iplist" = type { %"struct.llvm::ilist_traits", ptr }
+%"struct.llvm::ilist_traits" = type { %"class.llvm::ilist_half_node", ptr }
+%"class.llvm::BasicBlock" = type { %"class.llvm::Value", %"class.llvm::ilist_node.2", %"class.llvm::iplist.4", ptr }
+%"class.llvm::ilist_node.2" = type { %"class.llvm::ilist_half_node.3", ptr }
+%"class.llvm::ilist_half_node.3" = type { ptr }
+%"class.llvm::iplist.4" = type { %"struct.llvm::ilist_traits.5", ptr }
 %"struct.llvm::ilist_traits.5" = type { %"class.llvm::ilist_half_node.10" }
-%"class.llvm::ilist_half_node.10" = type { %"class.llvm::Instruction"* }
-%"class.llvm::Instruction" = type { %"class.llvm::User", %"class.llvm::ilist_node.193", %"class.llvm::BasicBlock"*, %"class.llvm::DebugLoc" }
-%"class.llvm::User" = type { %"class.llvm::Value", %"class.llvm::Use"*, i32 }
-%"class.llvm::ilist_node.193" = type { %"class.llvm::ilist_half_node.10", %"class.llvm::Instruction"* }
+%"class.llvm::ilist_half_node.10" = type { ptr }
+%"class.llvm::Instruction" = type { %"class.llvm::User", %"class.llvm::ilist_node.193", ptr, %"class.llvm::DebugLoc" }
+%"class.llvm::User" = type { %"class.llvm::Value", ptr, i32 }
+%"class.llvm::ilist_node.193" = type { %"class.llvm::ilist_half_node.10", ptr }
 %"class.llvm::DebugLoc" = type { i32, i32 }
-%"class.llvm::Function" = type { %"class.llvm::GlobalValue", %"class.llvm::ilist_node.27", %"class.llvm::iplist.47", %"class.llvm::iplist.54", %"class.llvm::ValueSymbolTable"*, %"class.llvm::AttributeSet" }
-%"class.llvm::GlobalValue" = type { [52 x i8], [4 x i8], %"class.llvm::Module"*, %"class.std::basic_string" }
-%"class.llvm::Module" = type { %"class.llvm::LLVMContext"*, %"class.llvm::iplist.11", %"class.llvm::iplist.20", %"class.llvm::iplist.29", %"struct.llvm::ilist.38", %"class.std::basic_string", %"class.llvm::ValueSymbolTable"*, %"class.llvm::OwningPtr", %"class.std::basic_string", %"class.std::basic_string", %"class.std::basic_string", i8* }
-%"class.llvm::iplist.11" = type { %"struct.llvm::ilist_traits.12", %"class.llvm::GlobalVariable"* }
+%"class.llvm::Function" = type { %"class.llvm::GlobalValue", %"class.llvm::ilist_node.27", %"class.llvm::iplist.47", %"class.llvm::iplist.54", ptr, %"class.llvm::AttributeSet" }
+%"class.llvm::GlobalValue" = type { [52 x i8], [4 x i8], ptr, %"class.std::basic_string" }
+%"class.llvm::Module" = type { ptr, %"class.llvm::iplist.11", %"class.llvm::iplist.20", %"class.llvm::iplist.29", %"struct.llvm::ilist.38", %"class.std::basic_string", ptr, %"class.llvm::OwningPtr", %"class.std::basic_string", %"class.std::basic_string", %"class.std::basic_string", ptr }
+%"class.llvm::iplist.11" = type { %"struct.llvm::ilist_traits.12", ptr }
 %"struct.llvm::ilist_traits.12" = type { %"class.llvm::ilist_node.18" }
-%"class.llvm::ilist_node.18" = type { %"class.llvm::ilist_half_node.19", %"class.llvm::GlobalVariable"* }
-%"class.llvm::ilist_half_node.19" = type { %"class.llvm::GlobalVariable"* }
+%"class.llvm::ilist_node.18" = type { %"class.llvm::ilist_half_node.19", ptr }
+%"class.llvm::ilist_half_node.19" = type { ptr }
 %"class.llvm::GlobalVariable" = type { %"class.llvm::GlobalValue", %"class.llvm::ilist_node.18", i8 }
-%"class.llvm::iplist.20" = type { %"struct.llvm::ilist_traits.21", %"class.llvm::Function"* }
+%"class.llvm::iplist.20" = type { %"struct.llvm::ilist_traits.21", ptr }
 %"struct.llvm::ilist_traits.21" = type { %"class.llvm::ilist_node.27" }
-%"class.llvm::ilist_node.27" = type { %"class.llvm::ilist_half_node.28", %"class.llvm::Function"* }
-%"class.llvm::ilist_half_node.28" = type { %"class.llvm::Function"* }
-%"class.llvm::iplist.29" = type { %"struct.llvm::ilist_traits.30", %"class.llvm::GlobalAlias"* }
+%"class.llvm::ilist_node.27" = type { %"class.llvm::ilist_half_node.28", ptr }
+%"class.llvm::ilist_half_node.28" = type { ptr }
+%"class.llvm::iplist.29" = type { %"struct.llvm::ilist_traits.30", ptr }
 %"struct.llvm::ilist_traits.30" = type { %"class.llvm::ilist_node.36" }
-%"class.llvm::ilist_node.36" = type { %"class.llvm::ilist_half_node.37", %"class.llvm::GlobalAlias"* }
-%"class.llvm::ilist_half_node.37" = type { %"class.llvm::GlobalAlias"* }
+%"class.llvm::ilist_node.36" = type { %"class.llvm::ilist_half_node.37", ptr }
+%"class.llvm::ilist_half_node.37" = type { ptr }
 %"class.llvm::GlobalAlias" = type { %"class.llvm::GlobalValue", %"class.llvm::ilist_node.36" }
 %"struct.llvm::ilist.38" = type { %"class.llvm::iplist.39" }
-%"class.llvm::iplist.39" = type { %"struct.llvm::ilist_traits.40", %"class.llvm::NamedMDNode"* }
+%"class.llvm::iplist.39" = type { %"struct.llvm::ilist_traits.40", ptr }
 %"struct.llvm::ilist_traits.40" = type { %"class.llvm::ilist_node.45" }
-%"class.llvm::ilist_node.45" = type { %"class.llvm::ilist_half_node.46", %"class.llvm::NamedMDNode"* }
-%"class.llvm::ilist_half_node.46" = type { %"class.llvm::NamedMDNode"* }
-%"class.llvm::NamedMDNode" = type { %"class.llvm::ilist_node.45", %"class.std::basic_string", %"class.llvm::Module"*, i8* }
+%"class.llvm::ilist_node.45" = type { %"class.llvm::ilist_half_node.46", ptr }
+%"class.llvm::ilist_half_node.46" = type { ptr }
+%"class.llvm::NamedMDNode" = type { %"class.llvm::ilist_node.45", %"class.std::basic_string", ptr, ptr }
 %"class.std::basic_string" = type { %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider" }
-%"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider" = type { i8* }
+%"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider" = type { ptr }
 %"class.llvm::ValueSymbolTable" = type opaque
-%"class.llvm::OwningPtr" = type { %"class.llvm::GVMaterializer"* }
+%"class.llvm::OwningPtr" = type { ptr }
 %"class.llvm::GVMaterializer" = type opaque
-%"class.llvm::iplist.47" = type { %"struct.llvm::ilist_traits.48", %"class.llvm::BasicBlock"* }
+%"class.llvm::iplist.47" = type { %"struct.llvm::ilist_traits.48", ptr }
 %"struct.llvm::ilist_traits.48" = type { %"class.llvm::ilist_half_node.3" }
-%"class.llvm::iplist.54" = type { %"struct.llvm::ilist_traits.55", %"class.llvm::Argument"* }
+%"class.llvm::iplist.54" = type { %"struct.llvm::ilist_traits.55", ptr }
 %"struct.llvm::ilist_traits.55" = type { %"class.llvm::ilist_half_node.61" }
-%"class.llvm::ilist_half_node.61" = type { %"class.llvm::Argument"* }
-%"class.llvm::Argument" = type { %"class.llvm::Value", %"class.llvm::ilist_node.192", %"class.llvm::Function"* }
-%"class.llvm::ilist_node.192" = type { %"class.llvm::ilist_half_node.61", %"class.llvm::Argument"* }
-%"class.llvm::AttributeSet" = type { %"class.llvm::AttributeSetImpl"* }
+%"class.llvm::ilist_half_node.61" = type { ptr }
+%"class.llvm::Argument" = type { %"class.llvm::Value", %"class.llvm::ilist_node.192", ptr }
+%"class.llvm::ilist_node.192" = type { %"class.llvm::ilist_half_node.61", ptr }
+%"class.llvm::AttributeSet" = type { ptr }
 %"class.llvm::AttributeSetImpl" = type opaque
-%"class.llvm::MachineFunction" = type { %"class.llvm::Function"*, %"class.llvm::TargetMachine"*, %"class.llvm::MCContext"*, %"class.llvm::MachineModuleInfo"*, %"class.llvm::GCModuleInfo"*, %"class.llvm::MachineRegisterInfo"*, %"struct.llvm::MachineFunctionInfo"*, %"class.llvm::MachineFrameInfo"*, %"class.llvm::MachineConstantPool"*, %"class.llvm::MachineJumpTableInfo"*, %"class.std::vector.163", %"class.llvm::BumpPtrAllocator", %"class.llvm::Recycler", %"class.llvm::ArrayRecycler", %"class.llvm::Recycler.180", %"struct.llvm::ilist.181", i32, i32, i8 }
-%"class.llvm::TargetMachine" = type { i32 (...)**, %"class.llvm::Target"*, %"class.std::basic_string", %"class.std::basic_string", %"class.std::basic_string", %"class.llvm::MCCodeGenInfo"*, %"class.llvm::MCAsmInfo"*, i8, %"class.llvm::TargetOptions" }
+%"class.llvm::MachineFunction" = type { ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, %"class.std::vector.163", %"class.llvm::BumpPtrAllocator", %"class.llvm::Recycler", %"class.llvm::ArrayRecycler", %"class.llvm::Recycler.180", %"struct.llvm::ilist.181", i32, i32, i8 }
+%"class.llvm::TargetMachine" = type { ptr, ptr, %"class.std::basic_string", %"class.std::basic_string", %"class.std::basic_string", ptr, ptr, i8, %"class.llvm::TargetOptions" }
 %"class.llvm::Target" = type opaque
 %"class.llvm::MCCodeGenInfo" = type opaque
 %"class.llvm::MCAsmInfo" = type opaque
 %"class.llvm::TargetOptions" = type { [2 x i8], i32, i8, i32, i8, %"class.std::basic_string", i32, i32 }
-%"class.llvm::MCContext" = type { %"class.llvm::SourceMgr"*, %"class.llvm::MCAsmInfo"*, %"class.llvm::MCRegisterInfo"*, %"class.llvm::MCObjectFileInfo"*, %"class.llvm::BumpPtrAllocator", %"class.llvm::StringMap", %"class.llvm::StringMap.62", i32, %"class.llvm::DenseMap.63", i8*, %"class.llvm::raw_ostream"*, i8, %"class.std::basic_string", %"class.std::basic_string", %"class.std::vector", %"class.std::vector.70", %"class.llvm::MCDwarfLoc", i8, i8, i32, %"class.llvm::MCSection"*, %"class.llvm::MCSymbol"*, %"class.llvm::MCSymbol"*, %"class.std::vector.75", %"class.llvm::StringRef", %"class.llvm::StringRef", i8, %"class.llvm::DenseMap.80", %"class.std::vector.84", i8*, i8*, i8*, i8 }
+%"class.llvm::MCContext" = type { ptr, ptr, ptr, ptr, %"class.llvm::BumpPtrAllocator", %"class.llvm::StringMap", %"class.llvm::StringMap.62", i32, %"class.llvm::DenseMap.63", ptr, ptr, i8, %"class.std::basic_string", %"class.std::basic_string", %"class.std::vector", %"class.std::vector.70", %"class.llvm::MCDwarfLoc", i8, i8, i32, ptr, ptr, ptr, %"class.std::vector.75", %"class.llvm::StringRef", %"class.llvm::StringRef", i8, %"class.llvm::DenseMap.80", %"class.std::vector.84", ptr, ptr, ptr, i8 }
 %"class.llvm::SourceMgr" = type opaque
-%"class.llvm::MCRegisterInfo" = type { %"struct.llvm::MCRegisterDesc"*, i32, i32, i32, %"class.llvm::MCRegisterClass"*, i32, i32, [2 x i16]*, i16*, i8*, i16*, i32, i16*, i32, i32, i32, i32, %"struct.llvm::MCRegisterInfo::DwarfLLVMRegPair"*, %"struct.llvm::MCRegisterInfo::DwarfLLVMRegPair"*, %"struct.llvm::MCRegisterInfo::DwarfLLVMRegPair"*, %"struct.llvm::MCRegisterInfo::DwarfLLVMRegPair"*, %"class.llvm::DenseMap" }
+%"class.llvm::MCRegisterInfo" = type { ptr, i32, i32, i32, ptr, i32, i32, ptr, ptr, ptr, ptr, i32, ptr, i32, i32, i32, i32, ptr, ptr, ptr, ptr, %"class.llvm::DenseMap" }
 %"struct.llvm::MCRegisterDesc" = type { i32, i32, i32, i32, i32, i32 }
-%"class.llvm::MCRegisterClass" = type { i8*, i16*, i8*, i16, i16, i16, i16, i16, i8, i8 }
+%"class.llvm::MCRegisterClass" = type { ptr, ptr, ptr, i16, i16, i16, i16, i16, i8, i8 }
 %"struct.llvm::MCRegisterInfo::DwarfLLVMRegPair" = type { i32, i32 }
-%"class.llvm::DenseMap" = type { %"struct.std::pair"*, i32, i32, i32 }
+%"class.llvm::DenseMap" = type { ptr, i32, i32, i32 }
 %"struct.std::pair" = type { i32, i32 }
 %"class.llvm::MCObjectFileInfo" = type opaque
-%"class.llvm::BumpPtrAllocator" = type { i64, i64, %"class.llvm::SlabAllocator"*, %"class.llvm::MemSlab"*, i8*, i8*, i64 }
-%"class.llvm::SlabAllocator" = type { i32 (...)** }
-%"class.llvm::MemSlab" = type { i64, %"class.llvm::MemSlab"* }
-%"class.llvm::StringMap" = type { %"class.llvm::StringMapImpl", %"class.llvm::BumpPtrAllocator"* }
-%"class.llvm::StringMapImpl" = type { %"class.llvm::StringMapEntryBase"**, i32, i32, i32, i32 }
+%"class.llvm::BumpPtrAllocator" = type { i64, i64, ptr, ptr, ptr, ptr, i64 }
+%"class.llvm::SlabAllocator" = type { ptr }
+%"class.llvm::MemSlab" = type { i64, ptr }
+%"class.llvm::StringMap" = type { %"class.llvm::StringMapImpl", ptr }
+%"class.llvm::StringMapImpl" = type { ptr, i32, i32, i32, i32 }
 %"class.llvm::StringMapEntryBase" = type { i32 }
-%"class.llvm::StringMap.62" = type { %"class.llvm::StringMapImpl", %"class.llvm::BumpPtrAllocator"* }
-%"class.llvm::DenseMap.63" = type { %"struct.std::pair.66"*, i32, i32, i32 }
+%"class.llvm::StringMap.62" = type { %"class.llvm::StringMapImpl", ptr }
+%"class.llvm::DenseMap.63" = type { ptr, i32, i32, i32 }
 %"struct.std::pair.66" = type opaque
-%"class.llvm::raw_ostream" = type { i32 (...)**, i8*, i8*, i8*, i32 }
+%"class.llvm::raw_ostream" = type { ptr, ptr, ptr, ptr, i32 }
 %"class.std::vector" = type { %"struct.std::_Vector_base" }
 %"struct.std::_Vector_base" = type { %"struct.std::_Vector_base<llvm::MCDwarfFile *, std::allocator<llvm::MCDwarfFile *> >::_Vector_impl" }
-%"struct.std::_Vector_base<llvm::MCDwarfFile *, std::allocator<llvm::MCDwarfFile *> >::_Vector_impl" = type { %"class.llvm::MCDwarfFile"**, %"class.llvm::MCDwarfFile"**, %"class.llvm::MCDwarfFile"** }
+%"struct.std::_Vector_base<llvm::MCDwarfFile *, std::allocator<llvm::MCDwarfFile *> >::_Vector_impl" = type { ptr, ptr, ptr }
 %"class.llvm::MCDwarfFile" = type { %"class.llvm::StringRef", i32 }
-%"class.llvm::StringRef" = type { i8*, i64 }
+%"class.llvm::StringRef" = type { ptr, i64 }
 %"class.std::vector.70" = type { %"struct.std::_Vector_base.71" }
 %"struct.std::_Vector_base.71" = type { %"struct.std::_Vector_base<llvm::StringRef, std::allocator<llvm::StringRef> >::_Vector_impl" }
-%"struct.std::_Vector_base<llvm::StringRef, std::allocator<llvm::StringRef> >::_Vector_impl" = type { %"class.llvm::StringRef"*, %"class.llvm::StringRef"*, %"class.llvm::StringRef"* }
+%"struct.std::_Vector_base<llvm::StringRef, std::allocator<llvm::StringRef> >::_Vector_impl" = type { ptr, ptr, ptr }
 %"class.llvm::MCDwarfLoc" = type { i32, i32, i32, i32, i32, i32 }
 %"class.llvm::MCSection" = type opaque
-%"class.llvm::MCSymbol" = type { %"class.llvm::StringRef", %"class.llvm::MCSection"*, %"class.llvm::MCExpr"*, i8 }
+%"class.llvm::MCSymbol" = type { %"class.llvm::StringRef", ptr, ptr, i8 }
 %"class.llvm::MCExpr" = type opaque
 %"class.std::vector.75" = type { %"struct.std::_Vector_base.76" }
 %"struct.std::_Vector_base.76" = type { %"struct.std::_Vector_base<const llvm::MCGenDwarfLabelEntry *, std::allocator<const llvm::MCGenDwarfLabelEntry *> >::_Vector_impl" }
-%"struct.std::_Vector_base<const llvm::MCGenDwarfLabelEntry *, std::allocator<const llvm::MCGenDwarfLabelEntry *> >::_Vector_impl" = type { %"class.llvm::MCGenDwarfLabelEntry"**, %"class.llvm::MCGenDwarfLabelEntry"**, %"class.llvm::MCGenDwarfLabelEntry"** }
-%"class.llvm::MCGenDwarfLabelEntry" = type { %"class.llvm::StringRef", i32, i32, %"class.llvm::MCSymbol"* }
-%"class.llvm::DenseMap.80" = type { %"struct.std::pair.83"*, i32, i32, i32 }
-%"struct.std::pair.83" = type { %"class.llvm::MCSection"*, %"class.llvm::MCLineSection"* }
+%"struct.std::_Vector_base<const llvm::MCGenDwarfLabelEntry *, std::allocator<const llvm::MCGenDwarfLabelEntry *> >::_Vector_impl" = type { ptr, ptr, ptr }
+%"class.llvm::MCGenDwarfLabelEntry" = type { %"class.llvm::StringRef", i32, i32, ptr }
+%"class.llvm::DenseMap.80" = type { ptr, i32, i32, i32 }
+%"struct.std::pair.83" = type { ptr, ptr }
 %"class.llvm::MCLineSection" = type { %"class.std::vector.215" }
 %"class.std::vector.215" = type { %"struct.std::_Vector_base.216" }
 %"struct.std::_Vector_base.216" = type { %"struct.std::_Vector_base<llvm::MCLineEntry, std::allocator<llvm::MCLineEntry> >::_Vector_impl" }
-%"struct.std::_Vector_base<llvm::MCLineEntry, std::allocator<llvm::MCLineEntry> >::_Vector_impl" = type { %"class.llvm::MCLineEntry"*, %"class.llvm::MCLineEntry"*, %"class.llvm::MCLineEntry"* }
-%"class.llvm::MCLineEntry" = type { %"class.llvm::MCDwarfLoc", %"class.llvm::MCSymbol"* }
+%"struct.std::_Vector_base<llvm::MCLineEntry, std::allocator<llvm::MCLineEntry> >::_Vector_impl" = type { ptr, ptr, ptr }
+%"class.llvm::MCLineEntry" = type { %"class.llvm::MCDwarfLoc", ptr }
 %"class.std::vector.84" = type { %"struct.std::_Vector_base.85" }
 %"struct.std::_Vector_base.85" = type { %"struct.std::_Vector_base<const llvm::MCSection *, std::allocator<const llvm::MCSection *> >::_Vector_impl" }
-%"struct.std::_Vector_base<const llvm::MCSection *, std::allocator<const llvm::MCSection *> >::_Vector_impl" = type { %"class.llvm::MCSection"**, %"class.llvm::MCSection"**, %"class.llvm::MCSection"** }
-%"class.llvm::MachineModuleInfo" = type { %"class.llvm::ImmutablePass", %"class.llvm::MCContext", %"class.llvm::Module"*, %"class.llvm::MachineModuleInfoImpl"*, %"class.std::vector.95", i32, %"class.std::vector.100", %"class.llvm::DenseMap.110", %"class.llvm::DenseMap.114", i32, %"class.std::vector.118", %"class.std::vector.123", %"class.std::vector.123", %"class.std::vector.128", %"class.llvm::SmallPtrSet", %"class.llvm::MMIAddrLabelMap"*, i8, i8, i8, i8, %"class.llvm::SmallVector.133" }
+%"struct.std::_Vector_base<const llvm::MCSection *, std::allocator<const llvm::MCSection *> >::_Vector_impl" = type { ptr, ptr, ptr }
+%"class.llvm::MachineModuleInfo" = type { %"class.llvm::ImmutablePass", %"class.llvm::MCContext", ptr, ptr, %"class.std::vector.95", i32, %"class.std::vector.100", %"class.llvm::DenseMap.110", %"class.llvm::DenseMap.114", i32, %"class.std::vector.118", %"class.std::vector.123", %"class.std::vector.123", %"class.std::vector.128", %"class.llvm::SmallPtrSet", ptr, i8, i8, i8, i8, %"class.llvm::SmallVector.133" }
 %"class.llvm::ImmutablePass" = type { %"class.llvm::ModulePass" }
 %"class.llvm::ModulePass" = type { %"class.llvm::Pass" }
-%"class.llvm::Pass" = type { i32 (...)**, %"class.llvm::AnalysisResolver"*, i8*, i32 }
-%"class.llvm::AnalysisResolver" = type { %"class.std::vector.89", %"class.llvm::PMDataManager"* }
+%"class.llvm::Pass" = type { ptr, ptr, ptr, i32 }
+%"class.llvm::AnalysisResolver" = type { %"class.std::vector.89", ptr }
 %"class.std::vector.89" = type { %"struct.std::_Vector_base.90" }
-%"struct.std::_Vector_base.90" = type { %"struct.std::_Vector_base<std::pair<const void *, llvm::Pass *>, std::allocator<std::pair<const void *, llvm::Pass *> > >::_Vector_impl" }
-%"struct.std::_Vector_base<std::pair<const void *, llvm::Pass *>, std::allocator<std::pair<const void *, llvm::Pass *> > >::_Vector_impl" = type { %"struct.std::pair.94"*, %"struct.std::pair.94"*, %"struct.std::pair.94"* }
-%"struct.std::pair.94" = type { i8*, %"class.llvm::Pass"* }
+%"struct.std::_Vector_base.90" = type { %"struct.std::_Vector_base<std::pair<const ptr, llvm::Pass *>, std::allocator<std::pair<const ptr, llvm::Pass *> > >::_Vector_impl" }
+%"struct.std::_Vector_base<std::pair<const ptr, llvm::Pass *>, std::allocator<std::pair<const ptr, llvm::Pass *> > >::_Vector_impl" = type { ptr, ptr, ptr }
+%"struct.std::pair.94" = type { ptr, ptr }
 %"class.llvm::PMDataManager" = type opaque
-%"class.llvm::MachineModuleInfoImpl" = type { i32 (...)** }
+%"class.llvm::MachineModuleInfoImpl" = type { ptr }
 %"class.std::vector.95" = type { %"struct.std::_Vector_base.96" }
 %"struct.std::_Vector_base.96" = type { %"struct.std::_Vector_base<llvm::MachineMove, std::allocator<llvm::MachineMove> >::_Vector_impl" }
-%"struct.std::_Vector_base<llvm::MachineMove, std::allocator<llvm::MachineMove> >::_Vector_impl" = type { %"class.llvm::MachineMove"*, %"class.llvm::MachineMove"*, %"class.llvm::MachineMove"* }
-%"class.llvm::MachineMove" = type { %"class.llvm::MCSymbol"*, %"class.llvm::MachineLocation", %"class.llvm::MachineLocation" }
+%"struct.std::_Vector_base<llvm::MachineMove, std::allocator<llvm::MachineMove> >::_Vector_impl" = type { ptr, ptr, ptr }
+%"class.llvm::MachineMove" = type { ptr, %"class.llvm::MachineLocation", %"class.llvm::MachineLocation" }
 %"class.llvm::MachineLocation" = type { i8, i32, i32 }
 %"class.std::vector.100" = type { %"struct.std::_Vector_base.101" }
 %"struct.std::_Vector_base.101" = type { %"struct.std::_Vector_base<llvm::LandingPadInfo, std::allocator<llvm::LandingPadInfo> >::_Vector_impl" }
-%"struct.std::_Vector_base<llvm::LandingPadInfo, std::allocator<llvm::LandingPadInfo> >::_Vector_impl" = type { %"struct.llvm::LandingPadInfo"*, %"struct.llvm::LandingPadInfo"*, %"struct.llvm::LandingPadInfo"* }
-%"struct.llvm::LandingPadInfo" = type { %"class.llvm::MachineBasicBlock"*, %"class.llvm::SmallVector", %"class.llvm::SmallVector", %"class.llvm::MCSymbol"*, %"class.llvm::Function"*, %"class.std::vector.105" }
+%"struct.std::_Vector_base<llvm::LandingPadInfo, std::allocator<llvm::LandingPadInfo> >::_Vector_impl" = type { ptr, ptr, ptr }
+%"struct.llvm::LandingPadInfo" = type { ptr, %"class.llvm::SmallVector", %"class.llvm::SmallVector", ptr, ptr, %"class.std::vector.105" }
 %"class.llvm::SmallVector" = type { %"class.llvm::SmallVectorImpl", %"struct.llvm::SmallVectorStorage" }
 %"class.llvm::SmallVectorImpl" = type { %"class.llvm::SmallVectorTemplateBase" }
 %"class.llvm::SmallVectorTemplateBase" = type { %"class.llvm::SmallVectorTemplateCommon" }
 %"class.llvm::SmallVectorTemplateCommon" = type { %"class.llvm::SmallVectorBase", %"struct.llvm::AlignedCharArrayUnion" }
-%"class.llvm::SmallVectorBase" = type { i8*, i8*, i8* }
+%"class.llvm::SmallVectorBase" = type { ptr, ptr, ptr }
 %"struct.llvm::AlignedCharArrayUnion" = type { %"struct.llvm::AlignedCharArray" }
 %"struct.llvm::AlignedCharArray" = type { [8 x i8] }
 %"struct.llvm::SmallVectorStorage" = type { i8 }
 %"class.std::vector.105" = type { %"struct.std::_Vector_base.106" }
 %"struct.std::_Vector_base.106" = type { %"struct.std::_Vector_base<int, std::allocator<int> >::_Vector_impl" }
-%"struct.std::_Vector_base<int, std::allocator<int> >::_Vector_impl" = type { i32*, i32*, i32* }
-%"class.llvm::DenseMap.110" = type { %"struct.std::pair.113"*, i32, i32, i32 }
-%"struct.std::pair.113" = type { %"class.llvm::MCSymbol"*, %"class.llvm::SmallVector.206" }
+%"struct.std::_Vector_base<int, std::allocator<int> >::_Vector_impl" = type { ptr, ptr, ptr }
+%"class.llvm::DenseMap.110" = type { ptr, i32, i32, i32 }
+%"struct.std::pair.113" = type { ptr, %"class.llvm::SmallVector.206" }
 %"class.llvm::SmallVector.206" = type { [28 x i8], %"struct.llvm::SmallVectorStorage.207" }
 %"struct.llvm::SmallVectorStorage.207" = type { [3 x %"struct.llvm::AlignedCharArrayUnion.198"] }
 %"struct.llvm::AlignedCharArrayUnion.198" = type { %"struct.llvm::AlignedCharArray.199" }
 %"struct.llvm::AlignedCharArray.199" = type { [4 x i8] }
-%"class.llvm::DenseMap.114" = type { %"struct.std::pair.117"*, i32, i32, i32 }
-%"struct.std::pair.117" = type { %"class.llvm::MCSymbol"*, i32 }
+%"class.llvm::DenseMap.114" = type { ptr, i32, i32, i32 }
+%"struct.std::pair.117" = type { ptr, i32 }
 %"class.std::vector.118" = type { %"struct.std::_Vector_base.119" }
 %"struct.std::_Vector_base.119" = type { %"struct.std::_Vector_base<const llvm::GlobalVariable *, std::allocator<const llvm::GlobalVariable *> >::_Vector_impl" }
-%"struct.std::_Vector_base<const llvm::GlobalVariable *, std::allocator<const llvm::GlobalVariable *> >::_Vector_impl" = type { %"class.llvm::GlobalVariable"**, %"class.llvm::GlobalVariable"**, %"class.llvm::GlobalVariable"** }
+%"struct.std::_Vector_base<const llvm::GlobalVariable *, std::allocator<const llvm::GlobalVariable *> >::_Vector_impl" = type { ptr, ptr, ptr }
 %"class.std::vector.123" = type { %"struct.std::_Vector_base.124" }
 %"struct.std::_Vector_base.124" = type { %"struct.std::_Vector_base<unsigned int, std::allocator<unsigned int> >::_Vector_impl" }
-%"struct.std::_Vector_base<unsigned int, std::allocator<unsigned int> >::_Vector_impl" = type { i32*, i32*, i32* }
+%"struct.std::_Vector_base<unsigned int, std::allocator<unsigned int> >::_Vector_impl" = type { ptr, ptr, ptr }
 %"class.std::vector.128" = type { %"struct.std::_Vector_base.129" }
 %"struct.std::_Vector_base.129" = type { %"struct.std::_Vector_base<const llvm::Function *, std::allocator<const llvm::Function *> >::_Vector_impl" }
-%"struct.std::_Vector_base<const llvm::Function *, std::allocator<const llvm::Function *> >::_Vector_impl" = type { %"class.llvm::Function"**, %"class.llvm::Function"**, %"class.llvm::Function"** }
-%"class.llvm::SmallPtrSet" = type { %"class.llvm::SmallPtrSetImpl", [33 x i8*] }
-%"class.llvm::SmallPtrSetImpl" = type { i8**, i8**, i32, i32, i32 }
+%"struct.std::_Vector_base<const llvm::Function *, std::allocator<const llvm::Function *> >::_Vector_impl" = type { ptr, ptr, ptr }
+%"class.llvm::SmallPtrSet" = type { %"class.llvm::SmallPtrSetImpl", [33 x ptr] }
+%"class.llvm::SmallPtrSetImpl" = type { ptr, ptr, i32, i32, i32 }
 %"class.llvm::MMIAddrLabelMap" = type opaque
 %"class.llvm::SmallVector.133" = type { %"class.llvm::SmallVectorImpl.134", %"struct.llvm::SmallVectorStorage.139" }
 %"class.llvm::SmallVectorImpl.134" = type { %"class.llvm::SmallVectorTemplateBase.135" }
@@ -195,49 +195,49 @@ target triple = "powerpc64-unknown-linux-gnu"
 %"struct.llvm::AlignedCharArray.138" = type { [40 x i8] }
 %"struct.llvm::SmallVectorStorage.139" = type { [3 x %"struct.llvm::AlignedCharArrayUnion.137"] }
 %"class.llvm::GCModuleInfo" = type opaque
-%"class.llvm::MachineRegisterInfo" = type { %"class.llvm::TargetRegisterInfo"*, i8, i8, %"class.llvm::IndexedMap", %"class.llvm::IndexedMap.146", %"class.llvm::MachineOperand"**, %"class.llvm::BitVector", %"class.llvm::BitVector", %"class.llvm::BitVector", %"class.std::vector.147", %"class.std::vector.123" }
-%"class.llvm::TargetRegisterInfo" = type { i32 (...)**, %"class.llvm::MCRegisterInfo", %"struct.llvm::TargetRegisterInfoDesc"*, i8**, i32*, %"class.llvm::TargetRegisterClass"**, %"class.llvm::TargetRegisterClass"** }
+%"class.llvm::MachineRegisterInfo" = type { ptr, i8, i8, %"class.llvm::IndexedMap", %"class.llvm::IndexedMap.146", ptr, %"class.llvm::BitVector", %"class.llvm::BitVector", %"class.llvm::BitVector", %"class.std::vector.147", %"class.std::vector.123" }
+%"class.llvm::TargetRegisterInfo" = type { ptr, %"class.llvm::MCRegisterInfo", ptr, ptr, ptr, ptr, ptr }
 %"struct.llvm::TargetRegisterInfoDesc" = type { i32, i8 }
-%"class.llvm::TargetRegisterClass" = type { %"class.llvm::MCRegisterClass"*, i32*, i32*, i16*, %"class.llvm::TargetRegisterClass"**, void (%"class.llvm::ArrayRef"*, %"class.llvm::MachineFunction"*)* }
-%"class.llvm::ArrayRef" = type { i16*, i64 }
+%"class.llvm::TargetRegisterClass" = type { ptr, ptr, ptr, ptr, ptr, ptr }
+%"class.llvm::ArrayRef" = type { ptr, i64 }
 %"class.llvm::IndexedMap" = type { %"class.std::vector.140", %"struct.std::pair.145", %"struct.llvm::VirtReg2IndexFunctor" }
 %"class.std::vector.140" = type { %"struct.std::_Vector_base.141" }
 %"struct.std::_Vector_base.141" = type { %"struct.std::_Vector_base<std::pair<const llvm::TargetRegisterClass *, llvm::MachineOperand *>, std::allocator<std::pair<const llvm::TargetRegisterClass *, llvm::MachineOperand *> > >::_Vector_impl" }
-%"struct.std::_Vector_base<std::pair<const llvm::TargetRegisterClass *, llvm::MachineOperand *>, std::allocator<std::pair<const llvm::TargetRegisterClass *, llvm::MachineOperand *> > >::_Vector_impl" = type { %"struct.std::pair.145"*, %"struct.std::pair.145"*, %"struct.std::pair.145"* }
-%"struct.std::pair.145" = type { %"class.llvm::TargetRegisterClass"*, %"class.llvm::MachineOperand"* }
-%"class.llvm::MachineOperand" = type { i8, [3 x i8], %union.anon, %"class.llvm::MachineInstr"*, %union.anon.188 }
+%"struct.std::_Vector_base<std::pair<const llvm::TargetRegisterClass *, llvm::MachineOperand *>, std::allocator<std::pair<const llvm::TargetRegisterClass *, llvm::MachineOperand *> > >::_Vector_impl" = type { ptr, ptr, ptr }
+%"struct.std::pair.145" = type { ptr, ptr }
+%"class.llvm::MachineOperand" = type { i8, [3 x i8], %union.anon, ptr, %union.anon.188 }
 %union.anon = type { i32 }
 %union.anon.188 = type { %struct.anon }
-%struct.anon = type { %"class.llvm::MachineOperand"*, %"class.llvm::MachineOperand"* }
+%struct.anon = type { ptr, ptr }
 %"struct.llvm::VirtReg2IndexFunctor" = type { i8 }
 %"class.llvm::IndexedMap.146" = type { %"class.std::vector.147", %"struct.std::pair.152", %"struct.llvm::VirtReg2IndexFunctor" }
 %"class.std::vector.147" = type { %"struct.std::_Vector_base.148" }
 %"struct.std::_Vector_base.148" = type { %"struct.std::_Vector_base<std::pair<unsigned int, unsigned int>, std::allocator<std::pair<unsigned int, unsigned int> > >::_Vector_impl" }
-%"struct.std::_Vector_base<std::pair<unsigned int, unsigned int>, std::allocator<std::pair<unsigned int, unsigned int> > >::_Vector_impl" = type { %"struct.std::pair.152"*, %"struct.std::pair.152"*, %"struct.std::pair.152"* }
+%"struct.std::_Vector_base<std::pair<unsigned int, unsigned int>, std::allocator<std::pair<unsigned int, unsigned int> > >::_Vector_impl" = type { ptr, ptr, ptr }
 %"struct.std::pair.152" = type { i32, i32 }
-%"class.llvm::BitVector" = type { i64*, i32, i32 }
-%"struct.llvm::MachineFunctionInfo" = type { i32 (...)** }
+%"class.llvm::BitVector" = type { ptr, i32, i32 }
+%"struct.llvm::MachineFunctionInfo" = type { ptr }
 %"class.llvm::MachineFrameInfo" = type opaque
-%"class.llvm::MachineConstantPool" = type { %"class.llvm::DataLayout"*, i32, %"class.std::vector.153", %"class.llvm::DenseSet" }
+%"class.llvm::MachineConstantPool" = type { ptr, i32, %"class.std::vector.153", %"class.llvm::DenseSet" }
 %"class.llvm::DataLayout" = type opaque
 %"class.std::vector.153" = type { %"struct.std::_Vector_base.154" }
 %"struct.std::_Vector_base.154" = type { %"struct.std::_Vector_base<llvm::MachineConstantPoolEntry, std::allocator<llvm::MachineConstantPoolEntry> >::_Vector_impl" }
-%"struct.std::_Vector_base<llvm::MachineConstantPoolEntry, std::allocator<llvm::MachineConstantPoolEntry> >::_Vector_impl" = type { %"class.llvm::MachineConstantPoolEntry"*, %"class.llvm::MachineConstantPoolEntry"*, %"class.llvm::MachineConstantPoolEntry"* }
+%"struct.std::_Vector_base<llvm::MachineConstantPoolEntry, std::allocator<llvm::MachineConstantPoolEntry> >::_Vector_impl" = type { ptr, ptr, ptr }
 %"class.llvm::MachineConstantPoolEntry" = type { %union.anon.158, i32 }
-%union.anon.158 = type { %"class.llvm::Constant"* }
+%union.anon.158 = type { ptr }
 %"class.llvm::Constant" = type { %"class.llvm::User" }
 %"class.llvm::DenseSet" = type { %"class.llvm::DenseMap.159" }
-%"class.llvm::DenseMap.159" = type { %"struct.std::pair.162"*, i32, i32, i32 }
-%"struct.std::pair.162" = type { %"class.llvm::MachineConstantPoolValue"*, i8 }
-%"class.llvm::MachineConstantPoolValue" = type { i32 (...)**, %"class.llvm::Type"* }
+%"class.llvm::DenseMap.159" = type { ptr, i32, i32, i32 }
+%"struct.std::pair.162" = type { ptr, i8 }
+%"class.llvm::MachineConstantPoolValue" = type { ptr, ptr }
 %"class.llvm::MachineJumpTableInfo" = type opaque
 %"class.std::vector.163" = type { %"struct.std::_Vector_base.164" }
 %"struct.std::_Vector_base.164" = type { %"struct.std::_Vector_base<llvm::MachineBasicBlock *, std::allocator<llvm::MachineBasicBlock *> >::_Vector_impl" }
-%"struct.std::_Vector_base<llvm::MachineBasicBlock *, std::allocator<llvm::MachineBasicBlock *> >::_Vector_impl" = type { %"class.llvm::MachineBasicBlock"**, %"class.llvm::MachineBasicBlock"**, %"class.llvm::MachineBasicBlock"** }
+%"struct.std::_Vector_base<llvm::MachineBasicBlock *, std::allocator<llvm::MachineBasicBlock *> >::_Vector_impl" = type { ptr, ptr, ptr }
 %"class.llvm::Recycler" = type { %"class.llvm::iplist.168" }
-%"class.llvm::iplist.168" = type { %"struct.llvm::ilist_traits.169", %"struct.llvm::RecyclerStruct"* }
+%"class.llvm::iplist.168" = type { %"struct.llvm::ilist_traits.169", ptr }
 %"struct.llvm::ilist_traits.169" = type { %"struct.llvm::RecyclerStruct" }
-%"struct.llvm::RecyclerStruct" = type { %"struct.llvm::RecyclerStruct"*, %"struct.llvm::RecyclerStruct"* }
+%"struct.llvm::RecyclerStruct" = type { ptr, ptr }
 %"class.llvm::ArrayRecycler" = type { %"class.llvm::SmallVector.174" }
 %"class.llvm::SmallVector.174" = type { %"class.llvm::SmallVectorImpl.175", %"struct.llvm::SmallVectorStorage.179" }
 %"class.llvm::SmallVectorImpl.175" = type { %"class.llvm::SmallVectorTemplateBase.176" }
@@ -247,39 +247,39 @@ target triple = "powerpc64-unknown-linux-gnu"
 %"struct.llvm::SmallVectorStorage.179" = type { [7 x %"struct.llvm::AlignedCharArrayUnion.178"] }
 %"class.llvm::Recycler.180" = type { %"class.llvm::iplist.168" }
 %"struct.llvm::ilist.181" = type { %"class.llvm::iplist.182" }
-%"class.llvm::iplist.182" = type { %"struct.llvm::ilist_traits.183", %"class.llvm::MachineBasicBlock"* }
+%"class.llvm::iplist.182" = type { %"struct.llvm::ilist_traits.183", ptr }
 %"struct.llvm::ilist_traits.183" = type { %"class.llvm::ilist_half_node.1" }
 %"class.llvm::ArrayRecycler<llvm::MachineOperand, 8>::Capacity" = type { i8 }
 %"class.llvm::ConstantInt" = type { %"class.llvm::Constant", %"class.llvm::APInt" }
 %"class.llvm::APInt" = type { i32, %union.anon.189 }
 %union.anon.189 = type { i64 }
 %"class.llvm::ConstantFP" = type { %"class.llvm::Constant", %"class.llvm::APFloat" }
-%"class.llvm::APFloat" = type { %"struct.llvm::fltSemantics"*, %"union.llvm::APFloat::Significand", i16, i8 }
+%"class.llvm::APFloat" = type { ptr, %"union.llvm::APFloat::Significand", i16, i8 }
 %"struct.llvm::fltSemantics" = type opaque
 %"union.llvm::APFloat::Significand" = type { i64 }
 %"class.llvm::BlockAddress" = type { %"class.llvm::Constant" }
 %"class.llvm::hash_code" = type { i64 }
 %"struct.llvm::hashing::detail::hash_combine_recursive_helper" = type { [64 x i8], %"struct.llvm::hashing::detail::hash_state", i64 }
 %"struct.llvm::hashing::detail::hash_state" = type { i64, i64, i64, i64, i64, i64, i64, i64 }
-%"class.llvm::PrintReg" = type { %"class.llvm::TargetRegisterInfo"*, i32, i32 }
+%"class.llvm::PrintReg" = type { ptr, i32, i32 }
 %"class.llvm::PseudoSourceValue" = type { %"class.llvm::Value" }
 %"class.llvm::FoldingSetNodeID" = type { %"class.llvm::SmallVector.194" }
 %"class.llvm::SmallVector.194" = type { [28 x i8], %"struct.llvm::SmallVectorStorage.200" }
 %"struct.llvm::SmallVectorStorage.200" = type { [31 x %"struct.llvm::AlignedCharArrayUnion.198"] }
-%"struct.llvm::ArrayRecycler<llvm::MachineOperand, 8>::FreeList" = type { %"struct.llvm::ArrayRecycler<llvm::MachineOperand, 8>::FreeList"* }
-%"class.llvm::ilist_iterator.202" = type { %"class.llvm::MachineInstr"* }
-%"class.llvm::TargetInstrInfo" = type { i32 (...)**, [28 x i8], i32, i32 }
+%"struct.llvm::ArrayRecycler<llvm::MachineOperand, 8>::FreeList" = type { ptr }
+%"class.llvm::ilist_iterator.202" = type { ptr }
+%"class.llvm::TargetInstrInfo" = type { ptr, [28 x i8], i32, i32 }
 %"struct.std::pair.203" = type { i8, i8 }
 %"class.llvm::SmallVectorImpl.195" = type { %"class.llvm::SmallVectorTemplateBase.196" }
 %"class.llvm::SmallVectorTemplateBase.196" = type { %"class.llvm::SmallVectorTemplateCommon.197" }
 %"class.llvm::SmallVectorTemplateCommon.197" = type { %"class.llvm::SmallVectorBase", %"struct.llvm::AlignedCharArrayUnion.198" }
-%"class.llvm::AliasAnalysis" = type { i32 (...)**, %"class.llvm::DataLayout"*, %"class.llvm::TargetLibraryInfo"*, %"class.llvm::AliasAnalysis"* }
+%"class.llvm::AliasAnalysis" = type { ptr, ptr, ptr, ptr }
 %"class.llvm::TargetLibraryInfo" = type opaque
-%"struct.llvm::AliasAnalysis::Location" = type { %"class.llvm::Value"*, i64, %"class.llvm::MDNode"* }
+%"struct.llvm::AliasAnalysis::Location" = type { ptr, i64, ptr }
 %"class.llvm::DIVariable" = type { %"class.llvm::DIDescriptor" }
-%"class.llvm::DIDescriptor" = type { %"class.llvm::MDNode"* }
+%"class.llvm::DIDescriptor" = type { ptr }
 %"class.llvm::DIScope" = type { %"class.llvm::DIDescriptor" }
-%"class.llvm::ArrayRef.208" = type { i32*, i64 }
+%"class.llvm::ArrayRef.208" = type { ptr, i64 }
 %"class.llvm::SmallVector.209" = type { %"class.llvm::SmallVectorImpl.210", %"struct.llvm::SmallVectorStorage.214" }
 %"class.llvm::SmallVectorImpl.210" = type { %"class.llvm::SmallVectorTemplateBase.211" }
 %"class.llvm::SmallVectorTemplateBase.211" = type { %"class.llvm::SmallVectorTemplateCommon.212" }
@@ -287,68 +287,67 @@ target triple = "powerpc64-unknown-linux-gnu"
 %"struct.llvm::AlignedCharArrayUnion.213" = type { %"struct.llvm::AlignedCharArray" }
 %"struct.llvm::SmallVectorStorage.214" = type { [7 x %"struct.llvm::AlignedCharArrayUnion.213"] }
 %"class.llvm::Twine" = type { %"union.llvm::Twine::Child", %"union.llvm::Twine::Child", i8, i8 }
-%"union.llvm::Twine::Child" = type { %"class.llvm::Twine"* }
+%"union.llvm::Twine::Child" = type { ptr }
 %"struct.std::random_access_iterator_tag" = type { i8 }
 
-declare void @_ZN4llvm19MachineRegisterInfo27removeRegOperandFromUseListEPNS_14MachineOperandE(%"class.llvm::MachineRegisterInfo"*, %"class.llvm::MachineOperand"*)
+declare void @_ZN4llvm19MachineRegisterInfo27removeRegOperandFromUseListEPNS_14MachineOperandE(ptr, ptr)
 
-declare void @_ZN4llvm19MachineRegisterInfo22addRegOperandToUseListEPNS_14MachineOperandE(%"class.llvm::MachineRegisterInfo"*, %"class.llvm::MachineOperand"*)
+declare void @_ZN4llvm19MachineRegisterInfo22addRegOperandToUseListEPNS_14MachineOperandE(ptr, ptr)
 
-declare zeroext i32 @_ZNK4llvm14MCRegisterInfo9getSubRegEjj(%"class.llvm::MCRegisterInfo"*, i32 zeroext, i32 zeroext)
+declare zeroext i32 @_ZNK4llvm14MCRegisterInfo9getSubRegEjj(ptr, i32 zeroext, i32 zeroext)
 
-define void @_ZN4llvm14MachineOperand12substPhysRegEjRKNS_18TargetRegisterInfoE(%"class.llvm::MachineOperand"* %this, i32 zeroext %Reg, %"class.llvm::TargetRegisterInfo"* %TRI) align 2 {
+define void @_ZN4llvm14MachineOperand12substPhysRegEjRKNS_18TargetRegisterInfoE(ptr %this, i32 zeroext %Reg, ptr %TRI) align 2 {
 entry:
-  %SubReg_TargetFlags.i = getelementptr inbounds %"class.llvm::MachineOperand", %"class.llvm::MachineOperand"* %this, i64 0, i32 1
-  %0 = bitcast [3 x i8]* %SubReg_TargetFlags.i to i24*
-  %bf.load.i = load i24, i24* %0, align 1
+  %SubReg_TargetFlags.i = getelementptr inbounds %"class.llvm::MachineOperand", ptr %this, i64 0, i32 1
+  %bf.load.i = load i24, ptr %SubReg_TargetFlags.i, align 1
   %bf.lshr.i = lshr i24 %bf.load.i, 12
   %tobool = icmp eq i24 %bf.lshr.i, 0
   br i1 %tobool, label %if.end, label %if.then
 
 if.then:                                          ; preds = %entry
   %bf.cast.i = zext i24 %bf.lshr.i to i32
-  %add.ptr = getelementptr inbounds %"class.llvm::TargetRegisterInfo", %"class.llvm::TargetRegisterInfo"* %TRI, i64 0, i32 1
-  %call3 = tail call zeroext i32 @_ZNK4llvm14MCRegisterInfo9getSubRegEjj(%"class.llvm::MCRegisterInfo"* %add.ptr, i32 zeroext %Reg, i32 zeroext %bf.cast.i)
-  %bf.load.i10 = load i24, i24* %0, align 1
+  %add.ptr = getelementptr inbounds %"class.llvm::TargetRegisterInfo", ptr %TRI, i64 0, i32 1
+  %call3 = tail call zeroext i32 @_ZNK4llvm14MCRegisterInfo9getSubRegEjj(ptr %add.ptr, i32 zeroext %Reg, i32 zeroext %bf.cast.i)
+  %bf.load.i10 = load i24, ptr %SubReg_TargetFlags.i, align 1
   %bf.clear.i = and i24 %bf.load.i10, 4095
-  store i24 %bf.clear.i, i24* %0, align 1
+  store i24 %bf.clear.i, ptr %SubReg_TargetFlags.i, align 1
   br label %if.end
 
 if.end:                                           ; preds = %entry, %if.then
   %Reg.addr.0 = phi i32 [ %call3, %if.then ], [ %Reg, %entry ]
-  %RegNo.i.i = getelementptr inbounds %"class.llvm::MachineOperand", %"class.llvm::MachineOperand"* %this, i64 0, i32 2, i32 0
-  %1 = load i32, i32* %RegNo.i.i, align 4
-  %cmp.i = icmp eq i32 %1, %Reg.addr.0
+  %RegNo.i.i = getelementptr inbounds %"class.llvm::MachineOperand", ptr %this, i64 0, i32 2, i32 0
+  %0 = load i32, ptr %RegNo.i.i, align 4
+  %cmp.i = icmp eq i32 %0, %Reg.addr.0
   br i1 %cmp.i, label %_ZN4llvm14MachineOperand6setRegEj.exit, label %if.end.i
 
 if.end.i:                                         ; preds = %if.end
-  %ParentMI.i.i = getelementptr inbounds %"class.llvm::MachineOperand", %"class.llvm::MachineOperand"* %this, i64 0, i32 3
-  %2 = load %"class.llvm::MachineInstr"*, %"class.llvm::MachineInstr"** %ParentMI.i.i, align 8
-  %tobool.i = icmp eq %"class.llvm::MachineInstr"* %2, null
+  %ParentMI.i.i = getelementptr inbounds %"class.llvm::MachineOperand", ptr %this, i64 0, i32 3
+  %1 = load ptr, ptr %ParentMI.i.i, align 8
+  %tobool.i = icmp eq ptr %1, null
   br i1 %tobool.i, label %if.end13.i, label %if.then3.i
 
 if.then3.i:                                       ; preds = %if.end.i
-  %Parent.i.i = getelementptr inbounds %"class.llvm::MachineInstr", %"class.llvm::MachineInstr"* %2, i64 0, i32 2
-  %3 = load %"class.llvm::MachineBasicBlock"*, %"class.llvm::MachineBasicBlock"** %Parent.i.i, align 8
-  %tobool5.i = icmp eq %"class.llvm::MachineBasicBlock"* %3, null
+  %Parent.i.i = getelementptr inbounds %"class.llvm::MachineInstr", ptr %1, i64 0, i32 2
+  %2 = load ptr, ptr %Parent.i.i, align 8
+  %tobool5.i = icmp eq ptr %2, null
   br i1 %tobool5.i, label %if.end13.i, label %if.then6.i
 
 if.then6.i:                                       ; preds = %if.then3.i
-  %xParent.i.i = getelementptr inbounds %"class.llvm::MachineBasicBlock", %"class.llvm::MachineBasicBlock"* %3, i64 0, i32 4
-  %4 = load %"class.llvm::MachineFunction"*, %"class.llvm::MachineFunction"** %xParent.i.i, align 8
-  %tobool8.i = icmp eq %"class.llvm::MachineFunction"* %4, null
+  %xParent.i.i = getelementptr inbounds %"class.llvm::MachineBasicBlock", ptr %2, i64 0, i32 4
+  %3 = load ptr, ptr %xParent.i.i, align 8
+  %tobool8.i = icmp eq ptr %3, null
   br i1 %tobool8.i, label %if.end13.i, label %if.then9.i
 
 if.then9.i:                                       ; preds = %if.then6.i
-  %RegInfo.i.i = getelementptr inbounds %"class.llvm::MachineFunction", %"class.llvm::MachineFunction"* %4, i64 0, i32 5
-  %5 = load %"class.llvm::MachineRegisterInfo"*, %"class.llvm::MachineRegisterInfo"** %RegInfo.i.i, align 8
-  tail call void @_ZN4llvm19MachineRegisterInfo27removeRegOperandFromUseListEPNS_14MachineOperandE(%"class.llvm::MachineRegisterInfo"* %5, %"class.llvm::MachineOperand"* %this)
-  store i32 %Reg.addr.0, i32* %RegNo.i.i, align 4
-  tail call void @_ZN4llvm19MachineRegisterInfo22addRegOperandToUseListEPNS_14MachineOperandE(%"class.llvm::MachineRegisterInfo"* %5, %"class.llvm::MachineOperand"* %this)
+  %RegInfo.i.i = getelementptr inbounds %"class.llvm::MachineFunction", ptr %3, i64 0, i32 5
+  %4 = load ptr, ptr %RegInfo.i.i, align 8
+  tail call void @_ZN4llvm19MachineRegisterInfo27removeRegOperandFromUseListEPNS_14MachineOperandE(ptr %4, ptr %this)
+  store i32 %Reg.addr.0, ptr %RegNo.i.i, align 4
+  tail call void @_ZN4llvm19MachineRegisterInfo22addRegOperandToUseListEPNS_14MachineOperandE(ptr %4, ptr %this)
   br label %_ZN4llvm14MachineOperand6setRegEj.exit
 
 if.end13.i:                                       ; preds = %if.then6.i, %if.then3.i, %if.end.i
-  store i32 %Reg.addr.0, i32* %RegNo.i.i, align 4
+  store i32 %Reg.addr.0, ptr %RegNo.i.i, align 4
   br label %_ZN4llvm14MachineOperand6setRegEj.exit
 
 _ZN4llvm14MachineOperand6setRegEj.exit:           ; preds = %if.end, %if.then9.i, %if.end13.i

diff  --git a/llvm/test/CodeGen/PowerPC/pr15359.ll b/llvm/test/CodeGen/PowerPC/pr15359.ll
index 273663e4f5aa..c7be78e5c3ce 100644
--- a/llvm/test/CodeGen/PowerPC/pr15359.ll
+++ b/llvm/test/CodeGen/PowerPC/pr15359.ll
@@ -8,7 +8,7 @@ target triple = "powerpc64-unknown-linux-gnu"
 
 define fastcc void @func() nounwind {
 entry:
-  store i32 42, i32* @nextIdx
+  store i32 42, ptr @nextIdx
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/pr15630.ll b/llvm/test/CodeGen/PowerPC/pr15630.ll
index f9a5282a8e5f..6293a13f5ab8 100644
--- a/llvm/test/CodeGen/PowerPC/pr15630.ll
+++ b/llvm/test/CodeGen/PowerPC/pr15630.ll
@@ -3,13 +3,13 @@
 target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
-define weak_odr void @_D4core6atomic49__T11atomicStoreVE4core6atomic11MemoryOrder3ThThZ11atomicStoreFNaNbKOhhZv(i8* %val_arg, i8 zeroext %newval_arg) {
+define weak_odr void @_D4core6atomic49__T11atomicStoreVE4core6atomic11MemoryOrder3ThThZ11atomicStoreFNaNbKOhhZv(ptr %val_arg, i8 zeroext %newval_arg) {
 entry:
   %newval = alloca i8
   %ordering = alloca i32, align 4
-  store i8 %newval_arg, i8* %newval
-  %tmp = load i8, i8* %newval
-  store atomic volatile i8 %tmp, i8* %val_arg seq_cst, align 1
+  store i8 %newval_arg, ptr %newval
+  %tmp = load i8, ptr %newval
+  store atomic volatile i8 %tmp, ptr %val_arg seq_cst, align 1
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/pr15632.ll b/llvm/test/CodeGen/PowerPC/pr15632.ll
index 3b794b752eca..d0b29e238ac7 100644
--- a/llvm/test/CodeGen/PowerPC/pr15632.ll
+++ b/llvm/test/CodeGen/PowerPC/pr15632.ll
@@ -8,7 +8,7 @@ declare void @other(ppc_fp128 %tmp70)
 
 define void @bug() {
 entry:
-  %x = load ppc_fp128, ppc_fp128* @ld2, align 16
+  %x = load ppc_fp128, ptr @ld2, align 16
   %tmp70 = frem ppc_fp128 0xM00000000000000000000000000000000, %x
   call void @other(ppc_fp128 %tmp70)
   unreachable

diff  --git a/llvm/test/CodeGen/PowerPC/pr16556-2.ll b/llvm/test/CodeGen/PowerPC/pr16556-2.ll
index e6ec43af1054..9b7b3b21ff30 100644
--- a/llvm/test/CodeGen/PowerPC/pr16556-2.ll
+++ b/llvm/test/CodeGen/PowerPC/pr16556-2.ll
@@ -11,27 +11,26 @@ target triple = "powerpc-unknown-linux-gnu"
 @_D4core4time12TickDuration11ticksPerSecyl = global i64 0
 @.str5 = internal unnamed_addr constant [40 x i8] c"..\5Cldc\5Cruntime\5Cdruntime\5Csrc\5Ccore\5Ctime.d\00"
 @.str83 = internal constant [10 x i8] c"null this\00"
- at .modulefilename = internal constant { i32, i8* } { i32 39, i8* getelementptr inbounds ([40 x i8], [40 x i8]* @.str5, i32 0, i32 0) }
+ at .modulefilename = internal constant { i32, ptr } { i32 39, ptr @.str5 }
 
-declare i8* @_d_assert_msg({ i32, i8* }, { i32, i8* }, i32)
+declare ptr @_d_assert_msg({ i32, ptr }, { i32, ptr }, i32)
 
 
-define weak_odr fastcc i64 @_D4core4time12TickDuration30__T2toVAyaa7_7365636f6e6473TlZ2toMxFNaNbNfZl(%core.time.TickDuration* %.this_arg) {
+define weak_odr fastcc i64 @_D4core4time12TickDuration30__T2toVAyaa7_7365636f6e6473TlZ2toMxFNaNbNfZl(ptr %.this_arg) {
 entry:
   %unitsPerSec = alloca i64, align 8
-  %tmp = icmp ne %core.time.TickDuration* %.this_arg, null
+  %tmp = icmp ne ptr %.this_arg, null
   br i1 %tmp, label %noassert, label %assert
 
 assert:                                           ; preds = %entry
-  %tmp1 = load { i32, i8* }, { i32, i8* }* @.modulefilename
-  %0 = call i8* @_d_assert_msg({ i32, i8* } { i32 9, i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str83, i32 0, i32 0) }, { i32, i8* } %tmp1, i32 1586)
+  %tmp1 = load { i32, ptr }, ptr @.modulefilename
+  %0 = call ptr @_d_assert_msg({ i32, ptr } { i32 9, ptr @.str83 }, { i32, ptr } %tmp1, i32 1586)
   unreachable
 
 noassert:                                         ; preds = %entry
-  %tmp2 = getelementptr %core.time.TickDuration, %core.time.TickDuration* %.this_arg, i32 0, i32 0
-  %tmp3 = load i64, i64* %tmp2
+  %tmp3 = load i64, ptr %.this_arg
   %tmp4 = sitofp i64 %tmp3 to ppc_fp128
-  %tmp5 = load i64, i64* @_D4core4time12TickDuration11ticksPerSecyl
+  %tmp5 = load i64, ptr @_D4core4time12TickDuration11ticksPerSecyl
   %tmp6 = sitofp i64 %tmp5 to ppc_fp128
   %tmp7 = fdiv ppc_fp128 %tmp6, 0xM80000000000000000000000000000000
   %tmp8 = fdiv ppc_fp128 %tmp4, %tmp7

diff  --git a/llvm/test/CodeGen/PowerPC/pr16556.ll b/llvm/test/CodeGen/PowerPC/pr16556.ll
index eea2db4501ed..95f4d1df5b27 100644
--- a/llvm/test/CodeGen/PowerPC/pr16556.ll
+++ b/llvm/test/CodeGen/PowerPC/pr16556.ll
@@ -7,7 +7,7 @@ target triple = "powerpc-unknown-linux-gnu"
 
 %core.time.TickDuration.37.125 = type { i64 }
 
-define weak_odr fastcc i64 @_D4core4time12TickDuration30__T2toVAyaa7_7365636f6e6473TlZ2toMxFNaNbNfZl(%core.time.TickDuration.37.125* %.this_arg) {
+define weak_odr fastcc i64 @_D4core4time12TickDuration30__T2toVAyaa7_7365636f6e6473TlZ2toMxFNaNbNfZl(ptr %.this_arg) {
 entry:
   br i1 undef, label %noassert, label %assert
 

diff  --git a/llvm/test/CodeGen/PowerPC/pr17168.ll b/llvm/test/CodeGen/PowerPC/pr17168.ll
index c77424959300..828bc2b74067 100644
--- a/llvm/test/CodeGen/PowerPC/pr17168.ll
+++ b/llvm/test/CodeGen/PowerPC/pr17168.ll
@@ -26,7 +26,7 @@ for.cond968.preheader:                            ; preds = %for.cond968.prehead
 for.end1042:                                      ; preds = %for.cond968.preheader, %for.cond964.preheader, %entry
 
   %0 = phi i32 [ undef, %for.cond964.preheader ], [ undef, %for.cond968.preheader ], [ undef, %entry ]
-  %1 = load i32, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @grid_points, i64 0, i64 0), align 4, !dbg !285, !tbaa !286
+  %1 = load i32, ptr @grid_points, align 4, !dbg !285, !tbaa !286
   tail call void @llvm.dbg.value(metadata i32 1, i64 0, metadata !268, metadata !290), !dbg !291
   %sub10454270 = add nsw i32 %0, -1, !dbg !291
   %cmp10464271 = icmp sgt i32 %sub10454270, 1, !dbg !291

diff  --git a/llvm/test/CodeGen/PowerPC/pr17354.ll b/llvm/test/CodeGen/PowerPC/pr17354.ll
index 83a9ac981862..38b98dffccea 100644
--- a/llvm/test/CodeGen/PowerPC/pr17354.ll
+++ b/llvm/test/CodeGen/PowerPC/pr17354.ll
@@ -10,11 +10,11 @@ target triple = "powerpc64-unknown-linux-gnu"
 %struct.CS = type { i32 }
 
 @_ZL3glb = internal global [1 x %struct.CS] zeroinitializer, align 4
- at llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @_GLOBAL__I_a, i8* null }]
+ at llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @_GLOBAL__I_a, ptr null }]
 
 define internal void @__cxx_global_var_init() section ".text.startup" {
 entry:
-  call void @_Z4funcv(%struct.CS* sret(%struct.CS) getelementptr inbounds ([1 x %struct.CS], [1 x %struct.CS]* @_ZL3glb, i64 0, i64 0))
+  call void @_Z4funcv(ptr sret(%struct.CS) @_ZL3glb)
   ret void
 }
 
@@ -23,10 +23,9 @@ entry:
 ; CHECK-NEXT: nop
 
 ; Function Attrs: nounwind
-define void @_Z4funcv(%struct.CS* noalias sret(%struct.CS) %agg.result) #0 {
+define void @_Z4funcv(ptr noalias sret(%struct.CS) %agg.result) #0 {
 entry:
-  %a_ = getelementptr inbounds %struct.CS, %struct.CS* %agg.result, i32 0, i32 0
-  store i32 0, i32* %a_, align 4
+  store i32 0, ptr %agg.result, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/pr18663-2.ll b/llvm/test/CodeGen/PowerPC/pr18663-2.ll
index 7725c90a6984..64d767aa0328 100644
--- a/llvm/test/CodeGen/PowerPC/pr18663-2.ll
+++ b/llvm/test/CodeGen/PowerPC/pr18663-2.ll
@@ -4,19 +4,19 @@
 %"class.std::__1::locale::id.1580.4307.4610.8491" = type { %"struct.std::__1::once_flag.1579.4306.4609.8490", i32 }
 %"struct.std::__1::once_flag.1579.4306.4609.8490" = type { i64 }
 %"class.Foam::IOerror.1581.4308.4611.8505" = type { %"class.Foam::error.1535.4262.4565.8504", %"class.Foam::string.1530.4257.4560.8499", i32, i32 }
-%"class.Foam::error.1535.4262.4565.8504" = type { %"class.std::exception.1523.4250.4553.8492", [36 x i8], %"class.Foam::string.1530.4257.4560.8499", %"class.Foam::string.1530.4257.4560.8499", i32, i8, i8, %"class.Foam::OStringStream.1534.4261.4564.8503"* }
-%"class.std::exception.1523.4250.4553.8492" = type { i32 (...)** }
+%"class.Foam::error.1535.4262.4565.8504" = type { %"class.std::exception.1523.4250.4553.8492", [36 x i8], %"class.Foam::string.1530.4257.4560.8499", %"class.Foam::string.1530.4257.4560.8499", i32, i8, i8, ptr }
+%"class.std::exception.1523.4250.4553.8492" = type { ptr }
 %"class.Foam::OStringStream.1534.4261.4564.8503" = type { %"class.Foam::OSstream.1533.4260.4563.8502" }
-%"class.Foam::OSstream.1533.4260.4563.8502" = type { [50 x i8], %"class.Foam::fileName.1531.4258.4561.8500", %"class.std::__1::basic_ostream.1532.4259.4562.8501"* }
+%"class.Foam::OSstream.1533.4260.4563.8502" = type { [50 x i8], %"class.Foam::fileName.1531.4258.4561.8500", ptr }
 %"class.Foam::fileName.1531.4258.4561.8500" = type { %"class.Foam::string.1530.4257.4560.8499" }
-%"class.std::__1::basic_ostream.1532.4259.4562.8501" = type { i32 (...)**, [148 x i8] }
+%"class.std::__1::basic_ostream.1532.4259.4562.8501" = type { ptr, [148 x i8] }
 %"class.Foam::string.1530.4257.4560.8499" = type { %"class.std::__1::basic_string.1529.4256.4559.8498" }
 %"class.std::__1::basic_string.1529.4256.4559.8498" = type { %"class.std::__1::__compressed_pair.1528.4255.4558.8497" }
 %"class.std::__1::__compressed_pair.1528.4255.4558.8497" = type { %"class.std::__1::__libcpp_compressed_pair_imp.1527.4254.4557.8496" }
 %"class.std::__1::__libcpp_compressed_pair_imp.1527.4254.4557.8496" = type { %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__rep.1526.4253.4556.8495" }
 %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__rep.1526.4253.4556.8495" = type { %union.anon.1525.4252.4555.8494 }
 %union.anon.1525.4252.4555.8494 = type { %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__long.1524.4251.4554.8493" }
-%"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__long.1524.4251.4554.8493" = type { i64, i64, i8* }
+%"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__long.1524.4251.4554.8493" = type { i64, i64, ptr }
 
 @.str3 = external unnamed_addr constant [16 x i8], align 1
 @_ZNSt3__15ctypeIcE2idE = external global %"class.std::__1::locale::id.1580.4307.4610.8491"
@@ -46,7 +46,7 @@ declare void @_ZN4Foam7IOerror4exitEi() #0
 ; Function Attrs: inlinehint
 declare void @_ZN4Foam8fileName12stripInvalidEv() #2 align 2
 
-define void @_ZN4Foam3CSVINS_6VectorIdEEE4readEv() #0 align 2 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @_ZN4Foam3CSVINS_6VectorIdEEE4readEv() #0 align 2 personality ptr @__gxx_personality_v0 {
 entry:
   invoke void @_ZN4Foam6string6expandEb()
           to label %invoke.cont unwind label %lpad
@@ -66,7 +66,7 @@ _ZN4Foam6stringC2ERKS0_.exit.i:                   ; preds = %invoke.cont
           to label %invoke.cont2 unwind label %lpad.i
 
 lpad.i:                                           ; preds = %_ZN4Foam6stringC2ERKS0_.exit.i
-  %0 = landingpad { i8*, i32 }
+  %0 = landingpad { ptr, i32 }
           cleanup
   br label %ehcleanup142
 
@@ -90,17 +90,17 @@ memptr.end.i:                                     ; preds = %invoke.cont8
           to label %if.end unwind label %lpad5
 
 lpad:                                             ; preds = %if.then.i.i.i.i176, %entry
-  %1 = landingpad { i8*, i32 }
+  %1 = landingpad { ptr, i32 }
           cleanup
   br label %ehcleanup142
 
 lpad3:                                            ; preds = %invoke.cont2
-  %2 = landingpad { i8*, i32 }
+  %2 = landingpad { ptr, i32 }
           cleanup
   br label %ehcleanup142
 
 lpad5:                                            ; preds = %memptr.end.i, %invoke.cont8, %if.then
-  %3 = landingpad { i8*, i32 }
+  %3 = landingpad { ptr, i32 }
           cleanup
   br label %ehcleanup142
 
@@ -119,12 +119,12 @@ invoke.cont.i.i.i:                                ; preds = %.noexc205
   unreachable
 
 lpad.i.i.i:                                       ; preds = %.noexc205
-  %4 = landingpad { i8*, i32 }
+  %4 = landingpad { ptr, i32 }
           cleanup
   br label %ehcleanup142
 
 lpad19:                                           ; preds = %for.body
-  %5 = landingpad { i8*, i32 }
+  %5 = landingpad { ptr, i32 }
           cleanup
   br label %ehcleanup142
 
@@ -144,7 +144,7 @@ vector.body:                                      ; preds = %vector.body, %if.en
   br label %vector.body
 
 ehcleanup142:                                     ; preds = %lpad19, %lpad.i.i.i, %lpad5, %lpad3, %lpad, %lpad.i
-  resume { i8*, i32 } undef
+  resume { ptr, i32 } undef
 }
 
 attributes #0 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }

diff  --git a/llvm/test/CodeGen/PowerPC/pr18663.ll b/llvm/test/CodeGen/PowerPC/pr18663.ll
index 8e66858b893a..1603919189f7 100644
--- a/llvm/test/CodeGen/PowerPC/pr18663.ll
+++ b/llvm/test/CodeGen/PowerPC/pr18663.ll
@@ -4,78 +4,78 @@
 %class.Point.1 = type { %class.Tensor.0 }
 %class.Tensor.0 = type { [3 x double] }
 %class.TriaObjectAccessor.57 = type { %class.TriaAccessor.56 }
-%class.TriaAccessor.56 = type { i32, i32, %class.Triangulation.55* }
-%class.Triangulation.55 = type { %class.Subscriptor, %"class.std::vector.46", %"class.std::vector", %"class.std::vector.3.8", [255 x %class.Boundary.50*], i32, %struct.TriaNumberCache.54 }
-%class.Subscriptor = type { i32 (...)**, i32, %"class.std::type_info.2"* }
-%"class.std::type_info.2" = type { i32 (...)**, i8* }
+%class.TriaAccessor.56 = type { i32, i32, ptr }
+%class.Triangulation.55 = type { %class.Subscriptor, %"class.std::vector.46", %"class.std::vector", %"class.std::vector.3.8", [255 x ptr], i32, %struct.TriaNumberCache.54 }
+%class.Subscriptor = type { ptr, i32, ptr }
+%"class.std::type_info.2" = type { ptr, ptr }
 %"class.std::vector.46" = type { %"struct.std::_Vector_base.45" }
-%"struct.std::_Vector_base.45" = type { %"struct.std::_Vector_base<TriangulationLevel<3> *, std::allocator<TriangulationLevel<3> *> >::_Vector_impl.44" }
-%"struct.std::_Vector_base<TriangulationLevel<3> *, std::allocator<TriangulationLevel<3> *> >::_Vector_impl.44" = type { %class.TriangulationLevel.43**, %class.TriangulationLevel.43**, %class.TriangulationLevel.43** }
+%"struct.std::_Vector_base.45" = type { %"struct.std::_Vector_base<TriangulationLevelptr, std::allocator<TriangulationLevelptr> >::_Vector_impl.44" }
+%"struct.std::_Vector_base<TriangulationLevelptr, std::allocator<TriangulationLevelptr> >::_Vector_impl.44" = type { ptr, ptr, ptr }
 %class.TriangulationLevel.43 = type { %class.TriangulationLevel.0.37, %"struct.TriangulationLevel<3>::HexesData.42" }
 %class.TriangulationLevel.0.37 = type { %class.TriangulationLevel.1.31, %"struct.TriangulationLevel<2>::QuadsData.36" }
 %class.TriangulationLevel.1.31 = type { %class.TriangulationLevel, %"struct.TriangulationLevel<1>::LinesData.30" }
 %class.TriangulationLevel = type { %"class.std::vector.3.8", %"class.std::vector.3.8", %"class.std::vector.7.12", %"class.std::vector.12.15" }
 %"class.std::vector.7.12" = type { %"struct.std::_Vector_base" }
 %"struct.std::_Vector_base" = type { %"struct.std::_Vector_base<std::pair<int, int>, std::allocator<std::pair<int, int> > >::_Vector_impl.10" }
-%"struct.std::_Vector_base<std::pair<int, int>, std::allocator<std::pair<int, int> > >::_Vector_impl.10" = type { %"struct.std::pair.9"*, %"struct.std::pair.9"*, %"struct.std::pair.9"* }
+%"struct.std::_Vector_base<std::pair<int, int>, std::allocator<std::pair<int, int> > >::_Vector_impl.10" = type { ptr, ptr, ptr }
 %"struct.std::pair.9" = type opaque
 %"class.std::vector.12.15" = type { %"struct.std::_Vector_base.13.14" }
 %"struct.std::_Vector_base.13.14" = type { %"struct.std::_Vector_base<unsigned int, std::allocator<unsigned int> >::_Vector_impl.13" }
-%"struct.std::_Vector_base<unsigned int, std::allocator<unsigned int> >::_Vector_impl.13" = type { i32*, i32*, i32* }
+%"struct.std::_Vector_base<unsigned int, std::allocator<unsigned int> >::_Vector_impl.13" = type { ptr, ptr, ptr }
 %"struct.TriangulationLevel<1>::LinesData.30" = type { %"class.std::vector.17.20", %"class.std::vector.22.23", %"class.std::vector.3.8", %"class.std::vector.3.8", %"class.std::vector.27.26", %"class.std::vector.32.29" }
 %"class.std::vector.17.20" = type { %"struct.std::_Vector_base.18.19" }
 %"struct.std::_Vector_base.18.19" = type { %"struct.std::_Vector_base<Line, std::allocator<Line> >::_Vector_impl.18" }
-%"struct.std::_Vector_base<Line, std::allocator<Line> >::_Vector_impl.18" = type { %class.Line.17*, %class.Line.17*, %class.Line.17* }
+%"struct.std::_Vector_base<Line, std::allocator<Line> >::_Vector_impl.18" = type { ptr, ptr, ptr }
 %class.Line.17 = type { [2 x i32] }
 %"class.std::vector.22.23" = type { %"struct.std::_Vector_base.23.22" }
 %"struct.std::_Vector_base.23.22" = type { %"struct.std::_Vector_base<int, std::allocator<int> >::_Vector_impl.21" }
-%"struct.std::_Vector_base<int, std::allocator<int> >::_Vector_impl.21" = type { i32*, i32*, i32* }
+%"struct.std::_Vector_base<int, std::allocator<int> >::_Vector_impl.21" = type { ptr, ptr, ptr }
 %"class.std::vector.27.26" = type { %"struct.std::_Vector_base.28.25" }
 %"struct.std::_Vector_base.28.25" = type { %"struct.std::_Vector_base<unsigned char, std::allocator<unsigned char> >::_Vector_impl.24" }
-%"struct.std::_Vector_base<unsigned char, std::allocator<unsigned char> >::_Vector_impl.24" = type { i8*, i8*, i8* }
+%"struct.std::_Vector_base<unsigned char, std::allocator<unsigned char> >::_Vector_impl.24" = type { ptr, ptr, ptr }
 %"class.std::vector.32.29" = type { %"struct.std::_Vector_base.33.28" }
-%"struct.std::_Vector_base.33.28" = type { %"struct.std::_Vector_base<void *, std::allocator<void *> >::_Vector_impl.27" }
-%"struct.std::_Vector_base<void *, std::allocator<void *> >::_Vector_impl.27" = type { i8**, i8**, i8** }
+%"struct.std::_Vector_base.33.28" = type { %"struct.std::_Vector_base<ptr, std::allocator<ptr> >::_Vector_impl.27" }
+%"struct.std::_Vector_base<ptr, std::allocator<ptr> >::_Vector_impl.27" = type { ptr, ptr, ptr }
 %"struct.TriangulationLevel<2>::QuadsData.36" = type { %"class.std::vector.37.35", %"class.std::vector.22.23", %"class.std::vector.3.8", %"class.std::vector.3.8", %"class.std::vector.27.26", %"class.std::vector.32.29" }
 %"class.std::vector.37.35" = type { %"struct.std::_Vector_base.38.34" }
 %"struct.std::_Vector_base.38.34" = type { %"struct.std::_Vector_base<Quad, std::allocator<Quad> >::_Vector_impl.33" }
-%"struct.std::_Vector_base<Quad, std::allocator<Quad> >::_Vector_impl.33" = type { %class.Quad.32*, %class.Quad.32*, %class.Quad.32* }
+%"struct.std::_Vector_base<Quad, std::allocator<Quad> >::_Vector_impl.33" = type { ptr, ptr, ptr }
 %class.Quad.32 = type { [4 x i32] }
 %"struct.TriangulationLevel<3>::HexesData.42" = type { %"class.std::vector.42.41", %"class.std::vector.22.23", %"class.std::vector.3.8", %"class.std::vector.3.8", %"class.std::vector.27.26", %"class.std::vector.32.29", %"class.std::vector.3.8" }
 %"class.std::vector.42.41" = type { %"struct.std::_Vector_base.43.40" }
 %"struct.std::_Vector_base.43.40" = type { %"struct.std::_Vector_base<Hexahedron, std::allocator<Hexahedron> >::_Vector_impl.39" }
-%"struct.std::_Vector_base<Hexahedron, std::allocator<Hexahedron> >::_Vector_impl.39" = type { %class.Hexahedron.38*, %class.Hexahedron.38*, %class.Hexahedron.38* }
+%"struct.std::_Vector_base<Hexahedron, std::allocator<Hexahedron> >::_Vector_impl.39" = type { ptr, ptr, ptr }
 %class.Hexahedron.38= type { [6 x i32] }
 %"class.std::vector" = type { %"struct.std::_Vector_base.48.48" }
 %"struct.std::_Vector_base.48.48" = type { %"struct.std::_Vector_base<Point<3>, std::allocator<Point<3> > >::_Vector_impl.47" }
-%"struct.std::_Vector_base<Point<3>, std::allocator<Point<3> > >::_Vector_impl.47" = type { %class.Point.1*, %class.Point.1*, %class.Point.1* }
+%"struct.std::_Vector_base<Point<3>, std::allocator<Point<3> > >::_Vector_impl.47" = type { ptr, ptr, ptr }
 %"class.std::vector.3.8" = type { %"struct.std::_Bvector_base.7" }
 %"struct.std::_Bvector_base.7" = type { %"struct.std::_Bvector_base<std::allocator<bool> >::_Bvector_impl.6" }
-%"struct.std::_Bvector_base<std::allocator<bool> >::_Bvector_impl.6" = type { %"struct.std::_Bit_iterator.5", %"struct.std::_Bit_iterator.5", i64* }
+%"struct.std::_Bvector_base<std::allocator<bool> >::_Bvector_impl.6" = type { %"struct.std::_Bit_iterator.5", %"struct.std::_Bit_iterator.5", ptr }
 %"struct.std::_Bit_iterator.5" = type { %"struct.std::_Bit_iterator_base.base.4", [4 x i8] }
-%"struct.std::_Bit_iterator_base.base.4" = type <{ i64*, i32 }>
+%"struct.std::_Bit_iterator_base.base.4" = type <{ ptr, i32 }>
 %class.Boundary.50 = type opaque
 %struct.TriaNumberCache.54 = type { %struct.TriaNumberCache.52.52, i32, %"class.std::vector.12.15", i32, %"class.std::vector.12.15" }
 %struct.TriaNumberCache.52.52 = type { %struct.TriaNumberCache.53.51, i32, %"class.std::vector.12.15", i32, %"class.std::vector.12.15" }
 %struct.TriaNumberCache.53.51 = type { i32, %"class.std::vector.12.15", i32, %"class.std::vector.12.15" }
 
-define void @_ZNK18TriaObjectAccessorILi3ELi3EE10barycenterEv(%class.Point.1* noalias nocapture sret(%class.Point.1) %agg.result, %class.TriaObjectAccessor.57* %this) #0 align 2 {
+define void @_ZNK18TriaObjectAccessorILi3ELi3EE10barycenterEv(ptr noalias nocapture sret(%class.Point.1) %agg.result, ptr %this) #0 align 2 {
 entry:
-  %0 = load double, double* null, align 8
-  %1 = load double, double* undef, align 8
-  %call18 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 6)
-  %2 = load double, double* undef, align 8
-  %call21 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 7)
-  %3 = load double, double* undef, align 8
-  %call33 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 3)
-  %4 = load double, double* null, align 8
-  %5 = load double, double* undef, align 8
-  %call45 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 7)
-  %6 = load double, double* undef, align 8
-  %call48 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 0)
-  %7 = load double, double* undef, align 8
-  %call66 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 6)
-  %8 = load double, double* undef, align 8
+  %0 = load double, ptr null, align 8
+  %1 = load double, ptr undef, align 8
+  %call18 = tail call dereferenceable(24) ptr @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(ptr %this, i32 zeroext 6)
+  %2 = load double, ptr undef, align 8
+  %call21 = tail call dereferenceable(24) ptr @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(ptr %this, i32 zeroext 7)
+  %3 = load double, ptr undef, align 8
+  %call33 = tail call dereferenceable(24) ptr @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(ptr %this, i32 zeroext 3)
+  %4 = load double, ptr null, align 8
+  %5 = load double, ptr undef, align 8
+  %call45 = tail call dereferenceable(24) ptr @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(ptr %this, i32 zeroext 7)
+  %6 = load double, ptr undef, align 8
+  %call48 = tail call dereferenceable(24) ptr @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(ptr %this, i32 zeroext 0)
+  %7 = load double, ptr undef, align 8
+  %call66 = tail call dereferenceable(24) ptr @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(ptr %this, i32 zeroext 6)
+  %8 = load double, ptr undef, align 8
   %mul334 = fmul double undef, 2.000000e+00
   %mul579 = fmul double %2, %5
   %mul597 = fmul double undef, %mul579
@@ -289,10 +289,10 @@ entry:
   %add8901 = fadd double %mul8900, %add8893
   %mul9767 = fmul double 0.000000e+00, %add8901
   %mul9768 = fmul double %mul9767, 0x3FC5555555555555
-  store double %mul4917, double* undef, align 8
-  store double %mul9768, double* undef, align 8
+  store double %mul4917, ptr undef, align 8
+  store double %mul9768, ptr undef, align 8
   ret void
 }
 
-declare dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57*, i32 zeroext) #0
+declare dereferenceable(24) ptr @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(ptr, i32 zeroext) #0
 

diff  --git a/llvm/test/CodeGen/PowerPC/pr20442.ll b/llvm/test/CodeGen/PowerPC/pr20442.ll
index b1d71056c3e8..d00c01ea69f7 100644
--- a/llvm/test/CodeGen/PowerPC/pr20442.ll
+++ b/llvm/test/CodeGen/PowerPC/pr20442.ll
@@ -14,21 +14,20 @@ target triple = "powerpc-unknown-linux-gnu"
 %struct.anon = type { i32 }
 %struct.anon.0 = type { i32 }
 
- at b = common global %struct.anon* null, align 4
- at a = common global %struct.anon.0* null, align 4
+ at b = common global ptr null, align 4
+ at a = common global ptr null, align 4
 
 ; Function Attrs: nounwind readonly uwtable
 define i32 @fn1() #0 {
 entry:
-  %0 = load %struct.anon*, %struct.anon** @b, align 4
-  %1 = ptrtoint %struct.anon* %0 to i32
-  %cmp = icmp sgt %struct.anon* %0, null
-  %2 = load %struct.anon.0*, %struct.anon.0** @a, align 4
+  %0 = load ptr, ptr @b, align 4
+  %1 = ptrtoint ptr %0 to i32
+  %cmp = icmp sgt ptr %0, null
+  %2 = load ptr, ptr @a, align 4
   br i1 %cmp, label %for.bodythread-pre-split, label %if.end8
 
 for.bodythread-pre-split:                         ; preds = %entry
-  %aclass = getelementptr inbounds %struct.anon.0, %struct.anon.0* %2, i32 0, i32 0
-  %.pr = load i32, i32* %aclass, align 4
+  %.pr = load i32, ptr %2, align 4
   br label %for.body
 
 for.body:                                         ; preds = %for.bodythread-pre-split, %for.body
@@ -51,10 +50,10 @@ while.cond:                                       ; preds = %while.body
 
 while.body:                                       ; preds = %while.body.lr.ph, %while.cond
   %j.110 = phi i32 [ %j.1.ph13, %while.body.lr.ph ], [ %inc7, %while.cond ]
-  %aclass_index = getelementptr inbounds %struct.anon, %struct.anon* %0, i32 %j.110, i32 0
-  %3 = load i32, i32* %aclass_index, align 4
-  %aclass5 = getelementptr inbounds %struct.anon.0, %struct.anon.0* %2, i32 %3, i32 0
-  %4 = load i32, i32* %aclass5, align 4
+  %aclass_index = getelementptr inbounds %struct.anon, ptr %0, i32 %j.110, i32 0
+  %3 = load i32, ptr %aclass_index, align 4
+  %aclass5 = getelementptr inbounds %struct.anon.0, ptr %2, i32 %3, i32 0
+  %4 = load i32, ptr %aclass5, align 4
   %tobool = icmp eq i32 %4, 0
   %inc7 = add nsw i32 %j.110, 1
   br i1 %tobool, label %while.cond, label %if.then6

diff  --git a/llvm/test/CodeGen/PowerPC/pr22711.ll b/llvm/test/CodeGen/PowerPC/pr22711.ll
index 044486553a6d..8acaa6123ac7 100644
--- a/llvm/test/CodeGen/PowerPC/pr22711.ll
+++ b/llvm/test/CodeGen/PowerPC/pr22711.ll
@@ -2,14 +2,14 @@
 
 ; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -filetype=obj -o - | llvm-readobj --sections - | FileCheck %s
 
-define void @test(i32* %a) {
+define void @test(ptr %a) {
 entry:
-  %a.addr = alloca i32*, align 8
-  store i32* %a, i32** %a.addr, align 8
-  %0 = load i32*,  i32** %a.addr, align 8
-  %incdec.ptr = getelementptr inbounds i32, i32* %0, i32 1
-  store i32* %incdec.ptr, i32** %a.addr, align 8
-  %1 = load i32,  i32* %0, align 4
+  %a.addr = alloca ptr, align 8
+  store ptr %a, ptr %a.addr, align 8
+  %0 = load ptr,  ptr %a.addr, align 8
+  %incdec.ptr = getelementptr inbounds i32, ptr %0, i32 1
+  store ptr %incdec.ptr, ptr %a.addr, align 8
+  %1 = load i32,  ptr %0, align 4
   switch i32 %1, label %sw.epilog [
     i32 17, label %sw.bb
     i32 13, label %sw.bb1
@@ -21,38 +21,38 @@ entry:
   ]
 
 sw.bb:                                            ; preds = %entry
-  %2 = load i32*,  i32** %a.addr, align 8
-  store i32 2, i32* %2, align 4
+  %2 = load ptr,  ptr %a.addr, align 8
+  store i32 2, ptr %2, align 4
   br label %sw.epilog
 
 sw.bb1:                                           ; preds = %entry
-  %3 = load i32*,  i32** %a.addr, align 8
-  store i32 3, i32* %3, align 4
+  %3 = load ptr,  ptr %a.addr, align 8
+  store i32 3, ptr %3, align 4
   br label %sw.epilog
 
 sw.bb2:                                           ; preds = %entry
-  %4 = load i32*,  i32** %a.addr, align 8
-  store i32 5, i32* %4, align 4
+  %4 = load ptr,  ptr %a.addr, align 8
+  store i32 5, ptr %4, align 4
   br label %sw.epilog
 
 sw.bb3:                                           ; preds = %entry
-  %5 = load i32*,  i32** %a.addr, align 8
-  store i32 7, i32* %5, align 4
+  %5 = load ptr,  ptr %a.addr, align 8
+  store i32 7, ptr %5, align 4
   br label %sw.epilog
 
 sw.bb4:                                           ; preds = %entry
-  %6 = load i32*,  i32** %a.addr, align 8
-  store i32 11, i32* %6, align 4
+  %6 = load ptr,  ptr %a.addr, align 8
+  store i32 11, ptr %6, align 4
   br label %sw.epilog
 
 sw.bb5:                                           ; preds = %entry
-  %7 = load i32*,  i32** %a.addr, align 8
-  store i32 13, i32* %7, align 4
+  %7 = load ptr,  ptr %a.addr, align 8
+  store i32 13, ptr %7, align 4
   br label %sw.epilog
 
 sw.bb6:                                           ; preds = %entry
-  %8 = load i32*,  i32** %a.addr, align 8
-  store i32 17, i32* %8, align 4
+  %8 = load ptr,  ptr %a.addr, align 8
+  store i32 17, ptr %8, align 4
   br label %sw.epilog
 
 sw.epilog:                                        ; preds = %entry, %sw.bb6, %sw.bb5, %sw.bb4, %sw.bb3, %sw.bb2, %sw.bb1, %sw.bb

diff  --git a/llvm/test/CodeGen/PowerPC/pr24216.ll b/llvm/test/CodeGen/PowerPC/pr24216.ll
index 522c56192267..5163e1280705 100644
--- a/llvm/test/CodeGen/PowerPC/pr24216.ll
+++ b/llvm/test/CodeGen/PowerPC/pr24216.ll
@@ -2,11 +2,11 @@
 
 ; Test case adapted from PR24216.
 
-define void @foo(<16 x i8>* nocapture readonly %in, <16 x i8>* nocapture %out) {
+define void @foo(ptr nocapture readonly %in, ptr nocapture %out) {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* %in, align 16
+  %0 = load <16 x i8>, ptr %in, align 16
   %1 = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 2, i32 3, i32 4, i32 5, i32 2, i32 3, i32 4, i32 5, i32 2, i32 3, i32 4, i32 5>
-  store <16 x i8> %1, <16 x i8>* %out, align 16
+  store <16 x i8> %1, ptr %out, align 16
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/pr24546.ll b/llvm/test/CodeGen/PowerPC/pr24546.ll
index 028fd2d8f006..527139a4d3a0 100644
--- a/llvm/test/CodeGen/PowerPC/pr24546.ll
+++ b/llvm/test/CodeGen/PowerPC/pr24546.ll
@@ -19,7 +19,7 @@ if.then.i:                                        ; preds = %if.then
   br label %php_intpow10.exit, !dbg !41
 
 if.end.i:                                         ; preds = %if.then
-  %0 = load double, double* undef, align 8, !dbg !42, !tbaa !43
+  %0 = load double, ptr undef, align 8, !dbg !42, !tbaa !43
   br label %php_intpow10.exit, !dbg !47
 
 php_intpow10.exit:                                ; preds = %if.end.i, %if.then.i

diff  --git a/llvm/test/CodeGen/PowerPC/pr24636.ll b/llvm/test/CodeGen/PowerPC/pr24636.ll
index 59567aefcc2e..d423e6a053f0 100644
--- a/llvm/test/CodeGen/PowerPC/pr24636.ll
+++ b/llvm/test/CodeGen/PowerPC/pr24636.ll
@@ -25,7 +25,7 @@ define void @fn2() #0 align 4 {
 
 .lr.ph.split.split:                               ; preds = %.lr.ph.split.split, %.lr.ph.split
   %1 = phi i32 [ %2, %.lr.ph.split.split ], [ undef, %.lr.ph.split ]
-  %2 = and i32 %1, and (i32 and (i32 and (i32 and (i32 and (i32 and (i32 and (i32 zext (i1 select (i1 icmp eq ([1 x i32]* bitcast (i32* @c to [1 x i32]*), [1 x i32]* @b), i1 true, i1 false) to i32), i32 zext (i1 select (i1 icmp eq ([1 x i32]* bitcast (i32* @c to [1 x i32]*), [1 x i32]* @b), i1 true, i1 false) to i32)), i32 zext (i1 select (i1 icmp eq ([1 x i32]* bitcast (i32* @c to [1 x i32]*), [1 x i32]* @b), i1 true, i1 false) to i32)), i32 zext (i1 select (i1 icmp eq ([1 x i32]* bitcast (i32* @c to [1 x i32]*), [1 x i32]* @b), i1 true, i1 false) to i32)), i32 zext (i1 select (i1 icmp eq ([1 x i32]* bitcast (i32* @c to [1 x i32]*), [1 x i32]* @b), i1 true, i1 false) to i32)), i32 zext (i1 select (i1 icmp eq ([1 x i32]* bitcast (i32* @c to [1 x i32]*), [1 x i32]* @b), i1 true, i1 false) to i32)), i32 zext (i1 select (i1 icmp eq ([1 x i32]* bitcast (i32* @c to [1 x i32]*), [1 x i32]* @b), i1 true, i1 false) to i32)), i32 zext (i1 select (i1 icmp eq ([1 x i32]* bitcast (i32* @c to [1 x i32]*), [1 x i32]* @b), i1 true, i1 false) to i32))
+  %2 = and i32 %1, and (i32 and (i32 and (i32 and (i32 and (i32 and (i32 and (i32 zext (i1 select (i1 icmp eq (ptr @c, ptr @b), i1 true, i1 false) to i32), i32 zext (i1 select (i1 icmp eq (ptr @c, ptr @b), i1 true, i1 false) to i32)), i32 zext (i1 select (i1 icmp eq (ptr @c, ptr @b), i1 true, i1 false) to i32)), i32 zext (i1 select (i1 icmp eq (ptr @c, ptr @b), i1 true, i1 false) to i32)), i32 zext (i1 select (i1 icmp eq (ptr @c, ptr @b), i1 true, i1 false) to i32)), i32 zext (i1 select (i1 icmp eq (ptr @c, ptr @b), i1 true, i1 false) to i32)), i32 zext (i1 select (i1 icmp eq (ptr @c, ptr @b), i1 true, i1 false) to i32)), i32 zext (i1 select (i1 icmp eq (ptr @c, ptr @b), i1 true, i1 false) to i32))
   %3 = icmp slt i32 undef, 4
   br i1 %3, label %.lr.ph.split.split, label %._crit_edge
 

diff  --git a/llvm/test/CodeGen/PowerPC/pr25157-peephole.ll b/llvm/test/CodeGen/PowerPC/pr25157-peephole.ll
index d3bfb910fc9f..0dccbabb9cc5 100644
--- a/llvm/test/CodeGen/PowerPC/pr25157-peephole.ll
+++ b/llvm/test/CodeGen/PowerPC/pr25157-peephole.ll
@@ -49,10 +49,10 @@ L.LB38_2937:
   unreachable
 
 L.LB38_2452:
-  %0 = load float, float* bitcast (i8* getelementptr inbounds (%struct.BSS38.51.4488.9911.14348.16813.20264.24701.28152.31603.35054.39491.44914.45407.46393.46886.47872.49351.49844.50830.51323.52309.53295.53788.54281.55267.55760.59211.61625, %struct.BSS38.51.4488.9911.14348.16813.20264.24701.28152.31603.35054.39491.44914.45407.46393.46886.47872.49351.49844.50830.51323.52309.53295.53788.54281.55267.55760.59211.61625* @.BSS38, i64 0, i32 0, i64 16) to float*), align 16
+  %0 = load float, ptr getelementptr inbounds (%struct.BSS38.51.4488.9911.14348.16813.20264.24701.28152.31603.35054.39491.44914.45407.46393.46886.47872.49351.49844.50830.51323.52309.53295.53788.54281.55267.55760.59211.61625, ptr @.BSS38, i64 0, i32 0, i64 16), align 16
   %1 = fpext float %0 to double
   %2 = insertelement <2 x double> undef, double %1, i32 1
-  store <2 x double> %2, <2 x double>* bitcast (i8* getelementptr inbounds (%struct_main1_2_.491.4928.10351.14788.17253.20704.25141.28592.32043.35494.39931.45354.45847.46833.47326.48312.49791.50284.51270.51763.52749.53735.54228.54721.55707.56200.59651.61626, %struct_main1_2_.491.4928.10351.14788.17253.20704.25141.28592.32043.35494.39931.45354.45847.46833.47326.48312.49791.50284.51270.51763.52749.53735.54228.54721.55707.56200.59651.61626* @_main1_2_, i64 0, i32 0, i64 32) to <2 x double>*), align 16
+  store <2 x double> %2, ptr getelementptr inbounds (%struct_main1_2_.491.4928.10351.14788.17253.20704.25141.28592.32043.35494.39931.45354.45847.46833.47326.48312.49791.50284.51270.51763.52749.53735.54228.54721.55707.56200.59651.61626, ptr @_main1_2_, i64 0, i32 0, i64 32), align 16
   unreachable
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/pr25157.ll b/llvm/test/CodeGen/PowerPC/pr25157.ll
index 982dfcd74a8e..386d65cf0ef9 100644
--- a/llvm/test/CodeGen/PowerPC/pr25157.ll
+++ b/llvm/test/CodeGen/PowerPC/pr25157.ll
@@ -49,10 +49,10 @@ L.LB38_2937:
   unreachable
 
 L.LB38_2452:
-  %0 = load float, float* bitcast (i8* getelementptr inbounds (%struct.BSS38.51.4488.9911.14348.16813.20264.24701.28152.31603.35054.39491.44914.45407.46393.46886.47872.49351.49844.50830.51323.52309.53295.53788.54281.55267.55760.59211.61625, %struct.BSS38.51.4488.9911.14348.16813.20264.24701.28152.31603.35054.39491.44914.45407.46393.46886.47872.49351.49844.50830.51323.52309.53295.53788.54281.55267.55760.59211.61625* @.BSS38, i64 0, i32 0, i64 16) to float*), align 16
+  %0 = load float, ptr getelementptr inbounds (%struct.BSS38.51.4488.9911.14348.16813.20264.24701.28152.31603.35054.39491.44914.45407.46393.46886.47872.49351.49844.50830.51323.52309.53295.53788.54281.55267.55760.59211.61625, ptr @.BSS38, i64 0, i32 0, i64 16), align 16
   %1 = fpext float %0 to double
   %2 = insertelement <2 x double> undef, double %1, i32 1
-  store <2 x double> %2, <2 x double>* bitcast (i8* getelementptr inbounds (%struct_main1_2_.491.4928.10351.14788.17253.20704.25141.28592.32043.35494.39931.45354.45847.46833.47326.48312.49791.50284.51270.51763.52749.53735.54228.54721.55707.56200.59651.61626, %struct_main1_2_.491.4928.10351.14788.17253.20704.25141.28592.32043.35494.39931.45354.45847.46833.47326.48312.49791.50284.51270.51763.52749.53735.54228.54721.55707.56200.59651.61626* @_main1_2_, i64 0, i32 0, i64 32) to <2 x double>*), align 16
+  store <2 x double> %2, ptr getelementptr inbounds (%struct_main1_2_.491.4928.10351.14788.17253.20704.25141.28592.32043.35494.39931.45354.45847.46833.47326.48312.49791.50284.51270.51763.52749.53735.54228.54721.55707.56200.59651.61626, ptr @_main1_2_, i64 0, i32 0, i64 32), align 16
   unreachable
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/pr26378.ll b/llvm/test/CodeGen/PowerPC/pr26378.ll
index 36966a13e30b..472c3a7c993d 100644
--- a/llvm/test/CodeGen/PowerPC/pr26378.ll
+++ b/llvm/test/CodeGen/PowerPC/pr26378.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -verify-machineinstrs -compile-twice -filetype obj \
 ; RUN:   -mtriple=powerpc64le-unknown-unknown -mcpu=pwr8 < %s
 @foo = common global i32 0, align 4
-define i8* @blah() #0 {
-  ret i8* bitcast (i32* @foo to i8*)
+define ptr @blah() #0 {
+  ret ptr @foo
 }  

diff  --git a/llvm/test/CodeGen/PowerPC/pr26690.ll b/llvm/test/CodeGen/PowerPC/pr26690.ll
index 363740818131..fb3dca005562 100644
--- a/llvm/test/CodeGen/PowerPC/pr26690.ll
+++ b/llvm/test/CodeGen/PowerPC/pr26690.ll
@@ -5,7 +5,7 @@
 %struct.anon.1 = type { i32 }
 
 @i = common global i32 0, align 4
- at b = common global i32* null, align 8
+ at b = common global ptr null, align 8
 @c = common global i32 0, align 4
 @a = common global i32 0, align 4
 @h = common global i32 0, align 4
@@ -16,20 +16,20 @@
 @e = common global i32 0, align 4
 
 ; Function Attrs: norecurse nounwind
-define signext i32 @fn1(i32* nocapture %p1, i32 signext %p2, i32* nocapture %p3) {
+define signext i32 @fn1(ptr nocapture %p1, i32 signext %p2, ptr nocapture %p3) {
 entry:
-  %0 = load i32, i32* @i, align 4, !tbaa !1
+  %0 = load i32, ptr @i, align 4, !tbaa !1
   %cond = icmp eq i32 %0, 8
   br i1 %cond, label %if.end16, label %while.cond.preheader
 
 while.cond.preheader:                             ; preds = %entry
-  %1 = load i32*, i32** @b, align 8, !tbaa !5
-  %2 = load i32, i32* %1, align 4, !tbaa !1
+  %1 = load ptr, ptr @b, align 8, !tbaa !5
+  %2 = load i32, ptr %1, align 4, !tbaa !1
   %tobool18 = icmp eq i32 %2, 0
   br i1 %tobool18, label %while.end, label %while.body.lr.ph
 
 while.body.lr.ph:                                 ; preds = %while.cond.preheader
-  %.pre = load i32, i32* @c, align 4, !tbaa !1
+  %.pre = load i32, ptr @c, align 4, !tbaa !1
   br label %while.body
 
 while.body:                                       ; preds = %while.body.backedge, %while.body.lr.ph
@@ -44,12 +44,12 @@ while.body.backedge:                              ; preds = %while.body, %while.
   br label %while.body
 
 sw.bb1:                                           ; preds = %while.body, %while.body, %while.body
-  store i32 2, i32* @a, align 4, !tbaa !1
+  store i32 2, ptr @a, align 4, !tbaa !1
   br label %while.cond.backedge
 
 while.cond.backedge:                              ; preds = %while.body, %sw.bb1
-  store i32 4, i32* @a, align 4, !tbaa !1
-  %.pre19 = load i32, i32* %1, align 4, !tbaa !1
+  store i32 4, ptr @a, align 4, !tbaa !1
+  %.pre19 = load i32, ptr %1, align 4, !tbaa !1
   %tobool = icmp eq i32 %.pre19, 0
   br i1 %tobool, label %while.end.loopexit, label %while.body.backedge
 
@@ -57,23 +57,23 @@ while.end.loopexit:                               ; preds = %while.cond.backedge
   br label %while.end
 
 while.end:                                        ; preds = %while.end.loopexit, %while.cond.preheader
-  %3 = load i32, i32* @h, align 4, !tbaa !1
+  %3 = load i32, ptr @h, align 4, !tbaa !1
   %mul = mul nsw i32 %0, %3
-  %4 = load i32, i32* @g, align 4, !tbaa !1
+  %4 = load i32, ptr @g, align 4, !tbaa !1
   %mul4 = mul nsw i32 %mul, %4
-  store i32 %mul4, i32* @j, align 4, !tbaa !1
-  %5 = load i32, i32* getelementptr inbounds (%struct.anon, %struct.anon* @f, i64 0, i32 0, i32 0), align 4, !tbaa !7
+  store i32 %mul4, ptr @j, align 4, !tbaa !1
+  %5 = load i32, ptr @f, align 4, !tbaa !7
   %tobool5 = icmp eq i32 %5, 0
   br i1 %tobool5, label %if.end, label %if.then
 
 if.then:                                          ; preds = %while.end
   %div = sdiv i32 %5, %mul
-  store i32 %div, i32* @g, align 4, !tbaa !1
+  store i32 %div, ptr @g, align 4, !tbaa !1
   br label %if.end
 
 if.end:                                           ; preds = %while.end, %if.then
   %6 = phi i32 [ %4, %while.end ], [ %div, %if.then ]
-  %7 = load i32, i32* getelementptr inbounds (%struct.anon, %struct.anon* @f, i64 0, i32 1, i32 0), align 4, !tbaa !10
+  %7 = load i32, ptr getelementptr inbounds (%struct.anon, ptr @f, i64 0, i32 1, i32 0), align 4, !tbaa !10
   %tobool7 = icmp ne i32 %7, 0
   %tobool8 = icmp ne i32 %mul4, 0
   %or.cond = and i1 %tobool7, %tobool8
@@ -82,10 +82,10 @@ if.end:                                           ; preds = %while.end, %if.then
   br i1 %or.cond17, label %if.then11, label %if.end13
 
 if.then11:                                        ; preds = %if.end
-  store i32 %3, i32* @d, align 4, !tbaa !1
-  %8 = load i32, i32* @e, align 4, !tbaa !1
-  store i32 %8, i32* %p3, align 4, !tbaa !1
-  %.pre20 = load i32, i32* @g, align 4, !tbaa !1
+  store i32 %3, ptr @d, align 4, !tbaa !1
+  %8 = load i32, ptr @e, align 4, !tbaa !1
+  store i32 %8, ptr %p3, align 4, !tbaa !1
+  %.pre20 = load i32, ptr @g, align 4, !tbaa !1
   br label %if.end13
 
 if.end13:                                         ; preds = %if.then11, %if.end
@@ -94,7 +94,7 @@ if.end13:                                         ; preds = %if.then11, %if.end
   br i1 %tobool14, label %if.end16, label %if.then15
 
 if.then15:                                        ; preds = %if.end13
-  store i32 %p2, i32* %p1, align 4, !tbaa !1
+  store i32 %p2, ptr %p1, align 4, !tbaa !1
   br label %if.end16
 
 if.end16:                                         ; preds = %entry, %if.end13, %if.then15

diff  --git a/llvm/test/CodeGen/PowerPC/pr27078.ll b/llvm/test/CodeGen/PowerPC/pr27078.ll
index 9a1bd3e1957f..6036e4e58300 100644
--- a/llvm/test/CodeGen/PowerPC/pr27078.ll
+++ b/llvm/test/CodeGen/PowerPC/pr27078.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -verify-machineinstrs -mtriple=powerpc64-linux-gnu -mcpu=pwr8 -mattr=+vsx < %s | FileCheck %s
 
-define <4 x float> @bar(float* %p, float* %q) {
+define <4 x float> @bar(ptr %p, ptr %q) {
 ; CHECK-LABEL: bar:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li 5, 16
@@ -24,11 +24,9 @@ define <4 x float> @bar(float* %p, float* %q) {
 ; CHECK-NEXT:    lxvw4x 35, 0, 3
 ; CHECK-NEXT:    vperm 2, 2, 5, 3
 ; CHECK-NEXT:    blr
-  %1 = bitcast float* %p to <12 x float>*
-  %2 = bitcast float* %q to <12 x float>*
-  %3 = load <12 x float>, <12 x float>* %1, align 16
-  %4 = load <12 x float>, <12 x float>* %2, align 16
-  %5 = fsub <12 x float> %4, %3
-  %6 = shufflevector <12 x float> %5, <12 x float> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
-  ret <4 x float>  %6
+  %1 = load <12 x float>, ptr %p, align 16
+  %2 = load <12 x float>, ptr %q, align 16
+  %3 = fsub <12 x float> %2, %1
+  %4 = shufflevector <12 x float> %3, <12 x float> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
+  ret <4 x float>  %4
 }

diff  --git a/llvm/test/CodeGen/PowerPC/pr27350.ll b/llvm/test/CodeGen/PowerPC/pr27350.ll
index 93dbd10fecde..1a68f2cb9462 100644
--- a/llvm/test/CodeGen/PowerPC/pr27350.ll
+++ b/llvm/test/CodeGen/PowerPC/pr27350.ll
@@ -1,13 +1,13 @@
 ; RUN: llc -verify-machineinstrs -mcpu=ppc64le -mtriple=powerpc64le-unknown-linux-gnu < %s
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #0
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #0
 
 ; Function Attrs: nounwind
 define internal fastcc void @foo() unnamed_addr #1 align 2 {
 entry:
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 undef, i8* align 8 null, i64 16, i1 false)
-  %0 = load <2 x i64>, <2 x i64>* null, align 8
+  call void @llvm.memcpy.p0.p0.i64(ptr align 8 undef, ptr align 8 null, i64 16, i1 false)
+  %0 = load <2 x i64>, ptr null, align 8
   %1 = extractelement <2 x i64> %0, i32 1
   %.fca.1.insert159.i = insertvalue [2 x i64] undef, i64 %1, 1
   tail call fastcc void @bar([2 x i64] undef, [2 x i64] %.fca.1.insert159.i) #2

diff  --git a/llvm/test/CodeGen/PowerPC/pr28130.ll b/llvm/test/CodeGen/PowerPC/pr28130.ll
index 4da415bd2926..135c6aa97edf 100644
--- a/llvm/test/CodeGen/PowerPC/pr28130.ll
+++ b/llvm/test/CodeGen/PowerPC/pr28130.ll
@@ -3,7 +3,7 @@ target triple = "powerpc64le-unknown-linux-gnu"
 
 %StructA = type { double, double, double, double, double, double, double, double }
 
-define void @Test(%StructA* %tmp) unnamed_addr #0 align 2 {
+define void @Test(ptr %tmp) unnamed_addr #0 align 2 {
 ; CHECK-LABEL: Test:
 ; CHECK: lxvd2x
 ; CHECK-NEXT: xxswapd
@@ -16,24 +16,22 @@ define void @Test(%StructA* %tmp) unnamed_addr #0 align 2 {
 ; CHECK: xxswapd [[OUTPUT:[0-9]+]]
 ; CHECK-NEXT: stxvd2x [[OUTPUT]]
 bb:
-  %tmp2 = getelementptr inbounds %StructA, %StructA* %tmp, i64 0, i32 0
-  %tmp4 = bitcast %StructA* %tmp to <2 x double>*
-  %tmp5 = getelementptr inbounds %StructA, %StructA* %tmp, i64 0, i32 2
-  %tmp9 = getelementptr inbounds %StructA, %StructA* %tmp, i64 0, i32 4
-  %tmp11 = getelementptr inbounds %StructA, %StructA* %tmp, i64 0, i32 5
-  %tmp13 = getelementptr inbounds %StructA, %StructA* %tmp, i64 0, i32 6
-  %tmp15 = getelementptr inbounds %StructA, %StructA* %tmp, i64 0, i32 7
-  %tmp18 = load double, double* %tmp2, align 16
-  %tmp19 = load double, double* %tmp11, align 8
-  %tmp20 = load double, double* %tmp9, align 16
+  %tmp5 = getelementptr inbounds %StructA, ptr %tmp, i64 0, i32 2
+  %tmp9 = getelementptr inbounds %StructA, ptr %tmp, i64 0, i32 4
+  %tmp11 = getelementptr inbounds %StructA, ptr %tmp, i64 0, i32 5
+  %tmp13 = getelementptr inbounds %StructA, ptr %tmp, i64 0, i32 6
+  %tmp15 = getelementptr inbounds %StructA, ptr %tmp, i64 0, i32 7
+  %tmp18 = load double, ptr %tmp, align 16
+  %tmp19 = load double, ptr %tmp11, align 8
+  %tmp20 = load double, ptr %tmp9, align 16
   %tmp21 = fsub double 1.210000e+04, %tmp20
   %tmp22 = fmul double %tmp18, %tmp21
   %tmp23 = fadd double %tmp20, %tmp22
-  %tmp24 = load double, double* %tmp13, align 16
+  %tmp24 = load double, ptr %tmp13, align 16
   %tmp25 = fsub double 1.000000e+02, %tmp24
   %tmp26 = fmul double %tmp18, %tmp25
   %tmp27 = fadd double %tmp24, %tmp26
-  %tmp28 = load double, double* %tmp15, align 8
+  %tmp28 = load double, ptr %tmp15, align 8
   %tmp29 = insertelement <2 x double> undef, double %tmp19, i32 0
   %tmp30 = insertelement <2 x double> %tmp29, double %tmp28, i32 1
   %tmp31 = fsub <2 x double> <double 1.100000e+04, double 1.100000e+02>, %tmp30
@@ -41,8 +39,7 @@ bb:
   %tmp33 = insertelement <2 x double> %tmp32, double %tmp18, i32 1
   %tmp34 = fmul <2 x double> %tmp33, %tmp31
   %tmp35 = fadd <2 x double> %tmp30, %tmp34
-  %tmp36 = bitcast double* %tmp5 to <2 x double>*
-  %tmp37 = load <2 x double>, <2 x double>* %tmp36, align 16
+  %tmp37 = load <2 x double>, ptr %tmp5, align 16
   %tmp38 = fsub <2 x double> <double 1.000000e+00, double 1.000000e+04>, %tmp37
   %tmp39 = fmul <2 x double> %tmp33, %tmp38
   %tmp40 = fadd <2 x double> %tmp37, %tmp39
@@ -55,15 +52,14 @@ bb:
   %tmp48 = fsub double 1.440000e+04, %tmp23
   %tmp49 = fmul double %tmp18, %tmp48
   %tmp50 = fadd double %tmp23, %tmp49
-  store double %tmp50, double* %tmp9, align 16
+  store double %tmp50, ptr %tmp9, align 16
   %tmp51 = fsub double 1.000000e+02, %tmp27
   %tmp52 = fmul double %tmp18, %tmp51
   %tmp53 = fadd double %tmp27, %tmp52
-  store double %tmp53, double* %tmp13, align 16
+  store double %tmp53, ptr %tmp13, align 16
   %tmp54 = extractelement <2 x double> %tmp46, i32 1
-  store double %tmp54, double* %tmp15, align 8
-  %tmp55 = bitcast double* %tmp5 to <2 x double>*
-  store <2 x double> %tmp43, <2 x double>* %tmp55, align 16
+  store double %tmp54, ptr %tmp15, align 8
+  store <2 x double> %tmp43, ptr %tmp5, align 16
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/pr28630.ll b/llvm/test/CodeGen/PowerPC/pr28630.ll
index a6759fa04b5c..95d7aa57b1b5 100644
--- a/llvm/test/CodeGen/PowerPC/pr28630.ll
+++ b/llvm/test/CodeGen/PowerPC/pr28630.ll
@@ -7,7 +7,7 @@ define double @test() {
 @g = common global double 0.000000e+00, align 8
 
 define double @testitd() {
-  %g = load double, double* @g, align 8
+  %g = load double, ptr @g, align 8
   ret double %g
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/pr30451.ll b/llvm/test/CodeGen/PowerPC/pr30451.ll
index 55d0cabe76b5..96f4e95b7c74 100644
--- a/llvm/test/CodeGen/PowerPC/pr30451.ll
+++ b/llvm/test/CodeGen/PowerPC/pr30451.ll
@@ -2,13 +2,12 @@
 define i8 @atomic_min_i8() {
     top:
       %0 = alloca i8, align 2
-      %1 = bitcast i8* %0 to i8*
-      call void @llvm.lifetime.start.p0i8(i64 2, i8* %1)
-      store i8 -1, i8* %0, align 2
-      %2 = atomicrmw min i8* %0, i8 0 acq_rel
-      %3 = load atomic i8, i8* %0 acquire, align 8
-      call void @llvm.lifetime.end.p0i8(i64 2, i8* %1)
-      ret i8 %3
+      call void @llvm.lifetime.start.p0(i64 2, ptr %0)
+      store i8 -1, ptr %0, align 2
+      %1 = atomicrmw min ptr %0, i8 0 acq_rel
+      %2 = load atomic i8, ptr %0 acquire, align 8
+      call void @llvm.lifetime.end.p0(i64 2, ptr %0)
+      ret i8 %2
 ; CHECK-LABEL: atomic_min_i8
 ; CHECK: lbarx [[DST:[0-9]+]],
 ; CHECK-NEXT: extsb [[EXT:[0-9]+]], [[DST]]
@@ -18,13 +17,12 @@ define i8 @atomic_min_i8() {
 define i16 @atomic_min_i16() {
     top:
       %0 = alloca i16, align 2
-      %1 = bitcast i16* %0 to i8*
-      call void @llvm.lifetime.start.p0i8(i64 2, i8* %1)
-      store i16 -1, i16* %0, align 2
-      %2 = atomicrmw min i16* %0, i16 0 acq_rel
-      %3 = load atomic i16, i16* %0 acquire, align 8
-      call void @llvm.lifetime.end.p0i8(i64 2, i8* %1)
-      ret i16 %3
+      call void @llvm.lifetime.start.p0(i64 2, ptr %0)
+      store i16 -1, ptr %0, align 2
+      %1 = atomicrmw min ptr %0, i16 0 acq_rel
+      %2 = load atomic i16, ptr %0 acquire, align 8
+      call void @llvm.lifetime.end.p0(i64 2, ptr %0)
+      ret i16 %2
 ; CHECK-LABEL: atomic_min_i16
 ; CHECK: lharx [[DST:[0-9]+]],
 ; CHECK-NEXT: extsh [[EXT:[0-9]+]], [[DST]]
@@ -35,13 +33,12 @@ define i16 @atomic_min_i16() {
 define i8 @atomic_max_i8() {
     top:
       %0 = alloca i8, align 2
-      %1 = bitcast i8* %0 to i8*
-      call void @llvm.lifetime.start.p0i8(i64 2, i8* %1)
-      store i8 -1, i8* %0, align 2
-      %2 = atomicrmw max i8* %0, i8 0 acq_rel
-      %3 = load atomic i8, i8* %0 acquire, align 8
-      call void @llvm.lifetime.end.p0i8(i64 2, i8* %1)
-      ret i8 %3
+      call void @llvm.lifetime.start.p0(i64 2, ptr %0)
+      store i8 -1, ptr %0, align 2
+      %1 = atomicrmw max ptr %0, i8 0 acq_rel
+      %2 = load atomic i8, ptr %0 acquire, align 8
+      call void @llvm.lifetime.end.p0(i64 2, ptr %0)
+      ret i8 %2
 ; CHECK-LABEL: atomic_max_i8
 ; CHECK: lbarx [[DST:[0-9]+]],
 ; CHECK-NEXT: extsb [[EXT:[0-9]+]], [[DST]]
@@ -51,13 +48,12 @@ define i8 @atomic_max_i8() {
 define i16 @atomic_max_i16() {
     top:
       %0 = alloca i16, align 2
-      %1 = bitcast i16* %0 to i8*
-      call void @llvm.lifetime.start.p0i8(i64 2, i8* %1)
-      store i16 -1, i16* %0, align 2
-      %2 = atomicrmw max i16* %0, i16 0 acq_rel
-      %3 = load atomic i16, i16* %0 acquire, align 8
-      call void @llvm.lifetime.end.p0i8(i64 2, i8* %1)
-      ret i16 %3
+      call void @llvm.lifetime.start.p0(i64 2, ptr %0)
+      store i16 -1, ptr %0, align 2
+      %1 = atomicrmw max ptr %0, i16 0 acq_rel
+      %2 = load atomic i16, ptr %0 acquire, align 8
+      call void @llvm.lifetime.end.p0(i64 2, ptr %0)
+      ret i16 %2
 ; CHECK-LABEL: atomic_max_i16
 ; CHECK: lharx [[DST:[0-9]+]],
 ; CHECK-NEXT: extsh [[EXT:[0-9]+]], [[DST]]
@@ -65,5 +61,5 @@ define i16 @atomic_max_i16() {
 ; CHECK-NEXT: bgt 0
 }
 
-declare void @llvm.lifetime.start.p0i8(i64, i8*)
-declare void @llvm.lifetime.end.p0i8(i64, i8*)
+declare void @llvm.lifetime.start.p0(i64, ptr)
+declare void @llvm.lifetime.end.p0(i64, ptr)

diff  --git a/llvm/test/CodeGen/PowerPC/pr30663.ll b/llvm/test/CodeGen/PowerPC/pr30663.ll
index 0772fcaadfe9..fc412e46b617 100644
--- a/llvm/test/CodeGen/PowerPC/pr30663.ll
+++ b/llvm/test/CodeGen/PowerPC/pr30663.ll
@@ -6,19 +6,19 @@ target triple = "powerpc64le-linux-gnu"
 ; CHECK-NOT: xxspltw
 define void @Test() {
 bb4:
-  %tmp = load <4 x i8>, <4 x i8>* undef
+  %tmp = load <4 x i8>, ptr undef
   %tmp8 = bitcast <4 x i8> %tmp to float
   %tmp18 = fmul float %tmp8, undef
   %tmp19 = fsub float 0.000000e+00, %tmp18
-  store float %tmp19, float* undef
+  store float %tmp19, ptr undef
   %tmp22 = shufflevector <4 x i8> %tmp, <4 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
   %tmp23 = bitcast <16 x i8> %tmp22 to <4 x float>
   %tmp25 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> undef, <4 x float> %tmp23, <4 x float> undef)
   %tmp26 = fsub <4 x float> zeroinitializer, %tmp25
   %tmp27 = bitcast <4 x float> %tmp26 to <4 x i32>
-  tail call void @llvm.ppc.altivec.stvx(<4 x i32> %tmp27, i8* undef)
+  tail call void @llvm.ppc.altivec.stvx(<4 x i32> %tmp27, ptr undef)
   ret void
 }
 
-declare void @llvm.ppc.altivec.stvx(<4 x i32>, i8*)
+declare void @llvm.ppc.altivec.stvx(<4 x i32>, ptr)
 declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)

diff  --git a/llvm/test/CodeGen/PowerPC/pr30715.ll b/llvm/test/CodeGen/PowerPC/pr30715.ll
index 51086c6279d3..e4f322956685 100644
--- a/llvm/test/CodeGen/PowerPC/pr30715.ll
+++ b/llvm/test/CodeGen/PowerPC/pr30715.ll
@@ -1,19 +1,18 @@
 ; RUN: llc -verify-machineinstrs -mcpu=pwr8 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
 
 %class.FullMatrix = type { i8 }
-%class.Vector = type { float* }
+%class.Vector = type { ptr }
 
 $test = comdat any
 
-define weak_odr void @test(%class.FullMatrix* %this, %class.Vector* dereferenceable(8) %p1, %class.Vector* dereferenceable(8), i1 zeroext) {
+define weak_odr void @test(ptr %this, ptr dereferenceable(8) %p1, ptr dereferenceable(8), i1 zeroext) {
 entry:
-  %call = tail call signext i32 @fn1(%class.FullMatrix* %this)
+  %call = tail call signext i32 @fn1(ptr %this)
   %cmp10 = icmp sgt i32 %call, 0
   br i1 %cmp10, label %for.body.lr.ph, label %for.cond.cleanup
 
 for.body.lr.ph:                                   ; preds = %entry
-  %val.i = getelementptr inbounds %class.Vector, %class.Vector* %p1, i64 0, i32 0
-  %2 = load float*, float** %val.i, align 8
+  %2 = load ptr, ptr %p1, align 8
   %wide.trip.count = zext i32 %call to i64
   %min.iters.check = icmp ult i32 %call, 4
   br i1 %min.iters.check, label %for.body.preheader, label %min.iters.checked
@@ -34,17 +33,15 @@ vector.body.preheader:                            ; preds = %min.iters.checked
 
 vector.body:                                      ; preds = %vector.body.preheader, %vector.body
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %vector.body.preheader ]
-  %4 = getelementptr inbounds float, float* %2, i64 %index
-  %5 = bitcast float* %4 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %5, align 4
-  %6 = fpext <4 x float> %wide.load to <4 x ppc_fp128>
-  %7 = fadd <4 x ppc_fp128> %6, %6
-  %8 = fptrunc <4 x ppc_fp128> %7 to <4 x float>
-  %9 = bitcast float* %4 to <4 x float>*
-  store <4 x float> %8, <4 x float>* %9, align 4
+  %4 = getelementptr inbounds float, ptr %2, i64 %index
+  %wide.load = load <4 x float>, ptr %4, align 4
+  %5 = fpext <4 x float> %wide.load to <4 x ppc_fp128>
+  %6 = fadd <4 x ppc_fp128> %5, %5
+  %7 = fptrunc <4 x ppc_fp128> %6 to <4 x float>
+  store <4 x float> %7, ptr %4, align 4
   %index.next = add i64 %index, 4
-  %10 = icmp eq i64 %index.next, %n.vec
-  br i1 %10, label %middle.block, label %vector.body
+  %8 = icmp eq i64 %index.next, %n.vec
+  br i1 %8, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %3, 0
@@ -58,12 +55,12 @@ for.cond.cleanup:                                 ; preds = %for.cond.cleanup.lo
 
 for.body:                                         ; preds = %for.body.preheader, %for.body
   %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
-  %arrayidx.i = getelementptr inbounds float, float* %2, i64 %indvars.iv
-  %11 = load float, float* %arrayidx.i, align 4
-  %conv = fpext float %11 to ppc_fp128
+  %arrayidx.i = getelementptr inbounds float, ptr %2, i64 %indvars.iv
+  %9 = load float, ptr %arrayidx.i, align 4
+  %conv = fpext float %9 to ppc_fp128
   %add = fadd ppc_fp128 %conv, %conv
   %conv4 = fptrunc ppc_fp128 %add to float
-  store float %conv4, float* %arrayidx.i, align 4
+  store float %conv4, ptr %arrayidx.i, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
   br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
@@ -71,4 +68,4 @@ for.body:                                         ; preds = %for.body.preheader,
 ; CHECK: lxvd2x
 }
 
-declare signext i32 @fn1(%class.FullMatrix*) local_unnamed_addr #1
+declare signext i32 @fn1(ptr) local_unnamed_addr #1

diff  --git a/llvm/test/CodeGen/PowerPC/pr31144.ll b/llvm/test/CodeGen/PowerPC/pr31144.ll
index b968b4b80ac6..5e16fe8ab85d 100644
--- a/llvm/test/CodeGen/PowerPC/pr31144.ll
+++ b/llvm/test/CodeGen/PowerPC/pr31144.ll
@@ -2,9 +2,9 @@
 
 declare void @bar(double)
 
-define void @foo1(i8* %p) {
+define void @foo1(ptr %p) {
 entry:
-  %0 = load i8, i8* %p, align 1
+  %0 = load i8, ptr %p, align 1
   %conv = uitofp i8 %0 to double
   call void @bar(double %conv)
   ret void
@@ -13,9 +13,9 @@ entry:
 ; CHECK:     mtfprwz
 }
 
-define void @foo2(i16* %p) {
+define void @foo2(ptr %p) {
 entry:
-  %0 = load i16, i16* %p, align 2
+  %0 = load i16, ptr %p, align 2
   %conv = uitofp i16 %0 to double
   call void @bar(double %conv)
   ret void

diff  --git a/llvm/test/CodeGen/PowerPC/pr32063.ll b/llvm/test/CodeGen/PowerPC/pr32063.ll
index f031ec83c55e..9922e083b567 100644
--- a/llvm/test/CodeGen/PowerPC/pr32063.ll
+++ b/llvm/test/CodeGen/PowerPC/pr32063.ll
@@ -1,11 +1,11 @@
 ; RUN: llc -O2 < %s | FileCheck %s
 target triple = "powerpc64le-linux-gnu"
 
-define void @foo(i32 %v, i16* %p) {
+define void @foo(i32 %v, ptr %p) {
         %1 = and i32 %v, -65536
         %2 = tail call i32 @llvm.bswap.i32(i32 %1)
         %conv = trunc i32 %2 to i16
-        store i16 %conv, i16* %p
+        store i16 %conv, ptr %p
         ret void
 
 ; CHECK:     srwi

diff  --git a/llvm/test/CodeGen/PowerPC/pr32140.ll b/llvm/test/CodeGen/PowerPC/pr32140.ll
index bd0b267ab049..bc50f88c8259 100644
--- a/llvm/test/CodeGen/PowerPC/pr32140.ll
+++ b/llvm/test/CodeGen/PowerPC/pr32140.ll
@@ -37,11 +37,11 @@ define dso_local void @bswapStorei64Toi32() {
 ; CHECK-BE-NEXT:    stwbrx 3, 0, 4
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i32, i32* @ai, align 4
+  %0 = load i32, ptr @ai, align 4
   %conv.i = sext i32 %0 to i64
   %or26.i = tail call i64 @llvm.bswap.i64(i64 %conv.i)
   %conv = trunc i64 %or26.i to i32
-  store i32 %conv, i32* @bi, align 4
+  store i32 %conv, ptr @bi, align 4
   ret void
 }
 
@@ -75,11 +75,11 @@ define dso_local void @bswapStorei32Toi16() {
 ; CHECK-BE-NEXT:    sthbrx 3, 0, 4
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i16, i16* @as, align 2
+  %0 = load i16, ptr @as, align 2
   %conv.i = sext i16 %0 to i32
   %or26.i = tail call i32 @llvm.bswap.i32(i32 %conv.i)
   %conv = trunc i32 %or26.i to i16
-  store i16 %conv, i16* @bs, align 2
+  store i16 %conv, ptr @bs, align 2
   ret void
 }
 
@@ -113,11 +113,11 @@ define dso_local void @bswapStorei64Toi16() {
 ; CHECK-BE-NEXT:    sthbrx 3, 0, 4
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i16, i16* @as, align 2
+  %0 = load i16, ptr @as, align 2
   %conv.i = sext i16 %0 to i64
   %or26.i = tail call i64 @llvm.bswap.i64(i64 %conv.i)
   %conv = trunc i64 %or26.i to i16
-  store i16 %conv, i16* @bs, align 2
+  store i16 %conv, ptr @bs, align 2
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/pr33547.ll b/llvm/test/CodeGen/PowerPC/pr33547.ll
index 67fbab3e3b4e..0df1e35a18bf 100644
--- a/llvm/test/CodeGen/PowerPC/pr33547.ll
+++ b/llvm/test/CodeGen/PowerPC/pr33547.ll
@@ -27,24 +27,23 @@ define void @main() {
 ; CHECK-NEXT:    mtlr 0
 ; CHECK-NEXT:    blr
 L.entry:
-  tail call void @testFunc(i64* bitcast (i8* getelementptr inbounds (%struct.STATICS1, %struct.STATICS1* @.STATICS1, i64 0, i32 0, i64 124) to i64*), i64* bitcast (i32* @.C302_MAIN_ to i64*))
+  tail call void @testFunc(ptr getelementptr inbounds (%struct.STATICS1, ptr @.STATICS1, i64 0, i32 0, i64 124), ptr @.C302_MAIN_)
   ret void
 }
 
 ; Function Attrs: noinline norecurse nounwind readonly
-define signext i32 @ifunc_(i64* nocapture readonly %i) {
+define signext i32 @ifunc_(ptr nocapture readonly %i) {
 ; CHECK-LABEL: ifunc_:
 ; CHECK:       # %bb.0: # %L.entry
 ; CHECK-NEXT:    lwa 3, 0(3)
 ; CHECK-NEXT:    blr
 L.entry:
-  %0 = bitcast i64* %i to i32*
-  %1 = load i32, i32* %0, align 4
-  ret i32 %1
+  %0 = load i32, ptr %i, align 4
+  ret i32 %0
 }
 
 ; Function Attrs: noinline norecurse nounwind
-define void @testFunc(i64* nocapture %r, i64* nocapture readonly %k) {
+define void @testFunc(ptr nocapture %r, ptr nocapture readonly %k) {
 ; CHECK-LABEL: testFunc:
 ; CHECK:       # %bb.0: # %L.entry
 ; CHECK-NEXT:    mflr 0
@@ -86,9 +85,8 @@ define void @testFunc(i64* nocapture %r, i64* nocapture readonly %k) {
 ; CHECK-NEXT:    mtlr 0
 ; CHECK-NEXT:    blr
 L.entry:
-  %0 = bitcast i64* %k to i32*
-  %1 = load i32, i32* %0, align 4
-  switch i32 %1, label %L.LB3_307 [
+  %0 = load i32, ptr %k, align 4
+  switch i32 %0, label %L.LB3_307 [
     i32 1, label %L.LB3_307.sink.split
     i32 3, label %L.LB3_307.sink.split
     i32 4, label %L.LB3_321.split
@@ -108,8 +106,7 @@ L.LB3_321.split:                                  ; preds = %L.entry
 
 L.LB3_307.sink.split:                             ; preds = %L.LB3_321.split, %L.entry, %L.entry, %L.entry
   %.sink = phi i32 [ 5, %L.LB3_321.split ], [ -3, %L.entry ], [ -3, %L.entry ], [ -3, %L.entry ]
-  %2 = bitcast i64* %r to i32*
-  store i32 %.sink, i32* %2, align 4
+  store i32 %.sink, ptr %r, align 4
   br label %L.LB3_307
 
 L.LB3_307:                                        ; preds = %L.LB3_307.sink.split, %L.entry

diff  --git a/llvm/test/CodeGen/PowerPC/pr35402.ll b/llvm/test/CodeGen/PowerPC/pr35402.ll
index 3bc7a940af42..566beeddeef4 100644
--- a/llvm/test/CodeGen/PowerPC/pr35402.ll
+++ b/llvm/test/CodeGen/PowerPC/pr35402.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -O2 < %s | FileCheck %s
 target triple = "powerpc64le-linux-gnu"
 
-define void @test(i8* %p, i64 %data) {
+define void @test(ptr %p, i64 %data) {
 ; CHECK-LABEL: test:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    rotldi 5, 4, 16
@@ -19,9 +19,8 @@ define void @test(i8* %p, i64 %data) {
 ; CHECK-NEXT:    blr
 entry:
   %0 = tail call i64 @llvm.bswap.i64(i64 %data)
-  %ptr = bitcast i8* %p to i48*
   %val = trunc i64 %0 to i48
-  store i48 %val, i48* %ptr, align 1
+  store i48 %val, ptr %p, align 1
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/pr35688.ll b/llvm/test/CodeGen/PowerPC/pr35688.ll
index 5b156fcc057d..8a4351b229fd 100644
--- a/llvm/test/CodeGen/PowerPC/pr35688.ll
+++ b/llvm/test/CodeGen/PowerPC/pr35688.ll
@@ -25,8 +25,8 @@ entry:
   br label %fe_cmovznz.exit.i534.i.15
 
 fe_cmovznz.exit.i534.i.15:                        ; preds = %fe_cmovznz.exit.i534.i.15, %entry
-  %0 = load i64, i64* undef, align 8
-  %1 = load i64, i64* undef, align 8
+  %0 = load i64, ptr undef, align 8
+  %1 = load i64, ptr undef, align 8
   %conv.i69.i.i = zext i64 %0 to i128
   %sub.i72.i.i = sub nsw i128 0, %conv.i69.i.i
   %conv.i63.i.i = zext i64 %1 to i128
@@ -36,7 +36,7 @@ fe_cmovznz.exit.i534.i.15:                        ; preds = %fe_cmovznz.exit.i53
   %conv1.i58.i.i = and i128 %sub.i65.lobit.i.i, 18446744073709551615
   %add3.i59.i.i = add nuw nsw i128 %conv1.i58.i.i, 0
   %conv4.i60.i.i = trunc i128 %add3.i59.i.i to i64
-  store i64 %conv4.i60.i.i, i64* undef, align 16
+  store i64 %conv4.i60.i.i, ptr undef, align 16
   br label %fe_cmovznz.exit.i534.i.15
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/pr36068.ll b/llvm/test/CodeGen/PowerPC/pr36068.ll
index ee56d020e3cc..591f4ae66232 100644
--- a/llvm/test/CodeGen/PowerPC/pr36068.ll
+++ b/llvm/test/CodeGen/PowerPC/pr36068.ll
@@ -4,15 +4,15 @@
 @glob = local_unnamed_addr global <4 x float> zeroinitializer, align 4
 
 ; Function Attrs: norecurse nounwind
-define void @test(float %a, <4 x float>* nocapture readonly %b) {
+define void @test(float %a, ptr nocapture readonly %b) {
 ; CHECK-LABEL: test
 ; CHECK: xscvdpspn [[REG:[0-9]+]], 1
 ; CHECK: xxspltw {{[0-9]+}}, [[REG]], 0
 entry:
   %splat.splatinsert = insertelement <4 x float> undef, float %a, i32 0
   %splat.splat = shufflevector <4 x float> %splat.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
-  %0 = load <4 x float>, <4 x float>* %b, align 4
+  %0 = load <4 x float>, ptr %b, align 4
   %mul = fmul <4 x float> %splat.splat, %0
-  store <4 x float> %mul, <4 x float>* @glob, align 4
+  store <4 x float> %mul, ptr @glob, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/pr36292.ll b/llvm/test/CodeGen/PowerPC/pr36292.ll
index 5ee1a28bf125..3cdc8b623e12 100644
--- a/llvm/test/CodeGen/PowerPC/pr36292.ll
+++ b/llvm/test/CodeGen/PowerPC/pr36292.ll
@@ -33,18 +33,18 @@ define void @test() nounwind comdat {
   br label %forcond
 
 forcond:                                          ; preds = %bounds.ok, %0
-  %1 = load i64, i64* %pos
-  %.len1 = load i64, i64* undef
+  %1 = load i64, ptr %pos
+  %.len1 = load i64, ptr undef
   %bounds.cmp = icmp ult i64 %1, %.len1
   br i1 %bounds.cmp, label %bounds.ok, label %bounds.fail
 
 bounds.ok:                                        ; preds = %forcond
-  %2 = load float, float* undef
+  %2 = load float, ptr undef
   %3 = frem float 0.000000e+00, %2
-  store float %3, float* undef
-  %4 = load i64, i64* %pos
+  store float %3, ptr undef
+  %4 = load i64, ptr %pos
   %5 = add i64 %4, 1
-  store i64 %5, i64* %pos
+  store i64 %5, ptr %pos
   br label %forcond
 
 bounds.fail:                                      ; preds = %forcond

diff  --git a/llvm/test/CodeGen/PowerPC/pr38087.ll b/llvm/test/CodeGen/PowerPC/pr38087.ll
index 95bb3ec62106..1216fa9cf8f2 100644
--- a/llvm/test/CodeGen/PowerPC/pr38087.ll
+++ b/llvm/test/CodeGen/PowerPC/pr38087.ll
@@ -20,7 +20,7 @@ define void @draw_llvm_vs_variant0(<4 x float> %x) {
 ; CHECK-NEXT:    stxv vs0, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %.size = load i32, i32* undef
+  %.size = load i32, ptr undef
   %0 = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %.size, i32 7)
   %1 = extractvalue { i32, i1 } %0, 0
   %2 = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %1, i32 0)
@@ -28,7 +28,7 @@ entry:
   %4 = select i1 false, i32 0, i32 %3
   %5 = xor i1 false, true
   %6 = sext i1 %5 to i32
-  %7 = load <4 x i16>, <4 x i16>* undef, align 2
+  %7 = load <4 x i16>, ptr undef, align 2
   %8 = extractelement <4 x i16> %7, i32 0
   %9 = sext i16 %8 to i32
   %10 = insertelement <4 x i32> undef, i32 %9, i32 0
@@ -49,6 +49,6 @@ entry:
   %25 = bitcast <4 x i32> %24 to <4 x float>
   %26 = shufflevector <4 x float> %25, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %27 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %x, <4 x float> %x, <4 x float> %26)
-  store <4 x float> %27, <4 x float>* undef
+  store <4 x float> %27, ptr undef
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/pr39478.ll b/llvm/test/CodeGen/PowerPC/pr39478.ll
index 648e74aaa517..ebd8df581136 100644
--- a/llvm/test/CodeGen/PowerPC/pr39478.ll
+++ b/llvm/test/CodeGen/PowerPC/pr39478.ll
@@ -3,7 +3,7 @@
 ; RUN: llc < %s -mtriple=powerpc64-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=CHECKBE
 ; RUN: llc < %s -mtriple=powerpc64-ibm-aix-xcoff -verify-machineinstrs | FileCheck %s --check-prefix=CHECKBE
 
-define void @pr39478(i64* %p64, i32* %p32) {
+define void @pr39478(ptr %p64, ptr %p32) {
 ; CHECKLE-LABEL: pr39478:
 ; CHECKLE:       # %bb.0: # %entry
 ; CHECKLE-NEXT:    lbz 3, 4(3)
@@ -16,15 +16,15 @@ define void @pr39478(i64* %p64, i32* %p32) {
 ; CHECKBE-NEXT:    stb 3, 3(4)
 ; CHECKBE-NEXT:    blr
 entry:
-  %tmp32 = load i64, i64* %p64, align 8
-  %tmp33 = load i32, i32* %p32, align 4
+  %tmp32 = load i64, ptr %p64, align 8
+  %tmp33 = load i32, ptr %p32, align 4
   %tmp34 = and i32 %tmp33, -256
   %tmp35 = lshr i64 %tmp32, 32
   %tmp36 = shl nuw nsw i64 %tmp35, 24
   %tmp37 = trunc i64 %tmp36 to i32
   %tmp38 = call i32 @llvm.bswap.i32(i32 %tmp37)
   %tmp39 = or i32 %tmp38, %tmp34
-  store i32 %tmp39, i32* %p32, align 4
+  store i32 %tmp39, ptr %p32, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/pr39815.ll b/llvm/test/CodeGen/PowerPC/pr39815.ll
index a0cd0644f19b..37fff85c10a2 100644
--- a/llvm/test/CodeGen/PowerPC/pr39815.ll
+++ b/llvm/test/CodeGen/PowerPC/pr39815.ll
@@ -1,19 +1,19 @@
 ; RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-unknown-linux-gnu < %s \
 ; RUN:   -verify-machineinstrs | FileCheck %s
 
- at b = dso_local local_unnamed_addr global i64* null, align 8
+ at b = dso_local local_unnamed_addr global ptr null, align 8
 @a = dso_local local_unnamed_addr global i8 0, align 1
 
 define void @testADDEPromoteResult() {
 entry:
-  %0 = load i64*, i64** @b, align 8
-  %1 = load i64, i64* %0, align 8
-  %cmp = icmp ne i64* %0, null
+  %0 = load ptr, ptr @b, align 8
+  %1 = load i64, ptr %0, align 8
+  %cmp = icmp ne ptr %0, null
   %conv1 = zext i1 %cmp to i64
   %add = add nsw i64 %1, %conv1
   %2 = trunc i64 %add to i8
   %conv2 = and i8 %2, 5
-  store i8 %conv2, i8* @a, align 1
+  store i8 %conv2, ptr @a, align 1
   ret void
 
 ; CHECK-LABEL: @testADDEPromoteResult

diff  --git a/llvm/test/CodeGen/PowerPC/pr40922.ll b/llvm/test/CodeGen/PowerPC/pr40922.ll
index 291070bdd242..983338a82590 100644
--- a/llvm/test/CodeGen/PowerPC/pr40922.ll
+++ b/llvm/test/CodeGen/PowerPC/pr40922.ll
@@ -6,8 +6,8 @@
 
 define i32 @a() {
 entry:
-  %call = tail call i32 bitcast (i32 (...)* @d to i32 ()*)()
-  %0 = load i32, i32* @a.b, align 4
+  %call = tail call i32 @d()
+  %0 = load i32, ptr @a.b, align 4
   %conv = zext i32 %0 to i64
   %add = add nuw nsw i64 %conv, 6
   %and = and i64 %add, 8589934575
@@ -15,11 +15,11 @@ entry:
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  %call3 = tail call i32 bitcast (i32 (...)* @e to i32 ()*)()
+  %call3 = tail call i32 @e()
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry
-  store i32 %call, i32* @a.b, align 4
+  store i32 %call, ptr @a.b, align 4
   ret i32 undef
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/pr41088.ll b/llvm/test/CodeGen/PowerPC/pr41088.ll
index bcaf07f98efc..9966d8251b71 100644
--- a/llvm/test/CodeGen/PowerPC/pr41088.ll
+++ b/llvm/test/CodeGen/PowerPC/pr41088.ll
@@ -3,35 +3,35 @@
 ; RUN:   -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs < %s | \
 ; RUN:   FileCheck %s
 
-%0 = type { [0 x i64], %1, [0 x i64], { i64, i8* }, [0 x i64] }
-%1 = type { [0 x i64], %2, [0 x i64], i64*, [0 x i64] }
+%0 = type { [0 x i64], %1, [0 x i64], { i64, ptr }, [0 x i64] }
+%1 = type { [0 x i64], %2, [0 x i64], ptr, [0 x i64] }
 %2 = type { [0 x i64], %3, [0 x i64], %4, [0 x i8], i8, [7 x i8] }
-%3 = type { [0 x i64], { i64*, i64* }, [0 x i64], i64*, [0 x i8], i8, [7 x i8] }
-%4 = type { [0 x i64], { i64*, i64* }, [0 x i64], %5, [0 x i64] }
-%5 = type { [0 x i64], { i64*, i64* }, [0 x i64], i64*, [0 x i64] }
+%3 = type { [0 x i64], { ptr, ptr }, [0 x i64], ptr, [0 x i8], i8, [7 x i8] }
+%4 = type { [0 x i64], { ptr, ptr }, [0 x i64], %5, [0 x i64] }
+%5 = type { [0 x i64], { ptr, ptr }, [0 x i64], ptr, [0 x i64] }
 %6 = type { [0 x i64], i64, [2 x i64] }
-%7 = type { [0 x i64], { i64*, i64* }, [0 x i64], %8, [0 x i64] }
-%8 = type { [0 x i64], %9*, [0 x i32], { i32, i32 }, [0 x i8], i8, [7 x i8] }
+%7 = type { [0 x i64], { ptr, ptr }, [0 x i64], %8, [0 x i64] }
+%8 = type { [0 x i64], ptr, [0 x i32], { i32, i32 }, [0 x i8], i8, [7 x i8] }
 %9 = type { [0 x i64], i64, [0 x i64], [0 x %10], [0 x i8], %11 }
 %10 = type { [0 x i8], i8, [31 x i8] }
 %11 = type {}
 %12 = type { [0 x i64], %13, [0 x i32], i32, [0 x i32], i32, [0 x i32] }
 %13 = type { [0 x i8], i8, [23 x i8] }
 %14 = type { [0 x i64], i64, [0 x i64], %15, [0 x i32], i32, [0 x i8], i8, [0 x i8], { i8, i8 }, [0 x i8], { i8, i8 }, [0 x i8], { i8, i8 }, [0 x i8], { i8, i8 }, [0 x i8], { i8, i8 }, [0 x i8], { i8, i8 }, [0 x i8], { i8, i8 }, [0 x i8], { i8, i8 }, [0 x i8], { i8, i8 }, [0 x i8], { i8, i8 }, [7 x i8] }
-%15 = type { [0 x i64], { i64*, i64 }, [0 x i64], i64, [0 x i64] }
+%15 = type { [0 x i64], { ptr, i64 }, [0 x i64], i64, [0 x i64] }
 %16 = type { [0 x i64], %17, [0 x i64], %18, [0 x i64], %19, [0 x i64], i64, [0 x i8], { i8, i8 }, [6 x i8] }
 %17 = type { [0 x i32], i32, [27 x i32] }
 %18 = type { [0 x i64], i64, [6 x i64] }
 %19 = type { [0 x i8], i8, [103 x i8] }
-%20 = type { [0 x i64], { i64*, i64* }*, [0 x i64], %7**, [0 x i64], i64**, [0 x i64] }
-%21 = type { [0 x i64], i64, [0 x i64], void (i32, %21*)*, [0 x i64], [2 x i64], [0 x i64] }
+%20 = type { [0 x i64], ptr, [0 x i64], ptr, [0 x i64], ptr, [0 x i64] }
+%21 = type { [0 x i64], i64, [0 x i64], ptr, [0 x i64], [2 x i64], [0 x i64] }
 %22 = type { [0 x i8] }
 
- at var = external dso_local unnamed_addr constant <{ i8*, [8 x i8], i8*, [16 x i8] }>, align 8
+ at var = external dso_local unnamed_addr constant <{ ptr, [8 x i8], ptr, [16 x i8] }>, align 8
 
-declare dso_local fastcc { i64*, i8* } @test2(%0**) unnamed_addr
+declare dso_local fastcc { ptr, ptr } @test2(ptr) unnamed_addr
 
-define void @test(%6* %arg, %7* %arg1, %12* %arg2) unnamed_addr personality i32 (i32, i32, i64, %21*, %22*)* @personality {
+define void @test(ptr %arg, ptr %arg1, ptr %arg2) unnamed_addr personality ptr @personality {
 ; CHECK-LABEL: test:
 ; CHECK:         .cfi_personality 148, DW.ref.personality
 ; CHECK-NEXT:    .cfi_lsda 20, .Lexception0
@@ -85,31 +85,31 @@ bb8:                                              ; No predecessors!
   br label %bb12
 
 bb9:                                              ; preds = %bb3
-  %tmp = call i8 @test5(%14* noalias nonnull readonly align 8 dereferenceable(64) undef), !range !0
+  %tmp = call i8 @test5(ptr noalias nonnull readonly align 8 dereferenceable(64) undef), !range !0
   %tmp10 = zext i8 %tmp to i24
   %tmp11 = shl nuw nsw i24 %tmp10, 8
   br label %bb12
 
 bb12:                                             ; preds = %bb9, %bb8, %bb7, %bb6, %bb5, %bb3
   %tmp13 = phi i24 [ 1024, %bb8 ], [ 768, %bb7 ], [ 512, %bb6 ], [ 256, %bb5 ], [ %tmp11, %bb9 ], [ 0, %bb3 ]
-  %tmp14 = call fastcc align 8 dereferenceable(288) %16* @test3(%20* noalias nonnull readonly align 8 dereferenceable(24) undef, i24 %tmp13)
+  %tmp14 = call fastcc align 8 dereferenceable(288) ptr @test3(ptr noalias nonnull readonly align 8 dereferenceable(24) undef, i24 %tmp13)
   br label %bb22
 
 bb15:                                             ; No predecessors!
-  %tmp16 = invoke fastcc { i64*, i8* } @test2(%0** nonnull align 8 dereferenceable(8) undef)
+  %tmp16 = invoke fastcc { ptr, ptr } @test2(ptr nonnull align 8 dereferenceable(8) undef)
           to label %bb17 unwind label %bb18
 
 bb17:                                             ; preds = %bb15
-  invoke void @test4({ [0 x i64], { [0 x i8]*, i64 }, [0 x i64], { [0 x i8]*, i64 }, [0 x i32], i32, [0 x i32], i32, [0 x i32] }* noalias readonly align 8 dereferenceable(40) bitcast (<{ i8*, [8 x i8], i8*, [16 x i8] }>* @var to { [0 x i64], { [0 x i8]*, i64 }, [0 x i64], { [0 x i8]*, i64 }, [0 x i32], i32, [0 x i32], i32, [0 x i32] }*))
+  invoke void @test4(ptr noalias readonly align 8 dereferenceable(40) @var)
           to label %bb23 unwind label %bb25
 
 bb18:                                             ; preds = %bb15
-  %tmp19 = landingpad { i8*, i32 }
+  %tmp19 = landingpad { ptr, i32 }
           cleanup
-  resume { i8*, i32 } undef
+  resume { ptr, i32 } undef
 
 bb20:                                             ; No predecessors!
-  invoke void @test4({ [0 x i64], { [0 x i8]*, i64 }, [0 x i64], { [0 x i8]*, i64 }, [0 x i32], i32, [0 x i32], i32, [0 x i32] }* noalias readonly align 8 dereferenceable(40) bitcast (<{ i8*, [8 x i8], i8*, [16 x i8] }>* @var to { [0 x i64], { [0 x i8]*, i64 }, [0 x i64], { [0 x i8]*, i64 }, [0 x i32], i32, [0 x i32], i32, [0 x i32] }*))
+  invoke void @test4(ptr noalias readonly align 8 dereferenceable(40) @var)
           to label %bb24 unwind label %bb25
 
 bb21:                                             ; preds = %bb
@@ -125,17 +125,17 @@ bb24:                                             ; preds = %bb20
   unreachable
 
 bb25:                                             ; preds = %bb20, %bb17
-  %tmp26 = landingpad { i8*, i32 }
+  %tmp26 = landingpad { ptr, i32 }
           cleanup
-  resume { i8*, i32 } undef
+  resume { ptr, i32 } undef
 }
 
-declare dso_local fastcc %16* @test3(%20*, i24) unnamed_addr
+declare dso_local fastcc ptr @test3(ptr, i24) unnamed_addr
 
-declare i32 @personality(i32, i32, i64, %21*, %22*) unnamed_addr
+declare i32 @personality(i32, i32, i64, ptr, ptr) unnamed_addr
 
-declare void @test4({ [0 x i64], { [0 x i8]*, i64 }, [0 x i64], { [0 x i8]*, i64 }, [0 x i32], i32, [0 x i32], i32, [0 x i32] }*) unnamed_addr
+declare void @test4(ptr) unnamed_addr
 
-declare i8 @test5(%14*) unnamed_addr
+declare i8 @test5(ptr) unnamed_addr
 
 !0 = !{i8 0, i8 5}

diff  --git a/llvm/test/CodeGen/PowerPC/pr41177.ll b/llvm/test/CodeGen/PowerPC/pr41177.ll
index 52108442893d..95de7b3a76b1 100644
--- a/llvm/test/CodeGen/PowerPC/pr41177.ll
+++ b/llvm/test/CodeGen/PowerPC/pr41177.ll
@@ -2,11 +2,11 @@
 ; REQUIRES: asserts
 
 define protected swiftcc void @"$s22LanguageServerProtocol13HoverResponseV8contents5rangeAcA13MarkupContentV_SnyAA8PositionVGSgtcfC"() {
-  %1 = load <2 x i64>, <2 x i64>* undef, align 16
-  %2 = load i1, i1* undef, align 8
+  %1 = load <2 x i64>, ptr undef, align 16
+  %2 = load i1, ptr undef, align 8
   %3 = insertelement <2 x i1> undef, i1 %2, i32 0
   %4 = shufflevector <2 x i1> %3, <2 x i1> undef, <2 x i32> zeroinitializer
   %5 = select <2 x i1> %4, <2 x i64> zeroinitializer, <2 x i64> %1
-  store <2 x i64> %5, <2 x i64>* undef, align 8
+  store <2 x i64> %5, ptr undef, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/pr42492.ll b/llvm/test/CodeGen/PowerPC/pr42492.ll
index ec1697bea1b4..ba5ac9169bf6 100644
--- a/llvm/test/CodeGen/PowerPC/pr42492.ll
+++ b/llvm/test/CodeGen/PowerPC/pr42492.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr9 < %s | FileCheck %s
 
-define void @f(i8*, i8*, i64*) {
+define void @f(ptr, ptr, ptr) {
 ; Check we don't assert and this is not a Hardware Loop
 ; CHECK-LABEL: f:
 ; CHECK:       # %bb.0:
@@ -24,16 +24,16 @@ define void @f(i8*, i8*, i64*) {
 ; CHECK-NEXT:    std 3, 8(5)
 ; CHECK-NEXT:    blr
 
-  %4 = icmp eq i8* %0, %1
+  %4 = icmp eq ptr %0, %1
   br i1 %4, label %9, label %5
 
 5:                                                ; preds = %3
-  %6 = getelementptr inbounds i64, i64* %2, i64 1
-  %7 = load i64, i64* %6, align 8
+  %6 = getelementptr inbounds i64, ptr %2, i64 1
+  %7 = load i64, ptr %6, align 8
   br label %10
 
 8:                                                ; preds = %10
-  store i64 %14, i64* %6, align 8
+  store i64 %14, ptr %6, align 8
   br label %9
 
 9:                                                ; preds = %8, %3
@@ -42,12 +42,12 @@ define void @f(i8*, i8*, i64*) {
 10:                                               ; preds = %5, %10
   %11 = phi i64 [ %7, %5 ], [ %14, %10 ]
   %12 = phi i32 [ 0, %5 ], [ %15, %10 ]
-  %13 = phi i8* [ %0, %5 ], [ %16, %10 ]
+  %13 = phi ptr [ %0, %5 ], [ %16, %10 ]
   %14 = shl nsw i64 %11, 4
   %15 = add nuw nsw i32 %12, 1
-  %16 = getelementptr inbounds i8, i8* %13, i64 1
+  %16 = getelementptr inbounds i8, ptr %13, i64 1
   %17 = icmp ugt i32 %12, 14
-  %18 = icmp eq i8* %16, %1
+  %18 = icmp eq ptr %16, %1
   %19 = or i1 %18, %17
   br i1 %19, label %8, label %10
 }

diff  --git a/llvm/test/CodeGen/PowerPC/pr43527.ll b/llvm/test/CodeGen/PowerPC/pr43527.ll
index 0a03c3f7112f..577d09ad9999 100644
--- a/llvm/test/CodeGen/PowerPC/pr43527.ll
+++ b/llvm/test/CodeGen/PowerPC/pr43527.ll
@@ -54,12 +54,12 @@ bb4:                                              ; preds = %bb3
 
 bb5:                                              ; preds = %bb5, %bb4
   %tmp6 = phi i64 [ %tmp12, %bb5 ], [ 0, %bb4 ]
-  %tmp7 = getelementptr inbounds float, float* null, i64 %tmp6
-  %tmp8 = load float, float* %tmp7, align 4
+  %tmp7 = getelementptr inbounds float, ptr null, i64 %tmp6
+  %tmp8 = load float, ptr %tmp7, align 4
   %tmp9 = fpext float %tmp8 to double
   %tmp10 = tail call i64 @llvm.lrint.i64.f64(double %tmp9) #2
   %tmp11 = trunc i64 %tmp10 to i8
-  store i8 %tmp11, i8* undef, align 1
+  store i8 %tmp11, ptr undef, align 1
   %tmp12 = add nuw i64 %tmp6, 1
   %tmp13 = icmp eq i64 %tmp12, %tmp
   br i1 %tmp13, label %bb15, label %bb5

diff  --git a/llvm/test/CodeGen/PowerPC/pr43976.ll b/llvm/test/CodeGen/PowerPC/pr43976.ll
index debf4399a678..4dfb9343385b 100644
--- a/llvm/test/CodeGen/PowerPC/pr43976.ll
+++ b/llvm/test/CodeGen/PowerPC/pr43976.ll
@@ -45,7 +45,7 @@ define dso_local signext i32 @b() local_unnamed_addr #0 {
 ; CHECK-NEXT:    mtlr r0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load double, double* @a, align 8
+  %0 = load double, ptr @a, align 8
   %conv = fptoui double %0 to i64
   %conv1 = sitofp i64 %conv to double
   %mul = fmul double %conv1, 1.000000e+06

diff  --git a/llvm/test/CodeGen/PowerPC/pr44183.ll b/llvm/test/CodeGen/PowerPC/pr44183.ll
index b58ae71c5249..befa20b7f552 100644
--- a/llvm/test/CodeGen/PowerPC/pr44183.ll
+++ b/llvm/test/CodeGen/PowerPC/pr44183.ll
@@ -6,7 +6,7 @@
 %struct.l.0.3.6.9 = type { i8 }
 %struct.a.1.4.7.10 = type { [27 x i8], [0 x i32], [4 x i8] }
 
-define void @_ZN1m1nEv(%struct.m.2.5.8.11* %this) local_unnamed_addr nounwind align 2 {
+define void @_ZN1m1nEv(ptr %this) local_unnamed_addr nounwind align 2 {
 ; CHECK-LABEL: _ZN1m1nEv:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mflr r0
@@ -37,22 +37,21 @@ define void @_ZN1m1nEv(%struct.m.2.5.8.11* %this) local_unnamed_addr nounwind al
 ; CHECK-NEXT:    mtlr r0
 ; CHECK-NEXT:    blr
 entry:
-  %bc = getelementptr inbounds %struct.m.2.5.8.11, %struct.m.2.5.8.11* %this, i64 0, i32 2
-  %0 = bitcast %struct.a.1.4.7.10* %bc to i216*
-  %bf.load = load i216, i216* %0, align 8
+  %bc = getelementptr inbounds %struct.m.2.5.8.11, ptr %this, i64 0, i32 2
+  %bf.load = load i216, ptr %bc, align 8
   %bf.lshr = lshr i216 %bf.load, 4
   %shl.i23 = shl i216 %bf.lshr, 31
   %shl.i = trunc i216 %shl.i23 to i32
-  %arrayidx = getelementptr inbounds %struct.m.2.5.8.11, %struct.m.2.5.8.11* %this, i64 0, i32 2, i32 1, i64 0
-  %1 = load i32, i32* %arrayidx, align 4
-  %and.i = and i32 %1, 1
+  %arrayidx = getelementptr inbounds %struct.m.2.5.8.11, ptr %this, i64 0, i32 2, i32 1, i64 0
+  %0 = load i32, ptr %arrayidx, align 4
+  %and.i = and i32 %0, 1
   %or.i = or i32 %and.i, %shl.i
-  tail call void @_ZN1llsE1d(%struct.l.0.3.6.9* undef, i32 %or.i) #1
-  %bf.load10 = load i216, i216* %0, align 8
+  tail call void @_ZN1llsE1d(ptr undef, i32 %or.i) #1
+  %bf.load10 = load i216, ptr %bc, align 8
   %bf.lshr11 = lshr i216 %bf.load10, 4
   %shl.i1524 = shl i216 %bf.lshr11, 31
   %shl.i15 = trunc i216 %shl.i1524 to i32
-  tail call void @_ZN1llsE1d(%struct.l.0.3.6.9* undef, i32 %shl.i15) #1
+  tail call void @_ZN1llsE1d(ptr undef, i32 %shl.i15) #1
   ret void
 }
-declare void @_ZN1llsE1d(%struct.l.0.3.6.9*, i32) local_unnamed_addr #0
+declare void @_ZN1llsE1d(ptr, i32) local_unnamed_addr #0

diff  --git a/llvm/test/CodeGen/PowerPC/pr45186.ll b/llvm/test/CodeGen/PowerPC/pr45186.ll
index 5e4a3277d922..41c79f90d7b4 100644
--- a/llvm/test/CodeGen/PowerPC/pr45186.ll
+++ b/llvm/test/CodeGen/PowerPC/pr45186.ll
@@ -6,47 +6,47 @@
 @d = local_unnamed_addr global %struct.anon zeroinitializer, align 8
 
 ; Function Attrs: norecurse nounwind readonly
-define i64 @e(i8* nocapture readonly %f) local_unnamed_addr #0 {
+define i64 @e(ptr nocapture readonly %f) local_unnamed_addr #0 {
 ; CHECK-LABEL: e:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    ld r3, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* %f, align 1
+  %0 = load i8, ptr %f, align 1
   %conv = zext i8 %0 to i64
   %shl = shl nuw i64 %conv, 56
-  %arrayidx1 = getelementptr inbounds i8, i8* %f, i64 1
-  %1 = load i8, i8* %arrayidx1, align 1
+  %arrayidx1 = getelementptr inbounds i8, ptr %f, i64 1
+  %1 = load i8, ptr %arrayidx1, align 1
   %conv2 = zext i8 %1 to i64
   %shl3 = shl nuw nsw i64 %conv2, 48
   %or = or i64 %shl3, %shl
-  %arrayidx4 = getelementptr inbounds i8, i8* %f, i64 2
-  %2 = load i8, i8* %arrayidx4, align 1
+  %arrayidx4 = getelementptr inbounds i8, ptr %f, i64 2
+  %2 = load i8, ptr %arrayidx4, align 1
   %conv5 = zext i8 %2 to i64
   %shl6 = shl nuw nsw i64 %conv5, 40
   %or7 = or i64 %or, %shl6
-  %arrayidx8 = getelementptr inbounds i8, i8* %f, i64 3
-  %3 = load i8, i8* %arrayidx8, align 1
+  %arrayidx8 = getelementptr inbounds i8, ptr %f, i64 3
+  %3 = load i8, ptr %arrayidx8, align 1
   %conv9 = zext i8 %3 to i64
   %shl10 = shl nuw nsw i64 %conv9, 32
   %or11 = or i64 %or7, %shl10
-  %arrayidx12 = getelementptr inbounds i8, i8* %f, i64 4
-  %4 = load i8, i8* %arrayidx12, align 1
+  %arrayidx12 = getelementptr inbounds i8, ptr %f, i64 4
+  %4 = load i8, ptr %arrayidx12, align 1
   %conv13 = zext i8 %4 to i64
   %shl14 = shl nuw nsw i64 %conv13, 24
   %or15 = or i64 %or11, %shl14
-  %arrayidx16 = getelementptr inbounds i8, i8* %f, i64 5
-  %5 = load i8, i8* %arrayidx16, align 1
+  %arrayidx16 = getelementptr inbounds i8, ptr %f, i64 5
+  %5 = load i8, ptr %arrayidx16, align 1
   %conv17 = zext i8 %5 to i64
   %shl18 = shl nuw nsw i64 %conv17, 16
   %or20 = or i64 %or15, %shl18
-  %arrayidx21 = getelementptr inbounds i8, i8* %f, i64 6
-  %6 = load i8, i8* %arrayidx21, align 1
+  %arrayidx21 = getelementptr inbounds i8, ptr %f, i64 6
+  %6 = load i8, ptr %arrayidx21, align 1
   %conv22 = zext i8 %6 to i64
   %shl23 = shl nuw nsw i64 %conv22, 8
   %or25 = or i64 %or20, %shl23
-  %arrayidx26 = getelementptr inbounds i8, i8* %f, i64 7
-  %7 = load i8, i8* %arrayidx26, align 1
+  %arrayidx26 = getelementptr inbounds i8, ptr %f, i64 7
+  %7 = load i8, ptr %arrayidx26, align 1
   %conv27 = zext i8 %7 to i64
   %or28 = or i64 %or25, %conv27
   ret i64 %or28
@@ -64,68 +64,68 @@ define void @g() local_unnamed_addr #0 {
 ; CHECK-NEXT:    stxvd2x vs0, 0, r4
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* getelementptr inbounds (i8, i8* bitcast (void ()* @g to i8*), i64 8), align 1
+  %0 = load i8, ptr getelementptr inbounds (i8, ptr @g, i64 8), align 1
   %conv.i = zext i8 %0 to i64
   %shl.i = shl nuw i64 %conv.i, 56
-  %1 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 9), align 1
+  %1 = load i8, ptr getelementptr (i8, ptr @g, i64 9), align 1
   %conv2.i = zext i8 %1 to i64
   %shl3.i = shl nuw nsw i64 %conv2.i, 48
   %or.i = or i64 %shl3.i, %shl.i
-  %2 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 10), align 1
+  %2 = load i8, ptr getelementptr (i8, ptr @g, i64 10), align 1
   %conv5.i = zext i8 %2 to i64
   %shl6.i = shl nuw nsw i64 %conv5.i, 40
   %or7.i = or i64 %or.i, %shl6.i
-  %3 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 11), align 1
+  %3 = load i8, ptr getelementptr (i8, ptr @g, i64 11), align 1
   %conv9.i = zext i8 %3 to i64
   %shl10.i = shl nuw nsw i64 %conv9.i, 32
   %or11.i = or i64 %or7.i, %shl10.i
-  %4 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 12), align 1
+  %4 = load i8, ptr getelementptr (i8, ptr @g, i64 12), align 1
   %conv13.i = zext i8 %4 to i64
   %shl14.i = shl nuw nsw i64 %conv13.i, 24
   %or15.i = or i64 %or11.i, %shl14.i
-  %5 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 13), align 1
+  %5 = load i8, ptr getelementptr (i8, ptr @g, i64 13), align 1
   %conv17.i = zext i8 %5 to i64
   %shl18.i = shl nuw nsw i64 %conv17.i, 16
   %or20.i = or i64 %or15.i, %shl18.i
-  %6 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 14), align 1
+  %6 = load i8, ptr getelementptr (i8, ptr @g, i64 14), align 1
   %conv22.i = zext i8 %6 to i64
   %shl23.i = shl nuw nsw i64 %conv22.i, 8
   %or25.i = or i64 %or20.i, %shl23.i
-  %7 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 15), align 1
+  %7 = load i8, ptr getelementptr (i8, ptr @g, i64 15), align 1
   %conv27.i = zext i8 %7 to i64
   %or28.i = or i64 %or25.i, %conv27.i
-  store i64 %or28.i, i64* getelementptr inbounds (%struct.anon, %struct.anon* @d, i64 0, i32 1), align 8
-  %8 = load i8, i8* bitcast (void ()* @g to i8*), align 1
+  store i64 %or28.i, ptr getelementptr inbounds (%struct.anon, ptr @d, i64 0, i32 1), align 8
+  %8 = load i8, ptr @g, align 1
   %conv.i2 = zext i8 %8 to i64
   %shl.i3 = shl nuw i64 %conv.i2, 56
-  %9 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 1), align 1
+  %9 = load i8, ptr getelementptr (i8, ptr @g, i64 1), align 1
   %conv2.i4 = zext i8 %9 to i64
   %shl3.i5 = shl nuw nsw i64 %conv2.i4, 48
   %or.i6 = or i64 %shl3.i5, %shl.i3
-  %10 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 2), align 1
+  %10 = load i8, ptr getelementptr (i8, ptr @g, i64 2), align 1
   %conv5.i7 = zext i8 %10 to i64
   %shl6.i8 = shl nuw nsw i64 %conv5.i7, 40
   %or7.i9 = or i64 %or.i6, %shl6.i8
-  %11 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 3), align 1
+  %11 = load i8, ptr getelementptr (i8, ptr @g, i64 3), align 1
   %conv9.i10 = zext i8 %11 to i64
   %shl10.i11 = shl nuw nsw i64 %conv9.i10, 32
   %or11.i12 = or i64 %or7.i9, %shl10.i11
-  %12 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 4), align 1
+  %12 = load i8, ptr getelementptr (i8, ptr @g, i64 4), align 1
   %conv13.i13 = zext i8 %12 to i64
   %shl14.i14 = shl nuw nsw i64 %conv13.i13, 24
   %or15.i15 = or i64 %or11.i12, %shl14.i14
-  %13 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 5), align 1
+  %13 = load i8, ptr getelementptr (i8, ptr @g, i64 5), align 1
   %conv17.i16 = zext i8 %13 to i64
   %shl18.i17 = shl nuw nsw i64 %conv17.i16, 16
   %or20.i18 = or i64 %or15.i15, %shl18.i17
-  %14 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 6), align 1
+  %14 = load i8, ptr getelementptr (i8, ptr @g, i64 6), align 1
   %conv22.i19 = zext i8 %14 to i64
   %shl23.i20 = shl nuw nsw i64 %conv22.i19, 8
   %or25.i21 = or i64 %or20.i18, %shl23.i20
-  %15 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 7), align 1
+  %15 = load i8, ptr getelementptr (i8, ptr @g, i64 7), align 1
   %conv27.i22 = zext i8 %15 to i64
   %or28.i23 = or i64 %or25.i21, %conv27.i22
-  store i64 %or28.i23, i64* getelementptr inbounds (%struct.anon, %struct.anon* @d, i64 0, i32 0), align 8
+  store i64 %or28.i23, ptr @d, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/pr45297.ll b/llvm/test/CodeGen/PowerPC/pr45297.ll
index 39583d5a04cc..f25f5aaa9190 100644
--- a/llvm/test/CodeGen/PowerPC/pr45297.ll
+++ b/llvm/test/CodeGen/PowerPC/pr45297.ll
@@ -17,6 +17,6 @@ define dso_local void @test(float %0) local_unnamed_addr {
 ; CHECK-NEXT:    blr
 entry:
   %1 = fptosi float %0 to i32
-  store i32 %1, i32* @Global, align 4
+  store i32 %1, ptr @Global, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/pr45301.ll b/llvm/test/CodeGen/PowerPC/pr45301.ll
index 606eaa566500..6259716fc480 100644
--- a/llvm/test/CodeGen/PowerPC/pr45301.ll
+++ b/llvm/test/CodeGen/PowerPC/pr45301.ll
@@ -3,7 +3,7 @@
 ; RUN:   -ppc-asm-full-reg-names < %s | FileCheck %s
 %struct.e.0.1.2.3.12.29 = type { [10 x i32] }
 
-define dso_local void @g(%struct.e.0.1.2.3.12.29* %agg.result) local_unnamed_addr #0 {
+define dso_local void @g(ptr %agg.result) local_unnamed_addr #0 {
 ; CHECK-LABEL: g:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mflr r0
@@ -32,25 +32,25 @@ define dso_local void @g(%struct.e.0.1.2.3.12.29* %agg.result) local_unnamed_add
 ; CHECK-NEXT:    mtlr r0
 ; CHECK-NEXT:    blr
 entry:
-  %call = tail call signext i32 bitcast (i32 (...)* @i to i32 ()*)()
+  %call = tail call signext i32 @i()
   %conv = sext i32 %call to i64
-  %0 = inttoptr i64 %conv to i8*
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 4 dereferenceable(40) %0, i8* nonnull align 4 dereferenceable(40) bitcast (void (%struct.e.0.1.2.3.12.29*)* @g to i8*), i64 40, i1 false)
-  %1 = inttoptr i64 %conv to i32*
-  %2 = load i32, i32* %1, align 4
+  %0 = inttoptr i64 %conv to ptr
+  tail call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 4 dereferenceable(40) %0, ptr nonnull align 4 dereferenceable(40) @g, i64 40, i1 false)
+  %1 = inttoptr i64 %conv to ptr
+  %2 = load i32, ptr %1, align 4
   %rev.i = tail call i32 @llvm.bswap.i32(i32 %2)
-  store i32 %rev.i, i32* %1, align 4
-  %incdec.ptr.i.4 = getelementptr inbounds i32, i32* %1, i64 5
-  %3 = load i32, i32* %incdec.ptr.i.4, align 4
+  store i32 %rev.i, ptr %1, align 4
+  %incdec.ptr.i.4 = getelementptr inbounds i32, ptr %1, i64 5
+  %3 = load i32, ptr %incdec.ptr.i.4, align 4
   %rev.i.5 = tail call i32 @llvm.bswap.i32(i32 %3)
-  store i32 %rev.i.5, i32* %incdec.ptr.i.4, align 4
+  store i32 %rev.i.5, ptr %incdec.ptr.i.4, align 4
   ret void
 }
 
 declare i32 @i(...) local_unnamed_addr
 
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg) #1
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #1
 
 ; Function Attrs: nounwind readnone speculatable willreturn
 declare i32 @llvm.bswap.i32(i32)

diff  --git a/llvm/test/CodeGen/PowerPC/pr45432.ll b/llvm/test/CodeGen/PowerPC/pr45432.ll
index 7ce996f893f5..a82b2b624778 100644
--- a/llvm/test/CodeGen/PowerPC/pr45432.ll
+++ b/llvm/test/CodeGen/PowerPC/pr45432.ll
@@ -35,10 +35,10 @@ define dso_local void @h() local_unnamed_addr #0 {
 ; CHECK-NEXT:    blr
 ; CHECK-NEXT:  .LBB0_2: # %bb5
 bb:
-  %i = load i32, i32* @g, align 4
+  %i = load i32, ptr @g, align 4
   %i1 = sext i32 %i to i64
-  %i2 = getelementptr inbounds [0 x %1], [0 x %1]* bitcast (double* getelementptr inbounds (%0, %0* @f, i64 1, i32 0) to [0 x %1]*), i64 0, i64 %i1, i32 0
-  %i3 = load i32, i32* %i2, align 4
+  %i2 = getelementptr inbounds [0 x %1], ptr getelementptr inbounds (%0, ptr @f, i64 1, i32 0), i64 0, i64 %i1, i32 0
+  %i3 = load i32, ptr %i2, align 4
   %i4 = icmp eq i32 %i3, 0
   br i1 %i4, label %bb6, label %bb5
 

diff  --git a/llvm/test/CodeGen/PowerPC/pr45448.ll b/llvm/test/CodeGen/PowerPC/pr45448.ll
index 7af188dc092f..0f8014df8adc 100644
--- a/llvm/test/CodeGen/PowerPC/pr45448.ll
+++ b/llvm/test/CodeGen/PowerPC/pr45448.ll
@@ -35,7 +35,7 @@ define hidden void @julia_tryparse_internal_45896() #0 {
 ; CHECK-NEXT:  # %bb.9: # %L917
 ; CHECK-NEXT:  .LBB0_10: # %L994
 top:
-  %0 = load i64, i64* undef, align 8
+  %0 = load i64, ptr undef, align 8
   %1 = icmp ne i64 %0, 0
   %2 = sext i64 %0 to i128
   switch i64 %0, label %pass195 [

diff  --git a/llvm/test/CodeGen/PowerPC/pr46759.ll b/llvm/test/CodeGen/PowerPC/pr46759.ll
index d6d02921efca..16186480fe00 100644
--- a/llvm/test/CodeGen/PowerPC/pr46759.ll
+++ b/llvm/test/CodeGen/PowerPC/pr46759.ll
@@ -62,7 +62,7 @@ define void @foo(i32 %vla_size) #0 {
 entry:
   %0 = zext i32 %vla_size to i64
   %vla = alloca i8, i64 %0, align 2048
-  %1 = load volatile i8, i8* %vla, align 2048
+  %1 = load volatile i8, ptr %vla, align 2048
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/pr46923.ll b/llvm/test/CodeGen/PowerPC/pr46923.ll
index 0e35d09ed769..fcec60bb67ef 100644
--- a/llvm/test/CodeGen/PowerPC/pr46923.ll
+++ b/llvm/test/CodeGen/PowerPC/pr46923.ll
@@ -22,7 +22,7 @@ false:
   br label %end
 
 end:
-  %a = phi i1 [ icmp ugt (i64 0, i64 ptrtoint (i64* @bar to i64)), %true ],
+  %a = phi i1 [ icmp ugt (i64 0, i64 ptrtoint (ptr @bar to i64)), %true ],
               [ icmp ugt (i64 0, i64 2), %false ]
   ret i1 %a
 }

diff  --git a/llvm/test/CodeGen/PowerPC/pr47707.ll b/llvm/test/CodeGen/PowerPC/pr47707.ll
index 099fcf10d994..047087ed71a2 100644
--- a/llvm/test/CodeGen/PowerPC/pr47707.ll
+++ b/llvm/test/CodeGen/PowerPC/pr47707.ll
@@ -4,7 +4,7 @@
 target datalayout = "e-m:e-i64:64-n32:64"
 target triple = "powerpc64le-grtev4-linux-gnu"
 
-define void @foo(i64* %p1, i64 %v1, i8 %v2, i64 %v3) {
+define void @foo(ptr %p1, i64 %v1, i8 %v2, i64 %v3) {
 ; CHECK-LABEL: foo:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mr 7, 5
@@ -22,7 +22,7 @@ define void @foo(i64* %p1, i64 %v1, i8 %v2, i64 %v3) {
 ; CHECK-NEXT:  # %bb.3: # %bb3
 ; CHECK-NEXT:    std 6, 0(3)
 ; CHECK-NEXT:    blr
-  store i64 0, i64* %p1, align 8
+  store i64 0, ptr %p1, align 8
   %ext = zext i8 %v2 to i64
   %shift = shl nuw i64 %v1, 8
   %merge = or i64 %shift, %ext
@@ -32,14 +32,14 @@ define void @foo(i64* %p1, i64 %v1, i8 %v2, i64 %v3) {
   br i1 %cond1, label %bb2, label %bb1    ; be used by this conditional branch
 
 bb1:
-  store i64 %v1, i64* %p1, align 8
+  store i64 %v1, ptr %p1, align 8
   br label %bb2
 
 bb2:
   br i1 %not0, label %exit, label %bb3
 
 bb3:
-  store i64 %v3, i64* %p1, align 8
+  store i64 %v3, ptr %p1, align 8
   br label %exit
 
 exit:

diff  --git a/llvm/test/CodeGen/PowerPC/pr47891.ll b/llvm/test/CodeGen/PowerPC/pr47891.ll
index 2c53769e069d..17ddf8d6cbd5 100644
--- a/llvm/test/CodeGen/PowerPC/pr47891.ll
+++ b/llvm/test/CodeGen/PowerPC/pr47891.ll
@@ -4,7 +4,7 @@
 %struct.poly2 = type { [11 x i64] }
 
 ; Function Attrs: nofree norecurse nounwind
-define dso_local void @poly2_lshift1(%struct.poly2* nocapture %p) local_unnamed_addr #0 {
+define dso_local void @poly2_lshift1(ptr nocapture %p) local_unnamed_addr #0 {
 ; CHECK-LABEL: poly2_lshift1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li r4, 72
@@ -62,50 +62,47 @@ define dso_local void @poly2_lshift1(%struct.poly2* nocapture %p) local_unnamed_
 ; CHECK-NEXT:    stxvd2x vs0, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %arrayidx = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 0
-  %0 = load i64, i64* %arrayidx, align 8
+  %0 = load i64, ptr %p, align 8
   %shl = shl i64 %0, 1
-  store i64 %shl, i64* %arrayidx, align 8
-  %arrayidx.1 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 1
-  %1 = load i64, i64* %arrayidx.1, align 8
+  store i64 %shl, ptr %p, align 8
+  %arrayidx.1 = getelementptr inbounds %struct.poly2, ptr %p, i64 0, i32 0, i64 1
+  %1 = load i64, ptr %arrayidx.1, align 8
   %or.1 = call i64 @llvm.fshl.i64(i64 %1, i64 %0, i64 1)
-  store i64 %or.1, i64* %arrayidx.1, align 8
-  %arrayidx.2 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 2
-  %2 = load i64, i64* %arrayidx.2, align 8
+  store i64 %or.1, ptr %arrayidx.1, align 8
+  %arrayidx.2 = getelementptr inbounds %struct.poly2, ptr %p, i64 0, i32 0, i64 2
+  %2 = load i64, ptr %arrayidx.2, align 8
   %or.2 = call i64 @llvm.fshl.i64(i64 %2, i64 %1, i64 1)
-  store i64 %or.2, i64* %arrayidx.2, align 8
-  %arrayidx.3 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 3
-  %3 = load i64, i64* %arrayidx.3, align 8
+  store i64 %or.2, ptr %arrayidx.2, align 8
+  %arrayidx.3 = getelementptr inbounds %struct.poly2, ptr %p, i64 0, i32 0, i64 3
+  %3 = load i64, ptr %arrayidx.3, align 8
   %or.3 = call i64 @llvm.fshl.i64(i64 %3, i64 %2, i64 1)
-  store i64 %or.3, i64* %arrayidx.3, align 8
-  %arrayidx.4 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 4
-  %4 = load i64, i64* %arrayidx.4, align 8
+  store i64 %or.3, ptr %arrayidx.3, align 8
+  %arrayidx.4 = getelementptr inbounds %struct.poly2, ptr %p, i64 0, i32 0, i64 4
+  %4 = load i64, ptr %arrayidx.4, align 8
   %or.4 = call i64 @llvm.fshl.i64(i64 %4, i64 %3, i64 1)
-  store i64 %or.4, i64* %arrayidx.4, align 8
-  %arrayidx.5 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 5
-  %5 = load i64, i64* %arrayidx.5, align 8
+  store i64 %or.4, ptr %arrayidx.4, align 8
+  %arrayidx.5 = getelementptr inbounds %struct.poly2, ptr %p, i64 0, i32 0, i64 5
+  %5 = load i64, ptr %arrayidx.5, align 8
   %or.5 = call i64 @llvm.fshl.i64(i64 %5, i64 %4, i64 1)
-  store i64 %or.5, i64* %arrayidx.5, align 8
-  %arrayidx.6 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 6
-  %6 = load i64, i64* %arrayidx.6, align 8
+  store i64 %or.5, ptr %arrayidx.5, align 8
+  %arrayidx.6 = getelementptr inbounds %struct.poly2, ptr %p, i64 0, i32 0, i64 6
+  %6 = load i64, ptr %arrayidx.6, align 8
   %or.6 = call i64 @llvm.fshl.i64(i64 %6, i64 %5, i64 1)
-  store i64 %or.6, i64* %arrayidx.6, align 8
-  %arrayidx.7 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 7
-  %7 = load i64, i64* %arrayidx.7, align 8
+  store i64 %or.6, ptr %arrayidx.6, align 8
+  %arrayidx.7 = getelementptr inbounds %struct.poly2, ptr %p, i64 0, i32 0, i64 7
+  %7 = load i64, ptr %arrayidx.7, align 8
   %or.7 = call i64 @llvm.fshl.i64(i64 %7, i64 %6, i64 1)
-  store i64 %or.7, i64* %arrayidx.7, align 8
-  %arrayidx.8 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 8
-  %8 = load i64, i64* %arrayidx.8, align 8
+  store i64 %or.7, ptr %arrayidx.7, align 8
+  %arrayidx.8 = getelementptr inbounds %struct.poly2, ptr %p, i64 0, i32 0, i64 8
+  %8 = load i64, ptr %arrayidx.8, align 8
   %or.8 = call i64 @llvm.fshl.i64(i64 %8, i64 %7, i64 1)
-  store i64 %or.8, i64* %arrayidx.8, align 8
-  %arrayidx.9 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 9
-  %9 = bitcast i64* %arrayidx.9 to <2 x i64>*
-  %10 = load <2 x i64>, <2 x i64>* %9, align 8
-  %11 = insertelement <2 x i64> undef, i64 %8, i32 0
-  %12 = shufflevector <2 x i64> %11, <2 x i64> %10, <2 x i32> <i32 0, i32 2>
-  %13 = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %10, <2 x i64> %12, <2 x i64> <i64 1, i64 1>)
-  %14 = bitcast i64* %arrayidx.9 to <2 x i64>*
-  store <2 x i64> %13, <2 x i64>* %14, align 8
+  store i64 %or.8, ptr %arrayidx.8, align 8
+  %arrayidx.9 = getelementptr inbounds %struct.poly2, ptr %p, i64 0, i32 0, i64 9
+  %9 = load <2 x i64>, ptr %arrayidx.9, align 8
+  %10 = insertelement <2 x i64> undef, i64 %8, i32 0
+  %11 = shufflevector <2 x i64> %10, <2 x i64> %9, <2 x i32> <i32 0, i32 2>
+  %12 = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %9, <2 x i64> %11, <2 x i64> <i64 1, i64 1>)
+  store <2 x i64> %12, ptr %arrayidx.9, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/pr47916.ll b/llvm/test/CodeGen/PowerPC/pr47916.ll
index af5c45df8430..6b1389e83d2d 100644
--- a/llvm/test/CodeGen/PowerPC/pr47916.ll
+++ b/llvm/test/CodeGen/PowerPC/pr47916.ll
@@ -14,8 +14,8 @@ define dso_local void @_Z1jjPiPj() local_unnamed_addr #0 {
 ; CHECK-NEXT:    stxvd2x vs0, 0, r3
 ; CHECK-NEXT:    blr
 entry:
-  %wide.load42 = load <2 x i32>, <2 x i32>* undef, align 4
+  %wide.load42 = load <2 x i32>, ptr undef, align 4
   %interleaved.vec49 = shufflevector <2 x i32> %wide.load42, <2 x i32> undef, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
-  store <4 x i32> %interleaved.vec49, <4 x i32>* undef, align 4
+  store <4 x i32> %interleaved.vec49, ptr undef, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/pr48519.ll b/llvm/test/CodeGen/PowerPC/pr48519.ll
index 3552c049e321..e846619c11a6 100644
--- a/llvm/test/CodeGen/PowerPC/pr48519.ll
+++ b/llvm/test/CodeGen/PowerPC/pr48519.ll
@@ -56,7 +56,7 @@ define void @julia__typed_vcat_20() #0 {
 ; CHECK-P9-NEXT:    xscvdphp f0, f0
 ; CHECK-P9-NEXT:    stxsihx f0, 0, r3
 bb:
-  %i = load i64, i64 addrspace(11)* null, align 8
+  %i = load i64, ptr addrspace(11) null, align 8
   %i1 = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %i, i64 0)
   %i2 = extractvalue { i64, i1 } %i1, 0
   br label %bb3
@@ -67,7 +67,7 @@ bb3:                                              ; preds = %bb3, %bb
   %i6 = add nsw i64 %i5, -1
   %i7 = add i64 %i6, 0
   %i8 = sitofp i64 %i7 to half
-  store half %i8, half addrspace(13)* undef, align 2
+  store half %i8, ptr addrspace(13) undef, align 2
   %i9 = icmp eq i64 %i4, 0
   %i10 = add i64 %i4, 1
   br i1 %i9, label %bb11, label %bb3
@@ -133,7 +133,7 @@ bb1:                                              ; preds = %bb3, %bb
 
 bb3:                                              ; preds = %bb1
   %i4 = add nuw nsw i64 %i, 1
-  %i5 = load half, half* null, align 2
+  %i5 = load half, ptr null, align 2
   %i6 = fpext half %i5 to float
   %i7 = fcmp uno float %i6, 0.000000e+00
   %i8 = or i1 %i7, false
@@ -213,8 +213,8 @@ define void @func_48786() #0 {
 ; CHECK-P9-NEXT:    b .LBB2_1
 ; CHECK-P9-NEXT:  .LBB2_5: # %bb15
 bb:
-  %i = load i64, i64 addrspace(11)* undef, align 8
-  %i1 = load i64, i64 addrspace(11)* undef, align 8
+  %i = load i64, ptr addrspace(11) undef, align 8
+  %i1 = load i64, ptr addrspace(11) undef, align 8
   br label %bb2
 
 bb2:                                              ; preds = %bb12, %bb
@@ -228,12 +228,12 @@ bb4:                                              ; preds = %bb2
   ]
 
 bb5:                                              ; preds = %bb4, %bb4
-  %i6 = load half, half addrspace(13)* undef, align 2
+  %i6 = load half, ptr addrspace(13) undef, align 2
   %i7 = icmp ult i64 0, %i1
   br i1 %i7, label %bb8, label %bb15
 
 bb8:                                              ; preds = %bb5
-  store half %i6, half addrspace(13)* null, align 2
+  store half %i6, ptr addrspace(13) null, align 2
   br label %bb10
 
 bb9:                                              ; preds = %bb4
@@ -298,8 +298,8 @@ bb:
 
 bb1:                                              ; preds = %bb1, %bb
   %i = phi i64 [ 0, %bb ], [ %i3, %bb1 ]
-  %i2 = getelementptr inbounds half, half addrspace(13)* null, i64 %i
-  store half %arg, half addrspace(13)* %i2, align 2
+  %i2 = getelementptr inbounds half, ptr addrspace(13) null, i64 %i
+  store half %arg, ptr addrspace(13) %i2, align 2
   %i3 = add i64 %i, 12
   %i4 = icmp eq i64 %i3, 0
   br i1 %i4, label %bb5, label %bb1

diff  --git a/llvm/test/CodeGen/PowerPC/pr48527.ll b/llvm/test/CodeGen/PowerPC/pr48527.ll
index eaff15ce071e..d09363125081 100644
--- a/llvm/test/CodeGen/PowerPC/pr48527.ll
+++ b/llvm/test/CodeGen/PowerPC/pr48527.ll
@@ -46,7 +46,7 @@ define void @_ZNK1q1rEv() local_unnamed_addr #0 align 2 {
 ; CHECK-NEXT:    mtlr 0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* undef, align 4
+  %0 = load i32, ptr undef, align 4
   br label %monotonic.i
 
 for.cond.i:                                       ; preds = %monotonic.i
@@ -55,15 +55,15 @@ for.cond.i:                                       ; preds = %monotonic.i
 
 monotonic.i:                                      ; preds = %for.cond.i, %entry
   %i.018.i = phi i32 [ %inc.i, %for.cond.i ], [ 0, %entry ]
-  %1 = load atomic i32, i32* getelementptr inbounds (%struct.e.0.12.28.44.104.108.112.188, %struct.e.0.12.28.44.104.108.112.188* @g, i64 0, i32 0) monotonic, align 4
+  %1 = load atomic i32, ptr @g monotonic, align 4
   %conv.i = trunc i32 %1 to i8
   %tobool.not.i = icmp eq i8 %conv.i, 0
   %inc.i = add nuw nsw i32 %i.018.i, 1
   br i1 %tobool.not.i, label %for.cond.i, label %if.end
 
 if.end:                                           ; preds = %monotonic.i, %for.cond.i
-  %.sink = phi i64* [ getelementptr inbounds (%struct.t.1.13.29.45.105.109.113.189, %struct.t.1.13.29.45.105.109.113.189* @aj, i64 0, i32 1), %monotonic.i ], [ getelementptr inbounds (%struct.t.1.13.29.45.105.109.113.189, %struct.t.1.13.29.45.105.109.113.189* @aj, i64 0, i32 0), %for.cond.i ]
-  store i64 1, i64* %.sink, align 8
+  %.sink = phi ptr [ getelementptr inbounds (%struct.t.1.13.29.45.105.109.113.189, ptr @aj, i64 0, i32 1), %monotonic.i ], [ @aj, %for.cond.i ]
+  store i64 1, ptr %.sink, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/pr49509.ll b/llvm/test/CodeGen/PowerPC/pr49509.ll
index f13733c18047..7b6248c60ab4 100644
--- a/llvm/test/CodeGen/PowerPC/pr49509.ll
+++ b/llvm/test/CodeGen/PowerPC/pr49509.ll
@@ -56,12 +56,12 @@ bb:
 
 bb2:                                              ; preds = %bb
   %i = select i1 undef, i64 0, i64 72057594037927936
-  store i64 %i, i64* undef, align 8
+  store i64 %i, ptr undef, align 8
   ret void
 
 bb1:                                              ; preds = %bb
-  %i50 = load i8, i8* undef, align 8
-  %i52 = load i128, i128* null, align 8
+  %i50 = load i8, ptr undef, align 8
+  %i52 = load i128, ptr null, align 8
   %i62 = icmp eq i8 %i50, 0
   br i1 undef, label %bb66, label %bb64
 
@@ -76,6 +76,6 @@ bb66:                                             ; preds = %bb63
   %i71 = icmp eq i128 %i70, 0
   %i74 = select i1 %i62, i64 0, i64 72057594037927936
   %i75 = select i1 %i71, i64 144115188075855872, i64 %i74
-  store i64 %i75, i64* undef, align 8
+  store i64 %i75, ptr undef, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/pr56469.ll b/llvm/test/CodeGen/PowerPC/pr56469.ll
index a1865de80cf7..f7025fc4e8d6 100644
--- a/llvm/test/CodeGen/PowerPC/pr56469.ll
+++ b/llvm/test/CodeGen/PowerPC/pr56469.ll
@@ -26,9 +26,9 @@ entry:
   %conv1 = fpext float %j to double
   %conv2 = fpext float %k to double
   %conv3 = fpext float %l to double
-  %call = tail call signext i32 (i8*, ...) @printf(i8*  nonnull dereferenceable(1) getelementptr inbounds ([32 x i8], [32 x i8]* @.str, i64 0, i64 0), double  %conv, double  %conv1, double  %conv2, double  %conv3)
+  %call = tail call signext i32 (ptr, ...) @printf(ptr  nonnull dereferenceable(1) @.str, double  %conv, double  %conv1, double  %conv2, double  %conv3)
   ret void
 }
 
-declare  signext i32 @printf(i8* nocapture  readonly, ...)
+declare  signext i32 @printf(ptr nocapture  readonly, ...)
 

diff  --git a/llvm/test/CodeGen/PowerPC/pre-inc-disable.ll b/llvm/test/CodeGen/PowerPC/pre-inc-disable.ll
index ad7891c691ea..c33ab66cd98c 100644
--- a/llvm/test/CodeGen/PowerPC/pre-inc-disable.ll
+++ b/llvm/test/CodeGen/PowerPC/pre-inc-disable.ll
@@ -15,7 +15,7 @@
 ; RUN:     -mtriple=powerpc-ibm-aix-xcoff -vec-extabi \
 ; RUN:     < %s | FileCheck %s --check-prefixes=P9BE-AIX32
 
-define void @test64(i8* nocapture readonly %pix2, i32 signext %i_pix2) {
+define void @test64(ptr nocapture readonly %pix2, i32 signext %i_pix2) {
 ; P9LE-LABEL: test64:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    add 5, 3, 4
@@ -113,29 +113,27 @@ define void @test64(i8* nocapture readonly %pix2, i32 signext %i_pix2) {
 ; P9BE-AIX32-NEXT:    blr
 entry:
   %idx.ext63 = sext i32 %i_pix2 to i64
-  %add.ptr64 = getelementptr inbounds i8, i8* %pix2, i64 %idx.ext63
-  %arrayidx5.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 4
-  %0 = bitcast i8* %add.ptr64 to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 1
-  %reorder_shuffle117 = shufflevector <4 x i16> %1, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-  %2 = zext <4 x i16> %reorder_shuffle117 to <4 x i32>
-  %3 = sub nsw <4 x i32> zeroinitializer, %2
-  %4 = bitcast i8* %arrayidx5.1 to <4 x i16>*
-  %5 = load <4 x i16>, <4 x i16>* %4, align 1
-  %reorder_shuffle115 = shufflevector <4 x i16> %5, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-  %6 = zext <4 x i16> %reorder_shuffle115 to <4 x i32>
-  %7 = sub nsw <4 x i32> zeroinitializer, %6
-  %8 = shl nsw <4 x i32> %7, <i32 16, i32 16, i32 16, i32 16>
-  %9 = add nsw <4 x i32> %8, %3
-  %10 = sub nsw <4 x i32> %9, zeroinitializer
-  %11 = shufflevector <4 x i32> undef, <4 x i32> %10, <4 x i32> <i32 2, i32 7, i32 0, i32 5>
-  %12 = add nsw <4 x i32> zeroinitializer, %11
-  %13 = shufflevector <4 x i32> %12, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
-  store <4 x i32> %13, <4 x i32>* undef, align 16
+  %add.ptr64 = getelementptr inbounds i8, ptr %pix2, i64 %idx.ext63
+  %arrayidx5.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 4
+  %0 = load <4 x i16>, ptr %add.ptr64, align 1
+  %reorder_shuffle117 = shufflevector <4 x i16> %0, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+  %1 = zext <4 x i16> %reorder_shuffle117 to <4 x i32>
+  %2 = sub nsw <4 x i32> zeroinitializer, %1
+  %3 = load <4 x i16>, ptr %arrayidx5.1, align 1
+  %reorder_shuffle115 = shufflevector <4 x i16> %3, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+  %4 = zext <4 x i16> %reorder_shuffle115 to <4 x i32>
+  %5 = sub nsw <4 x i32> zeroinitializer, %4
+  %6 = shl nsw <4 x i32> %5, <i32 16, i32 16, i32 16, i32 16>
+  %7 = add nsw <4 x i32> %6, %2
+  %8 = sub nsw <4 x i32> %7, zeroinitializer
+  %9 = shufflevector <4 x i32> undef, <4 x i32> %8, <4 x i32> <i32 2, i32 7, i32 0, i32 5>
+  %10 = add nsw <4 x i32> zeroinitializer, %9
+  %11 = shufflevector <4 x i32> %10, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+  store <4 x i32> %11, ptr undef, align 16
   ret void
 }
 
-define void @test32(i8* nocapture readonly %pix2, i32 signext %i_pix2) {
+define void @test32(ptr nocapture readonly %pix2, i32 signext %i_pix2) {
 ; P9LE-LABEL: test32:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    add 5, 3, 4
@@ -219,29 +217,27 @@ define void @test32(i8* nocapture readonly %pix2, i32 signext %i_pix2) {
 ; P9BE-AIX32-NEXT:    blr
 entry:
   %idx.ext63 = sext i32 %i_pix2 to i64
-  %add.ptr64 = getelementptr inbounds i8, i8* %pix2, i64 %idx.ext63
-  %arrayidx5.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 4
-  %0 = bitcast i8* %add.ptr64 to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %reorder_shuffle117 = shufflevector <4 x i8> %1, <4 x i8> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-  %2 = zext <4 x i8> %reorder_shuffle117 to <4 x i32>
-  %3 = sub nsw <4 x i32> zeroinitializer, %2
-  %4 = bitcast i8* %arrayidx5.1 to <4 x i8>*
-  %5 = load <4 x i8>, <4 x i8>* %4, align 1
-  %reorder_shuffle115 = shufflevector <4 x i8> %5, <4 x i8> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-  %6 = zext <4 x i8> %reorder_shuffle115 to <4 x i32>
-  %7 = sub nsw <4 x i32> zeroinitializer, %6
-  %8 = shl nsw <4 x i32> %7, <i32 16, i32 16, i32 16, i32 16>
-  %9 = add nsw <4 x i32> %8, %3
-  %10 = sub nsw <4 x i32> %9, zeroinitializer
-  %11 = shufflevector <4 x i32> undef, <4 x i32> %10, <4 x i32> <i32 2, i32 7, i32 0, i32 5>
-  %12 = add nsw <4 x i32> zeroinitializer, %11
-  %13 = shufflevector <4 x i32> %12, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
-  store <4 x i32> %13, <4 x i32>* undef, align 16
+  %add.ptr64 = getelementptr inbounds i8, ptr %pix2, i64 %idx.ext63
+  %arrayidx5.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 4
+  %0 = load <4 x i8>, ptr %add.ptr64, align 1
+  %reorder_shuffle117 = shufflevector <4 x i8> %0, <4 x i8> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+  %1 = zext <4 x i8> %reorder_shuffle117 to <4 x i32>
+  %2 = sub nsw <4 x i32> zeroinitializer, %1
+  %3 = load <4 x i8>, ptr %arrayidx5.1, align 1
+  %reorder_shuffle115 = shufflevector <4 x i8> %3, <4 x i8> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+  %4 = zext <4 x i8> %reorder_shuffle115 to <4 x i32>
+  %5 = sub nsw <4 x i32> zeroinitializer, %4
+  %6 = shl nsw <4 x i32> %5, <i32 16, i32 16, i32 16, i32 16>
+  %7 = add nsw <4 x i32> %6, %2
+  %8 = sub nsw <4 x i32> %7, zeroinitializer
+  %9 = shufflevector <4 x i32> undef, <4 x i32> %8, <4 x i32> <i32 2, i32 7, i32 0, i32 5>
+  %10 = add nsw <4 x i32> zeroinitializer, %9
+  %11 = shufflevector <4 x i32> %10, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+  store <4 x i32> %11, ptr undef, align 16
   ret void
 }
 
-define void @test16(i16* nocapture readonly %sums, i32 signext %delta, i32 signext %thresh) {
+define void @test16(ptr nocapture readonly %sums, i32 signext %delta, i32 signext %thresh) {
 ; P9LE-LABEL: test16:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    sldi 4, 4, 1
@@ -354,10 +350,10 @@ entry:
   br label %for.body
 
 for.body:                                         ; preds = %entry
-  %arrayidx8 = getelementptr inbounds i16, i16* %sums, i64 %idxprom
-  %0 = load i16, i16* %arrayidx8, align 2
-  %arrayidx16 = getelementptr inbounds i16, i16* %sums, i64 %idxprom15
-  %1 = load i16, i16* %arrayidx16, align 2
+  %arrayidx8 = getelementptr inbounds i16, ptr %sums, i64 %idxprom
+  %0 = load i16, ptr %arrayidx8, align 2
+  %arrayidx16 = getelementptr inbounds i16, ptr %sums, i64 %idxprom15
+  %1 = load i16, ptr %arrayidx16, align 2
   %2 = insertelement <4 x i16> undef, i16 %0, i32 2
   %3 = insertelement <4 x i16> %2, i16 %1, i32 3
   %4 = zext <4 x i16> %3 to <4 x i32>
@@ -379,7 +375,7 @@ if.end:                                           ; preds = %for.body
   ret void
 }
 
-define void @test8(i8* nocapture readonly %sums, i32 signext %delta, i32 signext %thresh) {
+define void @test8(ptr nocapture readonly %sums, i32 signext %delta, i32 signext %thresh) {
 ; P9LE-LABEL: test8:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    add 6, 3, 4
@@ -496,10 +492,10 @@ entry:
   br label %for.body
 
 for.body:                                         ; preds = %entry
-  %arrayidx8 = getelementptr inbounds i8, i8* %sums, i64 %idxprom
-  %0 = load i8, i8* %arrayidx8, align 2
-  %arrayidx16 = getelementptr inbounds i8, i8* %sums, i64 %idxprom15
-  %1 = load i8, i8* %arrayidx16, align 2
+  %arrayidx8 = getelementptr inbounds i8, ptr %sums, i64 %idxprom
+  %0 = load i8, ptr %arrayidx8, align 2
+  %arrayidx16 = getelementptr inbounds i8, ptr %sums, i64 %idxprom15
+  %1 = load i8, ptr %arrayidx16, align 2
   %2 = insertelement <4 x i8> undef, i8 %0, i32 2
   %3 = insertelement <4 x i8> %2, i8 %1, i32 3
   %4 = zext <4 x i8> %3 to <4 x i32>

diff  --git a/llvm/test/CodeGen/PowerPC/preinc-ld-sel-crash.ll b/llvm/test/CodeGen/PowerPC/preinc-ld-sel-crash.ll
index 35aec57ec264..aea3246cceb1 100644
--- a/llvm/test/CodeGen/PowerPC/preinc-ld-sel-crash.ll
+++ b/llvm/test/CodeGen/PowerPC/preinc-ld-sel-crash.ll
@@ -2,9 +2,9 @@
 target datalayout = "E-m:e-i64:64-n32:64"
 target triple = "powerpc64le-unknown-linux"
 
-%t1 = type { %t2*, %t3* }
-%t2 = type <{ %t3*, i32, [4 x i8] }>
-%t3 = type { %t3* }
+%t1 = type { ptr, ptr }
+%t2 = type <{ ptr, i32, [4 x i8] }>
+%t3 = type { ptr }
 
 @_ZN4Foam10SLListBase13endConstIter_E = external global %t1
 
@@ -36,12 +36,12 @@ if.then.i181:                                     ; preds = %if.end75
 
 if.then17.i:                                      ; preds = %if.end75
   %tobool.i.i.i = icmp eq i32 undef, 0
-  %0 = load i64*, i64** undef, align 8
-  %agg.tmp.sroa.3.0.copyload33.in.i = select i1 %tobool.i.i.i, i64* bitcast (%t3** getelementptr inbounds (%t1, %t1* @_ZN4Foam10SLListBase13endConstIter_E, i64 0, i32 1) to i64*), i64* %0
-  %agg.tmp.sroa.3.0.copyload33.i = load i64, i64* %agg.tmp.sroa.3.0.copyload33.in.i, align 8
-  %1 = inttoptr i64 %agg.tmp.sroa.3.0.copyload33.i to %t3*
-  %2 = load %t3*, %t3** getelementptr inbounds (%t1, %t1* @_ZN4Foam10SLListBase13endConstIter_E, i64 0, i32 1), align 8
-  %cmp.i37.i = icmp eq %t3* %1, %2
+  %0 = load ptr, ptr undef, align 8
+  %agg.tmp.sroa.3.0.copyload33.in.i = select i1 %tobool.i.i.i, ptr getelementptr inbounds (%t1, ptr @_ZN4Foam10SLListBase13endConstIter_E, i64 0, i32 1), ptr %0
+  %agg.tmp.sroa.3.0.copyload33.i = load i64, ptr %agg.tmp.sroa.3.0.copyload33.in.i, align 8
+  %1 = inttoptr i64 %agg.tmp.sroa.3.0.copyload33.i to ptr
+  %2 = load ptr, ptr getelementptr inbounds (%t1, ptr @_ZN4Foam10SLListBase13endConstIter_E, i64 0, i32 1), align 8
+  %cmp.i37.i = icmp eq ptr %1, %2
   br i1 %cmp.i37.i, label %invoke.cont79, label %for.body.lr.ph.i
 
 ; CHECK-LABEL: @_ZN4FoamrsIbEERNS_7IstreamES2_RNS_4ListIT_EE

diff  --git a/llvm/test/CodeGen/PowerPC/preincprep-invoke.ll b/llvm/test/CodeGen/PowerPC/preincprep-invoke.ll
index e52e29414c28..ae3680057a74 100644
--- a/llvm/test/CodeGen/PowerPC/preincprep-invoke.ll
+++ b/llvm/test/CodeGen/PowerPC/preincprep-invoke.ll
@@ -11,7 +11,7 @@ declare void @_ZN13CStdOutStream5FlushEv()
 
 declare i32 @__gxx_personality_v0(...)
 
-define void @_Z11GetPasswordP13CStdOutStreamb(i1 %cond, i8 %arg1, i8* %arg2) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @_Z11GetPasswordP13CStdOutStreamb(i1 %cond, i8 %arg1, ptr %arg2) personality ptr @__gxx_personality_v0 {
 entry:
   br label %for.cond.i.i
 
@@ -27,7 +27,7 @@ invoke.cont:                                      ; preds = %_ZN11CStringBaseIcE
           to label %invoke.cont4 unwind label %lpad
 
 invoke.cont4:                                     ; preds = %invoke.cont
-  %call7 = invoke i8* @getpass()
+  %call7 = invoke ptr @getpass()
           to label %for.cond.i.i30 unwind label %lpad
 
 ; CHECK-LABEL: @_Z11GetPasswordP13CStdOutStreamb
@@ -35,18 +35,18 @@ invoke.cont4:                                     ; preds = %invoke.cont
 
 for.cond.i.i30:                                   ; preds = %for.cond.i.i30, %invoke.cont4
   %indvars.iv.i.i26 = phi i64 [ %indvars.iv.next.i.i29, %for.cond.i.i30 ], [ 0, %invoke.cont4 ]
-  %arrayidx.i.i27 = getelementptr inbounds i8, i8* %call7, i64 %indvars.iv.i.i26
-  %0 = load i8, i8* %arrayidx.i.i27, align 1
+  %arrayidx.i.i27 = getelementptr inbounds i8, ptr %call7, i64 %indvars.iv.i.i26
+  %0 = load i8, ptr %arrayidx.i.i27, align 1
   %1 = add i8 %0, %arg1
-  store i8 %1, i8* %arg2, align 1
+  store i8 %1, ptr %arg2, align 1
   %indvars.iv.next.i.i29 = add nuw nsw i64 %indvars.iv.i.i26, 1
   br label %for.cond.i.i30
 
 lpad:                                             ; preds = %invoke.cont4, %invoke.cont, %_ZN11CStringBaseIcEC2EPKc.exit.critedge
-  %2 = landingpad { i8*, i32 }
+  %2 = landingpad { ptr, i32 }
           cleanup
-  resume { i8*, i32 } undef
+  resume { ptr, i32 } undef
 }
 
-declare i8* @getpass()
+declare ptr @getpass()
 

diff  --git a/llvm/test/CodeGen/PowerPC/private.ll b/llvm/test/CodeGen/PowerPC/private.ll
index 7eb4423b7b7b..4b0be7b664c5 100644
--- a/llvm/test/CodeGen/PowerPC/private.ll
+++ b/llvm/test/CodeGen/PowerPC/private.ll
@@ -12,7 +12,7 @@ define i32 @bar() nounwind {
         call void @foo()
 
 ; CHECK: lis{{.*}}.Lbaz
-	%1 = load i32, i32* @baz, align 4
+	%1 = load i32, ptr @baz, align 4
         ret i32 %1
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/pwr7-gt-nop.ll b/llvm/test/CodeGen/PowerPC/pwr7-gt-nop.ll
index b7c899c0f0d4..9e2a8417ac41 100644
--- a/llvm/test/CodeGen/PowerPC/pwr7-gt-nop.ll
+++ b/llvm/test/CodeGen/PowerPC/pwr7-gt-nop.ll
@@ -3,17 +3,17 @@ target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
 target triple = "powerpc64-unknown-linux-gnu"
 
 ; Function Attrs: nounwind
-define void @foo(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c, float* nocapture %d) #0 {
+define void @foo(ptr nocapture %a, ptr nocapture %b, ptr nocapture readonly %c, ptr nocapture %d) #0 {
 
 ; CHECK-LABEL: @foo
 
 entry:
-  %0 = load float, float* %b, align 4
-  store float %0, float* %a, align 4
-  %1 = load float, float* %c, align 4
-  store float %1, float* %b, align 4
-  %2 = load float, float* %a, align 4
-  store float %2, float* %d, align 4
+  %0 = load float, ptr %b, align 4
+  store float %0, ptr %a, align 4
+  %1 = load float, ptr %c, align 4
+  store float %1, ptr %b, align 4
+  %2 = load float, ptr %a, align 4
+  store float %2, ptr %d, align 4
   ret void
 
 ; CHECK: lwz [[REG1:[0-9]+]], 0(4)

diff  --git a/llvm/test/CodeGen/PowerPC/quadint-return.ll b/llvm/test/CodeGen/PowerPC/quadint-return.ll
index ab93e7ee7d1e..41842ddabfa7 100644
--- a/llvm/test/CodeGen/PowerPC/quadint-return.ll
+++ b/llvm/test/CodeGen/PowerPC/quadint-return.ll
@@ -7,8 +7,8 @@ target triple = "powerpc64-unknown-linux-gnu"
 define i128 @foo() nounwind {
 entry:
   %x = alloca i128, align 16
-  store i128 27, i128* %x, align 16
-  %0 = load i128, i128* %x, align 16
+  store i128 27, ptr %x, align 16
+  %0 = load i128, ptr %x, align 16
   ret i128 %0
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/read-set-flm.ll b/llvm/test/CodeGen/PowerPC/read-set-flm.ll
index 83c8edf2f688..9d47eec6a2d3 100644
--- a/llvm/test/CodeGen/PowerPC/read-set-flm.ll
+++ b/llvm/test/CodeGen/PowerPC/read-set-flm.ll
@@ -74,7 +74,7 @@ entry:
   ret double %7
 }
 
-define void @cse_nomerge(double* %f1, double* %f2, double %f3) #0 {
+define void @cse_nomerge(ptr %f1, ptr %f2, double %f3) #0 {
 ; CHECK-LABEL: cse_nomerge:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mflr 0
@@ -103,15 +103,15 @@ define void @cse_nomerge(double* %f1, double* %f2, double %f3) #0 {
 ; CHECK-NEXT:    blr
 entry:
   %0 = call double @llvm.ppc.readflm()
-  store double %0, double* %f1, align 8
+  store double %0, ptr %f1, align 8
   call void @effect_func()
   %1 = call double @llvm.ppc.readflm()
-  store double %1, double* %f2, align 8
+  store double %1, ptr %f2, align 8
   %2 = call contract double @llvm.ppc.setflm(double %f3)
   ret void
 }
 
-define void @cse_nomerge_readonly(double* %f1, double* %f2, double %f3) #0 {
+define void @cse_nomerge_readonly(ptr %f1, ptr %f2, double %f3) #0 {
 ; CHECK-LABEL: cse_nomerge_readonly:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mflr 0
@@ -140,10 +140,10 @@ define void @cse_nomerge_readonly(double* %f1, double* %f2, double %f3) #0 {
 ; CHECK-NEXT:    blr
 entry:
   %0 = call double @llvm.ppc.readflm()
-  store double %0, double* %f1, align 8
+  store double %0, ptr %f1, align 8
   call void @readonly_func()
   %1 = call double @llvm.ppc.readflm()
-  store double %1, double* %f2, align 8
+  store double %1, ptr %f2, align 8
   %2 = call contract double @llvm.ppc.setflm(double %f3)
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/reduce_cr.ll b/llvm/test/CodeGen/PowerPC/reduce_cr.ll
index 6ef00d52149a..b1cac1cbc871 100644
--- a/llvm/test/CodeGen/PowerPC/reduce_cr.ll
+++ b/llvm/test/CodeGen/PowerPC/reduce_cr.ll
@@ -22,14 +22,14 @@ target triple = "powerpc64le-grtev4-linux-gnu"
 ;CHECK-NEXT: - BB4[optional1]: float = 0.625, int = 8
 
 
-define void @loop_test(i32* %tags, i32 %count) {
+define void @loop_test(ptr %tags, i32 %count) {
 entry:
   br label %for.check
 for.check:
   %count.loop = phi i32 [%count, %entry], [%count.sub, %for.latch]
   %done.count = icmp ugt i32 %count.loop, 0
-  %tag_ptr = getelementptr inbounds i32, i32* %tags, i32 %count
-  %tag = load i32, i32* %tag_ptr
+  %tag_ptr = getelementptr inbounds i32, ptr %tags, i32 %count
+  %tag = load i32, ptr %tag_ptr
   %done.tag = icmp eq i32 %tag, 0
   %done = and i1 %done.count, %done.tag
   br i1 %done, label %test1, label %exit, !prof !1

diff  --git a/llvm/test/CodeGen/PowerPC/reduce_scalarization.ll b/llvm/test/CodeGen/PowerPC/reduce_scalarization.ll
index 2c5bc80b7965..b7c7fe2f93f5 100644
--- a/llvm/test/CodeGen/PowerPC/reduce_scalarization.ll
+++ b/llvm/test/CodeGen/PowerPC/reduce_scalarization.ll
@@ -19,7 +19,7 @@
 ; RUN: FileCheck %s --check-prefix=AIX-32
 
 ; Function Attrs: norecurse nounwind readonly
-define dso_local <2 x double> @test1(<2 x float>* nocapture readonly %Ptr) {
+define dso_local <2 x double> @test1(ptr nocapture readonly %Ptr) {
 ; CHECK-LABEL: test1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd f0, 0(r3)
@@ -41,13 +41,13 @@ define dso_local <2 x double> @test1(<2 x float>* nocapture readonly %Ptr) {
 ; AIX-32-NEXT:    xxmrghd v2, vs1, vs0
 ; AIX-32-NEXT:    blr
 entry:
-  %0 = load <2 x float>, <2 x float>* %Ptr, align 8
+  %0 = load <2 x float>, ptr %Ptr, align 8
   %1 = fpext <2 x float> %0 to <2 x double>
   ret <2 x double> %1
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define dso_local <2 x double> @test2(<2 x float>* nocapture readonly %a, <2 x float>* nocapture readonly %b) {
+define dso_local <2 x double> @test2(ptr nocapture readonly %a, ptr nocapture readonly %b) {
 ; CHECK-LABEL: test2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd f0, 0(r4)
@@ -82,8 +82,8 @@ define dso_local <2 x double> @test2(<2 x float>* nocapture readonly %a, <2 x fl
 ; AIX-32-NEXT:    xxmrghd v2, vs0, vs1
 ; AIX-32-NEXT:    blr
 entry:
-  %0 = load <2 x float>, <2 x float>* %a, align 8
-  %1 = load <2 x float>, <2 x float>* %b, align 8
+  %0 = load <2 x float>, ptr %a, align 8
+  %1 = load <2 x float>, ptr %b, align 8
   %sub = fsub <2 x float> %0, %1
   %2 = fpext <2 x float> %sub to <2 x double>
   ret <2 x double> %2
@@ -91,7 +91,7 @@ entry:
 
 ; Function Attrs: norecurse nounwind readonly
 ; Function Attrs: norecurse nounwind readonly
-define dso_local <2 x double> @test3(<2 x float>* nocapture readonly %a, <2 x float>* nocapture readonly %b) {
+define dso_local <2 x double> @test3(ptr nocapture readonly %a, ptr nocapture readonly %b) {
 ; CHECK-LABEL: test3:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd f0, 0(r4)
@@ -126,8 +126,8 @@ define dso_local <2 x double> @test3(<2 x float>* nocapture readonly %a, <2 x fl
 ; AIX-32-NEXT:    xxmrghd v2, vs0, vs1
 ; AIX-32-NEXT:    blr
 entry:
-  %0 = load <2 x float>, <2 x float>* %a, align 8
-  %1 = load <2 x float>, <2 x float>* %b, align 8
+  %0 = load <2 x float>, ptr %a, align 8
+  %1 = load <2 x float>, ptr %b, align 8
   %sub = fadd <2 x float> %0, %1
   %2 = fpext <2 x float> %sub to <2 x double>
   ret <2 x double> %2
@@ -135,7 +135,7 @@ entry:
 
 ; Function Attrs: norecurse nounwind readonly
 ; Function Attrs: norecurse nounwind readonly
-define dso_local <2 x double> @test4(<2 x float>* nocapture readonly %a, <2 x float>* nocapture readonly %b) {
+define dso_local <2 x double> @test4(ptr nocapture readonly %a, ptr nocapture readonly %b) {
 ; CHECK-LABEL: test4:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd f0, 0(r4)
@@ -170,8 +170,8 @@ define dso_local <2 x double> @test4(<2 x float>* nocapture readonly %a, <2 x fl
 ; AIX-32-NEXT:    xxmrghd v2, vs0, vs1
 ; AIX-32-NEXT:    blr
 entry:
-  %0 = load <2 x float>, <2 x float>* %a, align 8
-  %1 = load <2 x float>, <2 x float>* %b, align 8
+  %0 = load <2 x float>, ptr %a, align 8
+  %1 = load <2 x float>, ptr %b, align 8
   %sub = fmul <2 x float> %0, %1
   %2 = fpext <2 x float> %sub to <2 x double>
   ret <2 x double> %2
@@ -216,7 +216,7 @@ define dso_local <2 x double> @test5(<2 x double> %a) {
 ; AIX-32-NEXT:    xvadddp v2, vs0, v2
 ; AIX-32-NEXT:    blr
 entry:
-  %0 = load <2 x float>, <2 x float>* @G, align 8
+  %0 = load <2 x float>, ptr @G, align 8
   %1 = fpext <2 x float> %0 to <2 x double>
   %add = fadd <2 x double> %1, %a
   ret <2 x double> %add
@@ -299,7 +299,7 @@ bb:
   br label %bb1
 
 bb1:                                              ; preds = %bb
-  %i = load <2 x float>, <2 x float>* bitcast (i8* getelementptr inbounds ([25 x %0], [25 x %0]* @Glob1, i64 0, i64 6, i32 20, i64 22392) to <2 x float>*), align 8
+  %i = load <2 x float>, ptr getelementptr inbounds ([25 x %0], ptr @Glob1, i64 0, i64 6, i32 20, i64 22392), align 8
   %i2 = fpext <2 x float> %i to <2 x double>
   %i3 = fcmp contract oeq <2 x double> zeroinitializer, %i2
   %i4 = shufflevector <2 x i1> %i3, <2 x i1> poison, <2 x i32> <i32 1, i32 undef>

diff  --git a/llvm/test/CodeGen/PowerPC/reduce_scalarization02.ll b/llvm/test/CodeGen/PowerPC/reduce_scalarization02.ll
index 1dc40edf7146..ea77ed26e97e 100644
--- a/llvm/test/CodeGen/PowerPC/reduce_scalarization02.ll
+++ b/llvm/test/CodeGen/PowerPC/reduce_scalarization02.ll
@@ -8,7 +8,7 @@
 
 ; Test reduce scalarization in fpext v2f32 to v2f64 from the extract_subvector v4f32 node.
 
-define dso_local void @test(<4 x float>* nocapture readonly %a, <2 x double>* nocapture %b, <2 x double>* nocapture %c) {
+define dso_local void @test(ptr nocapture readonly %a, ptr nocapture %b, ptr nocapture %c) {
 ; CHECK-LABEL: test:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxv vs0, 0(r3)
@@ -31,19 +31,19 @@ define dso_local void @test(<4 x float>* nocapture readonly %a, <2 x double>* no
 ; CHECK-BE-NEXT:    stxv vs0, 0(r5)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load <4 x float>, <4 x float>* %a, align 16
+  %0 = load <4 x float>, ptr %a, align 16
   %shuffle = shufflevector <4 x float> %0, <4 x float> undef, <2 x i32> <i32 0, i32 1>
   %shuffle1 = shufflevector <4 x float> %0, <4 x float> undef, <2 x i32> <i32 2, i32 3>
   %vecinit4 = fpext <2 x float> %shuffle to <2 x double>
   %vecinit11 = fpext <2 x float> %shuffle1 to <2 x double>
-  store <2 x double> %vecinit4, <2 x double>* %b, align 16
-  store <2 x double> %vecinit11, <2 x double>* %c, align 16
+  store <2 x double> %vecinit4, ptr %b, align 16
+  store <2 x double> %vecinit11, ptr %c, align 16
   ret void
 }
 
 ; Ensure we don't crash for wider types
 
-define dso_local void @test2(<16 x float>* nocapture readonly %a, <2 x double>* nocapture %b, <2 x double>* nocapture %c) {
+define dso_local void @test2(ptr nocapture readonly %a, ptr nocapture %b, ptr nocapture %c) {
 ; CHECK-LABEL: test2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxv vs0, 0(r3)
@@ -66,12 +66,12 @@ define dso_local void @test2(<16 x float>* nocapture readonly %a, <2 x double>*
 ; CHECK-BE-NEXT:    stxv vs0, 0(r5)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load <16 x float>, <16 x float>* %a, align 16
+  %0 = load <16 x float>, ptr %a, align 16
   %shuffle = shufflevector <16 x float> %0, <16 x float> undef, <2 x i32> <i32 0, i32 1>
   %shuffle1 = shufflevector <16 x float> %0, <16 x float> undef, <2 x i32> <i32 2, i32 3>
   %vecinit4 = fpext <2 x float> %shuffle to <2 x double>
   %vecinit11 = fpext <2 x float> %shuffle1 to <2 x double>
-  store <2 x double> %vecinit4, <2 x double>* %b, align 16
-  store <2 x double> %vecinit11, <2 x double>* %c, align 16
+  store <2 x double> %vecinit4, ptr %b, align 16
+  store <2 x double> %vecinit11, ptr %c, align 16
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/redundant-copy-after-tail-dup.ll b/llvm/test/CodeGen/PowerPC/redundant-copy-after-tail-dup.ll
index f6506b3c87f8..376655bfb2b0 100644
--- a/llvm/test/CodeGen/PowerPC/redundant-copy-after-tail-dup.ll
+++ b/llvm/test/CodeGen/PowerPC/redundant-copy-after-tail-dup.ll
@@ -12,9 +12,9 @@
 %"class.G" = type { [2 x i32] }
 %"class.H" = type { %"struct.A" }
 %"struct.A" = type { %"struct.B" }
-%"struct.B" = type { i32*, i32*, i32* }
+%"struct.B" = type { ptr, ptr, ptr }
 
-define dso_local i1 @t(%class.A* %this, i32 %color, i32 %vertex) local_unnamed_addr {
+define dso_local i1 @t(ptr %this, i32 %color, i32 %vertex) local_unnamed_addr {
 ; CHECK-P9-LABEL: t:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    li r5, 1
@@ -48,8 +48,8 @@ lor.lhs.false:                                    ; preds = %entry
   br i1 %or.cond, label %cleanup16, label %for.inc
 
 for.inc:                                          ; preds = %lor.lhs.false, %land.lhs.true
-  %arrayidx.i31.1 = getelementptr inbounds %class.A, %class.A* %this, i64 0, i32 8, i32 0, i64 undef
-  %0 = load i16, i16* %arrayidx.i31.1, align 2
+  %arrayidx.i31.1 = getelementptr inbounds %class.A, ptr %this, i64 0, i32 8, i32 0, i64 undef
+  %0 = load i16, ptr %arrayidx.i31.1, align 2
   %cmp8.1 = icmp ult i16 %0, 2
   br i1 %cmp8.1, label %land.lhs.true.1, label %lor.lhs.false.1
 

diff  --git a/llvm/test/CodeGen/PowerPC/reg-coalesce-simple.ll b/llvm/test/CodeGen/PowerPC/reg-coalesce-simple.ll
index 06af1c6245a8..2e810bfd445e 100644
--- a/llvm/test/CodeGen/PowerPC/reg-coalesce-simple.ll
+++ b/llvm/test/CodeGen/PowerPC/reg-coalesce-simple.ll
@@ -2,9 +2,9 @@
 
 %struct.foo = type { i32, i32, [0 x i8] }
 
-define i32 @test(%struct.foo* %X) nounwind {
-        %tmp1 = getelementptr %struct.foo, %struct.foo* %X, i32 0, i32 2, i32 100            ; <i8*> [#uses=1]
-        %tmp = load i8, i8* %tmp1           ; <i8> [#uses=1]
+define i32 @test(ptr %X) nounwind {
+        %tmp1 = getelementptr %struct.foo, ptr %X, i32 0, i32 2, i32 100            ; <ptr> [#uses=1]
+        %tmp = load i8, ptr %tmp1           ; <i8> [#uses=1]
         %tmp2 = zext i8 %tmp to i32             ; <i32> [#uses=1]
         ret i32 %tmp2
 }

diff  --git a/llvm/test/CodeGen/PowerPC/register-pressure-reduction.ll b/llvm/test/CodeGen/PowerPC/register-pressure-reduction.ll
index e26e3772632f..87f6ffaa2481 100644
--- a/llvm/test/CodeGen/PowerPC/register-pressure-reduction.ll
+++ b/llvm/test/CodeGen/PowerPC/register-pressure-reduction.ll
@@ -130,6 +130,6 @@ define float @foo_float_reuse_const(float %0, float %1, float %2, float %3) {
   %7 = fmul contract reassoc nsz float %6, 0x3DB2533FE0000000
   %8 = fadd contract reassoc nsz float %7, %5
   %9 = fmul contract reassoc nsz float %1, 0xBDB2533FE0000000
-  store float %9, float* @global_val, align 4
+  store float %9, ptr @global_val, align 4
   ret float %8
 }

diff  --git a/llvm/test/CodeGen/PowerPC/reloc-align.ll b/llvm/test/CodeGen/PowerPC/reloc-align.ll
index 929d2bf86c80..1640e3d2ba9d 100644
--- a/llvm/test/CodeGen/PowerPC/reloc-align.ll
+++ b/llvm/test/CodeGen/PowerPC/reloc-align.ll
@@ -15,16 +15,15 @@ target triple = "powerpc64-unknown-linux-gnu"
 ; Function Attrs: nounwind readonly
 define signext i32 @main() #0 {
 entry:
-  %call = tail call fastcc signext i32 @func_90(%struct.S1* byval(%struct.S1) bitcast ({ i8, i8, i8, i8, i8, i8, i8, i8 }* @main.l_1554 to %struct.S1*))
+  %call = tail call fastcc signext i32 @func_90(ptr byval(%struct.S1) @main.l_1554)
 ; CHECK-NOT: ld {{[0-9]+}}, main.l_1554 at toc@l
   ret i32 %call
 }
 
 ; Function Attrs: nounwind readonly
-define internal fastcc signext i32 @func_90(%struct.S1* byval(%struct.S1) nocapture %p_91) #0 {
+define internal fastcc signext i32 @func_90(ptr byval(%struct.S1) nocapture %p_91) #0 {
 entry:
-  %0 = bitcast %struct.S1* %p_91 to i64*
-  %bf.load = load i64, i64* %0, align 1
+  %bf.load = load i64, ptr %p_91, align 1
   %bf.shl = shl i64 %bf.load, 26
   %bf.ashr = ashr i64 %bf.shl, 54
   %bf.cast = trunc i64 %bf.ashr to i32

diff  --git a/llvm/test/CodeGen/PowerPC/remat-imm.ll b/llvm/test/CodeGen/PowerPC/remat-imm.ll
index 91275a633e29..66cb34a979df 100644
--- a/llvm/test/CodeGen/PowerPC/remat-imm.ll
+++ b/llvm/test/CodeGen/PowerPC/remat-imm.ll
@@ -9,8 +9,8 @@ define i32 @main() nounwind {
 entry:
 ; CHECK: li 4, 128
 ; CHECK-NOT: mr 4, {{.*}}
-  %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i32 0, i32 0), i32 128, i32 128) nounwind
+  %call = tail call i32 (ptr, ...) @printf(ptr @.str, i32 128, i32 128) nounwind
   ret i32 0
 }
 
-declare i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(ptr nocapture, ...) nounwind

diff  --git a/llvm/test/CodeGen/PowerPC/rematerializable-instruction-machine-licm.ll b/llvm/test/CodeGen/PowerPC/rematerializable-instruction-machine-licm.ll
index 423c61d800c0..82a50d6ac174 100644
--- a/llvm/test/CodeGen/PowerPC/rematerializable-instruction-machine-licm.ll
+++ b/llvm/test/CodeGen/PowerPC/rematerializable-instruction-machine-licm.ll
@@ -3,7 +3,7 @@
 target datalayout = "e-m:e-i64:64-n32:64"
 target triple = "powerpc64le-unknown-linux-gnu"
 
-define zeroext i32 @test1(i64 %0, i64* %1) {
+define zeroext i32 @test1(i64 %0, ptr %1) {
 ; CHECK-LABEL: test1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    stdu 1, -720(1)
@@ -433,23 +433,23 @@ define zeroext i32 @test1(i64 %0, i64* %1) {
 ; CHECK-NEXT:    ld 14, 576(1) # 8-byte Folded Reload
 ; CHECK-NEXT:    addi 1, 1, 720
 ; CHECK-NEXT:    blr
-  %3 = getelementptr inbounds i64, i64* %1, i64 144115188075855
-  %4 = getelementptr i64, i64* %1, i64 144115586875855
-  %5 = getelementptr i64, i64* %1, i64 144115587175855
-  %6 = getelementptr i64, i64* %1, i64 144115587075855
-  %7 = getelementptr i64, i64* %1, i64 144115586975855
-  %8 = getelementptr i64, i64* %1, i64 144115587275855
-  %9 = getelementptr i64, i64* %1, i64 144115587575855
-  %10 = getelementptr i64, i64* %1, i64 144115587475855
-  %11 = getelementptr i64, i64* %1, i64 144115587375855
-  %12 = getelementptr i64, i64* %1, i64 144115587675855
-  %13 = getelementptr i64, i64* %1, i64 144115587975855
-  %14 = getelementptr i64, i64* %1, i64 144115587875855
-  %15 = getelementptr i64, i64* %1, i64 144115587775855
-  %16 = getelementptr i64, i64* %1, i64 144115588075855
-  %17 = getelementptr i64, i64* %1, i64 144115588375855
-  %18 = getelementptr i64, i64* %1, i64 144115588275855
-  %19 = getelementptr i64, i64* %1, i64 144115588175855
+  %3 = getelementptr inbounds i64, ptr %1, i64 144115188075855
+  %4 = getelementptr i64, ptr %1, i64 144115586875855
+  %5 = getelementptr i64, ptr %1, i64 144115587175855
+  %6 = getelementptr i64, ptr %1, i64 144115587075855
+  %7 = getelementptr i64, ptr %1, i64 144115586975855
+  %8 = getelementptr i64, ptr %1, i64 144115587275855
+  %9 = getelementptr i64, ptr %1, i64 144115587575855
+  %10 = getelementptr i64, ptr %1, i64 144115587475855
+  %11 = getelementptr i64, ptr %1, i64 144115587375855
+  %12 = getelementptr i64, ptr %1, i64 144115587675855
+  %13 = getelementptr i64, ptr %1, i64 144115587975855
+  %14 = getelementptr i64, ptr %1, i64 144115587875855
+  %15 = getelementptr i64, ptr %1, i64 144115587775855
+  %16 = getelementptr i64, ptr %1, i64 144115588075855
+  %17 = getelementptr i64, ptr %1, i64 144115588375855
+  %18 = getelementptr i64, ptr %1, i64 144115588275855
+  %19 = getelementptr i64, ptr %1, i64 144115588175855
   br label %20
 
 20:                                               ; preds = %2, %109
@@ -459,157 +459,157 @@ define zeroext i32 @test1(i64 %0, i64* %1) {
 22:                                               ; preds = %22, %20
   %23 = phi i64 [ 0, %20 ], [ %107, %22 ]
   %24 = mul i64 %23, 400000
-  %25 = getelementptr i64, i64* %3, i64 %24
+  %25 = getelementptr i64, ptr %3, i64 %24
   %26 = or i64 %23, 1
   %27 = mul i64 %26, 400000
-  %28 = getelementptr i64, i64* %3, i64 %27
+  %28 = getelementptr i64, ptr %3, i64 %27
   %29 = or i64 %23, 2
   %30 = mul i64 %29, 400000
-  %31 = getelementptr i64, i64* %3, i64 %30
+  %31 = getelementptr i64, ptr %3, i64 %30
   %32 = or i64 %23, 3
   %33 = mul i64 %32, 400000
-  %34 = getelementptr i64, i64* %3, i64 %33
+  %34 = getelementptr i64, ptr %3, i64 %33
   %35 = mul i64 %23, 400000
   %36 = add i64 %35, 1600000
-  %37 = getelementptr i64, i64* %3, i64 %36
+  %37 = getelementptr i64, ptr %3, i64 %36
   %38 = mul i64 %23, 400000
   %39 = add i64 %38, 2000000
-  %40 = getelementptr i64, i64* %3, i64 %39
+  %40 = getelementptr i64, ptr %3, i64 %39
   %41 = mul i64 %23, 400000
   %42 = add i64 %41, 2400000
-  %43 = getelementptr i64, i64* %3, i64 %42
+  %43 = getelementptr i64, ptr %3, i64 %42
   %44 = mul i64 %23, 400000
   %45 = add i64 %44, 2800000
-  %46 = getelementptr i64, i64* %3, i64 %45
+  %46 = getelementptr i64, ptr %3, i64 %45
   %47 = mul i64 %23, 400000
   %48 = add i64 %47, 3200000
-  %49 = getelementptr i64, i64* %3, i64 %48
+  %49 = getelementptr i64, ptr %3, i64 %48
   %50 = mul i64 %23, 400000
   %51 = add i64 %50, 3600000
-  %52 = getelementptr i64, i64* %3, i64 %51
+  %52 = getelementptr i64, ptr %3, i64 %51
   %53 = mul i64 %23, 400000
   %54 = add i64 %53, 4000000
-  %55 = getelementptr i64, i64* %3, i64 %54
+  %55 = getelementptr i64, ptr %3, i64 %54
   %56 = mul i64 %23, 400000
   %57 = add i64 %56, 4400000
-  %58 = getelementptr i64, i64* %3, i64 %57
-  %59 = getelementptr inbounds i64, i64* %25, i64 400000
-  %60 = getelementptr inbounds i64, i64* %28, i64 400000
-  %61 = getelementptr inbounds i64, i64* %31, i64 400000
-  %62 = getelementptr inbounds i64, i64* %34, i64 400000
-  %63 = getelementptr inbounds i64, i64* %37, i64 400000
-  %64 = getelementptr inbounds i64, i64* %40, i64 400000
-  %65 = getelementptr inbounds i64, i64* %43, i64 400000
-  %66 = getelementptr inbounds i64, i64* %46, i64 400000
-  %67 = getelementptr inbounds i64, i64* %49, i64 400000
-  %68 = getelementptr inbounds i64, i64* %52, i64 400000
-  %69 = getelementptr inbounds i64, i64* %55, i64 400000
-  %70 = getelementptr inbounds i64, i64* %58, i64 400000
-  store i64 %0, i64* %59, align 8
-  store i64 %0, i64* %60, align 8
-  store i64 %0, i64* %61, align 8
-  store i64 %0, i64* %62, align 8
-  store i64 %0, i64* %63, align 8
-  store i64 %0, i64* %64, align 8
-  store i64 %0, i64* %65, align 8
-  store i64 %0, i64* %66, align 8
-  store i64 %0, i64* %67, align 8
-  store i64 %0, i64* %68, align 8
-  store i64 %0, i64* %69, align 8
-  store i64 %0, i64* %70, align 8
-  %71 = getelementptr inbounds i64, i64* %25, i64 700000
-  %72 = getelementptr inbounds i64, i64* %28, i64 700000
-  %73 = getelementptr inbounds i64, i64* %31, i64 700000
-  %74 = getelementptr inbounds i64, i64* %34, i64 700000
-  %75 = getelementptr inbounds i64, i64* %37, i64 700000
-  %76 = getelementptr inbounds i64, i64* %40, i64 700000
-  %77 = getelementptr inbounds i64, i64* %43, i64 700000
-  %78 = getelementptr inbounds i64, i64* %46, i64 700000
-  %79 = getelementptr inbounds i64, i64* %49, i64 700000
-  %80 = getelementptr inbounds i64, i64* %52, i64 700000
-  %81 = getelementptr inbounds i64, i64* %55, i64 700000
-  %82 = getelementptr inbounds i64, i64* %58, i64 700000
-  store i64 %0, i64* %71, align 8
-  store i64 %0, i64* %72, align 8
-  store i64 %0, i64* %73, align 8
-  store i64 %0, i64* %74, align 8
-  store i64 %0, i64* %75, align 8
-  store i64 %0, i64* %76, align 8
-  store i64 %0, i64* %77, align 8
-  store i64 %0, i64* %78, align 8
-  store i64 %0, i64* %79, align 8
-  store i64 %0, i64* %80, align 8
-  store i64 %0, i64* %81, align 8
-  store i64 %0, i64* %82, align 8
-  %83 = getelementptr inbounds i64, i64* %25, i64 600000
-  %84 = getelementptr inbounds i64, i64* %28, i64 600000
-  %85 = getelementptr inbounds i64, i64* %31, i64 600000
-  %86 = getelementptr inbounds i64, i64* %34, i64 600000
-  %87 = getelementptr inbounds i64, i64* %37, i64 600000
-  %88 = getelementptr inbounds i64, i64* %40, i64 600000
-  %89 = getelementptr inbounds i64, i64* %43, i64 600000
-  %90 = getelementptr inbounds i64, i64* %46, i64 600000
-  %91 = getelementptr inbounds i64, i64* %49, i64 600000
-  %92 = getelementptr inbounds i64, i64* %52, i64 600000
-  %93 = getelementptr inbounds i64, i64* %55, i64 600000
-  %94 = getelementptr inbounds i64, i64* %58, i64 600000
-  store i64 %0, i64* %83, align 8
-  store i64 %0, i64* %84, align 8
-  store i64 %0, i64* %85, align 8
-  store i64 %0, i64* %86, align 8
-  store i64 %0, i64* %87, align 8
-  store i64 %0, i64* %88, align 8
-  store i64 %0, i64* %89, align 8
-  store i64 %0, i64* %90, align 8
-  store i64 %0, i64* %91, align 8
-  store i64 %0, i64* %92, align 8
-  store i64 %0, i64* %93, align 8
-  store i64 %0, i64* %94, align 8
-  %95 = getelementptr inbounds i64, i64* %25, i64 500000
-  %96 = getelementptr inbounds i64, i64* %28, i64 500000
-  %97 = getelementptr inbounds i64, i64* %31, i64 500000
-  %98 = getelementptr inbounds i64, i64* %34, i64 500000
-  %99 = getelementptr inbounds i64, i64* %37, i64 500000
-  %100 = getelementptr inbounds i64, i64* %40, i64 500000
-  %101 = getelementptr inbounds i64, i64* %43, i64 500000
-  %102 = getelementptr inbounds i64, i64* %46, i64 500000
-  %103 = getelementptr inbounds i64, i64* %49, i64 500000
-  %104 = getelementptr inbounds i64, i64* %52, i64 500000
-  %105 = getelementptr inbounds i64, i64* %55, i64 500000
-  %106 = getelementptr inbounds i64, i64* %58, i64 500000
-  store i64 %0, i64* %95, align 8
-  store i64 %0, i64* %96, align 8
-  store i64 %0, i64* %97, align 8
-  store i64 %0, i64* %98, align 8
-  store i64 %0, i64* %99, align 8
-  store i64 %0, i64* %100, align 8
-  store i64 %0, i64* %101, align 8
-  store i64 %0, i64* %102, align 8
-  store i64 %0, i64* %103, align 8
-  store i64 %0, i64* %104, align 8
-  store i64 %0, i64* %105, align 8
-  store i64 %0, i64* %106, align 8
+  %58 = getelementptr i64, ptr %3, i64 %57
+  %59 = getelementptr inbounds i64, ptr %25, i64 400000
+  %60 = getelementptr inbounds i64, ptr %28, i64 400000
+  %61 = getelementptr inbounds i64, ptr %31, i64 400000
+  %62 = getelementptr inbounds i64, ptr %34, i64 400000
+  %63 = getelementptr inbounds i64, ptr %37, i64 400000
+  %64 = getelementptr inbounds i64, ptr %40, i64 400000
+  %65 = getelementptr inbounds i64, ptr %43, i64 400000
+  %66 = getelementptr inbounds i64, ptr %46, i64 400000
+  %67 = getelementptr inbounds i64, ptr %49, i64 400000
+  %68 = getelementptr inbounds i64, ptr %52, i64 400000
+  %69 = getelementptr inbounds i64, ptr %55, i64 400000
+  %70 = getelementptr inbounds i64, ptr %58, i64 400000
+  store i64 %0, ptr %59, align 8
+  store i64 %0, ptr %60, align 8
+  store i64 %0, ptr %61, align 8
+  store i64 %0, ptr %62, align 8
+  store i64 %0, ptr %63, align 8
+  store i64 %0, ptr %64, align 8
+  store i64 %0, ptr %65, align 8
+  store i64 %0, ptr %66, align 8
+  store i64 %0, ptr %67, align 8
+  store i64 %0, ptr %68, align 8
+  store i64 %0, ptr %69, align 8
+  store i64 %0, ptr %70, align 8
+  %71 = getelementptr inbounds i64, ptr %25, i64 700000
+  %72 = getelementptr inbounds i64, ptr %28, i64 700000
+  %73 = getelementptr inbounds i64, ptr %31, i64 700000
+  %74 = getelementptr inbounds i64, ptr %34, i64 700000
+  %75 = getelementptr inbounds i64, ptr %37, i64 700000
+  %76 = getelementptr inbounds i64, ptr %40, i64 700000
+  %77 = getelementptr inbounds i64, ptr %43, i64 700000
+  %78 = getelementptr inbounds i64, ptr %46, i64 700000
+  %79 = getelementptr inbounds i64, ptr %49, i64 700000
+  %80 = getelementptr inbounds i64, ptr %52, i64 700000
+  %81 = getelementptr inbounds i64, ptr %55, i64 700000
+  %82 = getelementptr inbounds i64, ptr %58, i64 700000
+  store i64 %0, ptr %71, align 8
+  store i64 %0, ptr %72, align 8
+  store i64 %0, ptr %73, align 8
+  store i64 %0, ptr %74, align 8
+  store i64 %0, ptr %75, align 8
+  store i64 %0, ptr %76, align 8
+  store i64 %0, ptr %77, align 8
+  store i64 %0, ptr %78, align 8
+  store i64 %0, ptr %79, align 8
+  store i64 %0, ptr %80, align 8
+  store i64 %0, ptr %81, align 8
+  store i64 %0, ptr %82, align 8
+  %83 = getelementptr inbounds i64, ptr %25, i64 600000
+  %84 = getelementptr inbounds i64, ptr %28, i64 600000
+  %85 = getelementptr inbounds i64, ptr %31, i64 600000
+  %86 = getelementptr inbounds i64, ptr %34, i64 600000
+  %87 = getelementptr inbounds i64, ptr %37, i64 600000
+  %88 = getelementptr inbounds i64, ptr %40, i64 600000
+  %89 = getelementptr inbounds i64, ptr %43, i64 600000
+  %90 = getelementptr inbounds i64, ptr %46, i64 600000
+  %91 = getelementptr inbounds i64, ptr %49, i64 600000
+  %92 = getelementptr inbounds i64, ptr %52, i64 600000
+  %93 = getelementptr inbounds i64, ptr %55, i64 600000
+  %94 = getelementptr inbounds i64, ptr %58, i64 600000
+  store i64 %0, ptr %83, align 8
+  store i64 %0, ptr %84, align 8
+  store i64 %0, ptr %85, align 8
+  store i64 %0, ptr %86, align 8
+  store i64 %0, ptr %87, align 8
+  store i64 %0, ptr %88, align 8
+  store i64 %0, ptr %89, align 8
+  store i64 %0, ptr %90, align 8
+  store i64 %0, ptr %91, align 8
+  store i64 %0, ptr %92, align 8
+  store i64 %0, ptr %93, align 8
+  store i64 %0, ptr %94, align 8
+  %95 = getelementptr inbounds i64, ptr %25, i64 500000
+  %96 = getelementptr inbounds i64, ptr %28, i64 500000
+  %97 = getelementptr inbounds i64, ptr %31, i64 500000
+  %98 = getelementptr inbounds i64, ptr %34, i64 500000
+  %99 = getelementptr inbounds i64, ptr %37, i64 500000
+  %100 = getelementptr inbounds i64, ptr %40, i64 500000
+  %101 = getelementptr inbounds i64, ptr %43, i64 500000
+  %102 = getelementptr inbounds i64, ptr %46, i64 500000
+  %103 = getelementptr inbounds i64, ptr %49, i64 500000
+  %104 = getelementptr inbounds i64, ptr %52, i64 500000
+  %105 = getelementptr inbounds i64, ptr %55, i64 500000
+  %106 = getelementptr inbounds i64, ptr %58, i64 500000
+  store i64 %0, ptr %95, align 8
+  store i64 %0, ptr %96, align 8
+  store i64 %0, ptr %97, align 8
+  store i64 %0, ptr %98, align 8
+  store i64 %0, ptr %99, align 8
+  store i64 %0, ptr %100, align 8
+  store i64 %0, ptr %101, align 8
+  store i64 %0, ptr %102, align 8
+  store i64 %0, ptr %103, align 8
+  store i64 %0, ptr %104, align 8
+  store i64 %0, ptr %105, align 8
+  store i64 %0, ptr %106, align 8
   %107 = add i64 %23, 12
   %108 = icmp eq i64 %107, 996
   br i1 %108, label %109, label %22
 
 109:                                              ; preds = %22
-  store i64 %0, i64* %4, align 8
-  store i64 %0, i64* %5, align 8
-  store i64 %0, i64* %6, align 8
-  store i64 %0, i64* %7, align 8
-  store i64 %0, i64* %8, align 8
-  store i64 %0, i64* %9, align 8
-  store i64 %0, i64* %10, align 8
-  store i64 %0, i64* %11, align 8
-  store i64 %0, i64* %12, align 8
-  store i64 %0, i64* %13, align 8
-  store i64 %0, i64* %14, align 8
-  store i64 %0, i64* %15, align 8
-  store i64 %0, i64* %16, align 8
-  store i64 %0, i64* %17, align 8
-  store i64 %0, i64* %18, align 8
-  store i64 %0, i64* %19, align 8
+  store i64 %0, ptr %4, align 8
+  store i64 %0, ptr %5, align 8
+  store i64 %0, ptr %6, align 8
+  store i64 %0, ptr %7, align 8
+  store i64 %0, ptr %8, align 8
+  store i64 %0, ptr %9, align 8
+  store i64 %0, ptr %10, align 8
+  store i64 %0, ptr %11, align 8
+  store i64 %0, ptr %12, align 8
+  store i64 %0, ptr %13, align 8
+  store i64 %0, ptr %14, align 8
+  store i64 %0, ptr %15, align 8
+  store i64 %0, ptr %16, align 8
+  store i64 %0, ptr %17, align 8
+  store i64 %0, ptr %18, align 8
+  store i64 %0, ptr %19, align 8
   %110 = add nuw nsw i32 %21, 1
   %111 = icmp eq i32 %110, 400000
   br i1 %111, label %112, label %20

diff  --git a/llvm/test/CodeGen/PowerPC/remove-redundant-load-imm.ll b/llvm/test/CodeGen/PowerPC/remove-redundant-load-imm.ll
index ebe2d2f56146..7ac15408d63a 100644
--- a/llvm/test/CodeGen/PowerPC/remove-redundant-load-imm.ll
+++ b/llvm/test/CodeGen/PowerPC/remove-redundant-load-imm.ll
@@ -4,10 +4,10 @@
 target datalayout = "e-m:e-i64:64-n32:64"
 target triple = "powerpc64le-unknown-linux-gnu"
 
- at global.6 = external global i32*
+ at global.6 = external global ptr
 
-declare void @barney.88(i1, i32*)
-declare void @barney.94(i8*, i32)
+declare void @barney.88(i1, ptr)
+declare void @barney.94(ptr, i32)
 
 define void @redundancy_on_ppc_only(i1 %arg7) nounwind {
 ; PPC64LE-LABEL: redundancy_on_ppc_only:
@@ -29,7 +29,7 @@ bb:
   br label %bb10
 
 bb10:                                             ; preds = %bb
-  call void @barney.88(i1 %arg7, i32* null)
+  call void @barney.88(i1 %arg7, ptr null)
   ret void
 }
 
@@ -45,7 +45,7 @@ define void @redundancy_on_ppc_and_other_targets() nounwind {
 ; PPC64LE-NEXT:    std 4, 0(3)
 ; PPC64LE-NEXT:    bl barney.94
 ; PPC64LE-NEXT:    nop
-  store i32* null, i32** @global.6
-  call void @barney.94(i8* undef, i32 0)
+  store ptr null, ptr @global.6
+  call void @barney.94(ptr undef, i32 0)
   unreachable
 }

diff  --git a/llvm/test/CodeGen/PowerPC/remove-redundant-toc-saves.ll b/llvm/test/CodeGen/PowerPC/remove-redundant-toc-saves.ll
index 58c959bc0efa..c4a8955fa47b 100644
--- a/llvm/test/CodeGen/PowerPC/remove-redundant-toc-saves.ll
+++ b/llvm/test/CodeGen/PowerPC/remove-redundant-toc-saves.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -verify-machineinstrs -mtriple=powerpc64-ibm-aix-xcoff < %s | FileCheck %s --check-prefix=AIX64
 ; RUN: llc -verify-machineinstrs -mtriple=powerpc-ibm-aix-xcoff < %s | FileCheck %s --check-prefix=AIX32
 
-define signext i32 @test1(i32 signext %i, i32 (i32)* nocapture %Func, i32 (i32)* nocapture %Func2) {
+define signext i32 @test1(i32 signext %i, ptr nocapture %Func, ptr nocapture %Func2) {
 entry:
 ; CHECK-LABEL: test1:
 ; CHECK:    std 2, 24(1)
@@ -21,7 +21,7 @@ entry:
   ret i32 %add2
 }
 
-define signext i32 @test2(i32 signext %i, i32 signext %j, i32 (i32)* nocapture %Func, i32 (i32)* nocapture %Func2) {
+define signext i32 @test2(i32 signext %i, i32 signext %j, ptr nocapture %Func, ptr nocapture %Func2) {
 entry:
 ; CHECK-LABEL: test2:
 ; CHECK:    std 2, 24(1)
@@ -53,7 +53,7 @@ if.end:                                           ; preds = %entry, %if.then
 }
 
 ; Check for multiple TOC saves with if then else where neither dominates the other.
-define signext i32 @test3(i32 signext %i, i32 (i32)* nocapture %Func, i32 (i32)* nocapture %Func2) {
+define signext i32 @test3(i32 signext %i, ptr nocapture %Func, ptr nocapture %Func2) {
 ; CHECK-LABEL: test3:
 ; CHECK:    std 2, 24(1)
 ; CHECK-NOT:    std 2, 24(1)
@@ -82,7 +82,7 @@ if.end:                                           ; preds = %if.else, %if.then
   ret i32 %add4
 }
 
-define signext i32 @test4(i32 signext %i, i32 (i32)* nocapture %Func, i32 (i32)* nocapture %Func2) {
+define signext i32 @test4(i32 signext %i, ptr nocapture %Func, ptr nocapture %Func2) {
 ; CHECK-LABEL: test4:
 ; CHECK:    std 2, 24(1)
 ; CHECK-NOT:    std 2, 24(1)
@@ -114,7 +114,7 @@ if.end:                                           ; preds = %if.else, %if.then
 }
 
 ; Check for multiple TOC saves with if then where neither is redundant.
-define signext i32 @test5(i32 signext %i, i32 (i32)* nocapture %Func, i32 (i32)* nocapture readnone %Func2) {
+define signext i32 @test5(i32 signext %i, ptr nocapture %Func, ptr nocapture readnone %Func2) {
 entry:
 ; CHECK-LABEL: test5:
 ; CHECK:    std 2, 24(1)
@@ -139,7 +139,7 @@ if.end:                                           ; preds = %entry, %if.then
 }
 
 ; Check for multiple TOC saves if there are dynamic allocations on the stack.
-define signext i32 @test6(i32 signext %i, i32 (i32)* nocapture %Func, i32 (i32)* nocapture %Func2) {
+define signext i32 @test6(i32 signext %i, ptr nocapture %Func, ptr nocapture %Func2) {
 entry:
 ; CHECK-LABEL: test6:
 ; CHECK:    std 2, 24(1)
@@ -154,12 +154,11 @@ entry:
 ; AIX32: stw 2, 20(1)
   %conv = sext i32 %i to i64
   %0 = alloca i8, i64 %conv, align 16
-  %1 = bitcast i8* %0 to i32*
   %call = tail call signext i32 %Func(i32 signext %i)
-  call void @useAlloca(i32* nonnull %1, i32 signext %call)
+  call void @useAlloca(ptr nonnull %0, i32 signext %call)
   %call1 = call signext i32 %Func2(i32 signext %i)
   %add2 = add nsw i32 %call1, %call
   ret i32 %add2
 }
 
-declare void @useAlloca(i32*, i32 signext)
+declare void @useAlloca(ptr, i32 signext)

diff  --git a/llvm/test/CodeGen/PowerPC/resolvefi-basereg.ll b/llvm/test/CodeGen/PowerPC/resolvefi-basereg.ll
index b9029b205db9..10fba54c2d6a 100644
--- a/llvm/test/CodeGen/PowerPC/resolvefi-basereg.ll
+++ b/llvm/test/CodeGen/PowerPC/resolvefi-basereg.ll
@@ -7,8 +7,8 @@
 target datalayout = "E-m:e-i64:64-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
-%struct.Info = type { i32, i32, i8*, i8*, i8*, [32 x i8*], i64, [32 x i64], i64, i64, i64, [32 x i64] }
-%struct.S1998 = type { [2 x i32*], i64, i64, double, i16, i32, [29 x %struct.anon], i16, i8, i32, [8 x i8] }
+%struct.Info = type { i32, i32, ptr, ptr, ptr, [32 x ptr], i64, [32 x i64], i64, i64, i64, [32 x i64] }
+%struct.S1998 = type { [2 x ptr], i64, i64, double, i16, i32, [29 x %struct.anon], i16, i8, i32, [8 x i8] }
 %struct.anon = type { [16 x double], i32, i16, i32, [3 x i8], [6 x i8], [4 x i32], i8 }
 
 @info = global %struct.Info zeroinitializer, align 8
@@ -32,331 +32,322 @@ entry:
   %agg.tmp117 = alloca %struct.S1998, align 16
   %agg.tmp118 = alloca %struct.S1998, align 16
   %agg.tmp119 = alloca %struct.S1998, align 16
-  call void @llvm.memset.p0i8.i64(i8* align 16 bitcast (%struct.S1998* @s1998 to i8*), i8 0, i64 5168, i1 false)
-  call void @llvm.memset.p0i8.i64(i8* align 16 bitcast ([5 x %struct.S1998]* @a1998 to i8*), i8 0, i64 25840, i1 false)
-  call void @llvm.memset.p0i8.i64(i8* align 8 bitcast (%struct.Info* @info to i8*), i8 0, i64 832, i1 false)
-  store i8* bitcast (%struct.S1998* @s1998 to i8*), i8** getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 2), align 8
-  store i8* bitcast ([5 x %struct.S1998]* @a1998 to i8*), i8** getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 3), align 8
-  store i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 3) to i8*), i8** getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 4), align 8
-  store i64 5168, i64* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 6), align 8
-  store i64 16, i64* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 8), align 8
-  store i64 16, i64* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 9), align 8
-  store i64 16, i64* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 10), align 8
-  %0 = load i64, i64* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 8), align 8
+  call void @llvm.memset.p0.i64(ptr align 16 @s1998, i8 0, i64 5168, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 16 @a1998, i8 0, i64 25840, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 8 @info, i8 0, i64 832, i1 false)
+  store ptr @s1998, ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 2), align 8
+  store ptr @a1998, ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 3), align 8
+  store ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 3), ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 4), align 8
+  store i64 5168, ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 6), align 8
+  store i64 16, ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 8), align 8
+  store i64 16, ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 9), align 8
+  store i64 16, ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 10), align 8
+  %0 = load i64, ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 8), align 8
   %sub = sub i64 %0, 1
-  %and = and i64 ptrtoint (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 3) to i64), %sub
+  %and = and i64 ptrtoint (ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 3) to i64), %sub
   %tobool = icmp ne i64 %and, 0
   br i1 %tobool, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  %1 = load i32, i32* @fails, align 4
+  %1 = load i32, ptr @fails, align 4
   %inc = add nsw i32 %1, 1
-  store i32 %inc, i32* @fails, align 4
+  store i32 %inc, ptr @fails, align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry
-  store i32 0, i32* %i, align 4
-  store i32 0, i32* %j, align 4
-  %2 = load i32, i32* %i, align 4
+  store i32 0, ptr %i, align 4
+  store i32 0, ptr %j, align 4
+  %2 = load i32, ptr %i, align 4
   %idxprom = sext i32 %2 to i64
-  %arrayidx = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom
-  store i8* bitcast (i32** getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 0, i64 1) to i8*), i8** %arrayidx, align 8
-  %3 = load i32, i32* %i, align 4
+  %arrayidx = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom
+  store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 0, i64 1), ptr %arrayidx, align 8
+  %3 = load i32, ptr %i, align 4
   %idxprom1 = sext i32 %3 to i64
-  %arrayidx2 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom1
-  store i64 8, i64* %arrayidx2, align 8
-  %4 = load i32, i32* %i, align 4
+  %arrayidx2 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom1
+  store i64 8, ptr %arrayidx2, align 8
+  %4 = load i32, ptr %i, align 4
   %idxprom3 = sext i32 %4 to i64
-  %arrayidx4 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom3
-  store i64 8, i64* %arrayidx4, align 8
-  store i32* getelementptr inbounds ([256 x i32], [256 x i32]* @intarray, i32 0, i64 190), i32** getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 0, i64 1), align 8
-  store i32* getelementptr inbounds ([256 x i32], [256 x i32]* @intarray, i32 0, i64 241), i32** getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 0, i64 1), align 8
-  %5 = load i32, i32* %i, align 4
+  %arrayidx4 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom3
+  store i64 8, ptr %arrayidx4, align 8
+  store ptr getelementptr inbounds ([256 x i32], ptr @intarray, i32 0, i64 190), ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 0, i64 1), align 8
+  store ptr getelementptr inbounds ([256 x i32], ptr @intarray, i32 0, i64 241), ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 0, i64 1), align 8
+  %5 = load i32, ptr %i, align 4
   %inc5 = add nsw i32 %5, 1
-  store i32 %inc5, i32* %i, align 4
-  %6 = load i32, i32* %i, align 4
+  store i32 %inc5, ptr %i, align 4
+  %6 = load i32, ptr %i, align 4
   %idxprom6 = sext i32 %6 to i64
-  %arrayidx7 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom6
-  store i8* bitcast (i64* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 1) to i8*), i8** %arrayidx7, align 8
-  %7 = load i32, i32* %i, align 4
+  %arrayidx7 = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom6
+  store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 1), ptr %arrayidx7, align 8
+  %7 = load i32, ptr %i, align 4
   %idxprom8 = sext i32 %7 to i64
-  %arrayidx9 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom8
-  store i64 8, i64* %arrayidx9, align 8
-  %8 = load i32, i32* %i, align 4
+  %arrayidx9 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom8
+  store i64 8, ptr %arrayidx9, align 8
+  %8 = load i32, ptr %i, align 4
   %idxprom10 = sext i32 %8 to i64
-  %arrayidx11 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom10
-  store i64 8, i64* %arrayidx11, align 8
-  store i64 -3866974208859106459, i64* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 1), align 8
-  store i64 -185376695371304091, i64* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 1), align 8
-  %9 = load i32, i32* %i, align 4
+  %arrayidx11 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom10
+  store i64 8, ptr %arrayidx11, align 8
+  store i64 -3866974208859106459, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 1), align 8
+  store i64 -185376695371304091, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 1), align 8
+  %9 = load i32, ptr %i, align 4
   %inc12 = add nsw i32 %9, 1
-  store i32 %inc12, i32* %i, align 4
-  %10 = load i32, i32* %i, align 4
+  store i32 %inc12, ptr %i, align 4
+  %10 = load i32, ptr %i, align 4
   %idxprom13 = sext i32 %10 to i64
-  %arrayidx14 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom13
-  store i8* bitcast (i64* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 2) to i8*), i8** %arrayidx14, align 8
-  %11 = load i32, i32* %i, align 4
+  %arrayidx14 = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom13
+  store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 2), ptr %arrayidx14, align 8
+  %11 = load i32, ptr %i, align 4
   %idxprom15 = sext i32 %11 to i64
-  %arrayidx16 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom15
-  store i64 8, i64* %arrayidx16, align 8
-  %12 = load i32, i32* %i, align 4
+  %arrayidx16 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom15
+  store i64 8, ptr %arrayidx16, align 8
+  %12 = load i32, ptr %i, align 4
   %idxprom17 = sext i32 %12 to i64
-  %arrayidx18 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom17
-  store i64 8, i64* %arrayidx18, align 8
-  store i64 -963638028680427187, i64* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 2), align 8
-  store i64 7510542175772455554, i64* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 2), align 8
-  %13 = load i32, i32* %i, align 4
+  %arrayidx18 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom17
+  store i64 8, ptr %arrayidx18, align 8
+  store i64 -963638028680427187, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 2), align 8
+  store i64 7510542175772455554, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 2), align 8
+  %13 = load i32, ptr %i, align 4
   %inc19 = add nsw i32 %13, 1
-  store i32 %inc19, i32* %i, align 4
-  %14 = load i32, i32* %i, align 4
+  store i32 %inc19, ptr %i, align 4
+  %14 = load i32, ptr %i, align 4
   %idxprom20 = sext i32 %14 to i64
-  %arrayidx21 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom20
-  store i8* bitcast (double* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 3) to i8*), i8** %arrayidx21, align 8
-  %15 = load i32, i32* %i, align 4
+  %arrayidx21 = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom20
+  store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 3), ptr %arrayidx21, align 8
+  %15 = load i32, ptr %i, align 4
   %idxprom22 = sext i32 %15 to i64
-  %arrayidx23 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom22
-  store i64 8, i64* %arrayidx23, align 8
-  %16 = load i32, i32* %i, align 4
+  %arrayidx23 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom22
+  store i64 8, ptr %arrayidx23, align 8
+  %16 = load i32, ptr %i, align 4
   %idxprom24 = sext i32 %16 to i64
-  %arrayidx25 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom24
-  store i64 16, i64* %arrayidx25, align 8
-  store double 0xC0F8783300000000, double* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 3), align 16
-  store double 0xC10DF3CCC0000000, double* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 3), align 16
-  %17 = load i32, i32* %i, align 4
+  %arrayidx25 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom24
+  store i64 16, ptr %arrayidx25, align 8
+  store double 0xC0F8783300000000, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 3), align 16
+  store double 0xC10DF3CCC0000000, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 3), align 16
+  %17 = load i32, ptr %i, align 4
   %inc26 = add nsw i32 %17, 1
-  store i32 %inc26, i32* %i, align 4
-  %18 = load i32, i32* %i, align 4
+  store i32 %inc26, ptr %i, align 4
+  %18 = load i32, ptr %i, align 4
   %idxprom27 = sext i32 %18 to i64
-  %arrayidx28 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom27
-  store i8* bitcast (i16* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 4) to i8*), i8** %arrayidx28, align 8
-  %19 = load i32, i32* %i, align 4
+  %arrayidx28 = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom27
+  store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 4), ptr %arrayidx28, align 8
+  %19 = load i32, ptr %i, align 4
   %idxprom29 = sext i32 %19 to i64
-  %arrayidx30 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom29
-  store i64 2, i64* %arrayidx30, align 8
-  %20 = load i32, i32* %i, align 4
+  %arrayidx30 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom29
+  store i64 2, ptr %arrayidx30, align 8
+  %20 = load i32, ptr %i, align 4
   %idxprom31 = sext i32 %20 to i64
-  %arrayidx32 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom31
-  store i64 2, i64* %arrayidx32, align 8
-  store i16 -15897, i16* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 4), align 2
-  store i16 30935, i16* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 4), align 2
-  %21 = load i32, i32* %i, align 4
+  %arrayidx32 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom31
+  store i64 2, ptr %arrayidx32, align 8
+  store i16 -15897, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 4), align 2
+  store i16 30935, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 4), align 2
+  %21 = load i32, ptr %i, align 4
   %inc33 = add nsw i32 %21, 1
-  store i32 %inc33, i32* %i, align 4
-  store i32 -419541644, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 5), align 4
-  store i32 2125926812, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 5), align 4
-  %22 = load i32, i32* %j, align 4
+  store i32 %inc33, ptr %i, align 4
+  store i32 -419541644, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 5), align 4
+  store i32 2125926812, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 5), align 4
+  %22 = load i32, ptr %j, align 4
   %inc34 = add nsw i32 %22, 1
-  store i32 %inc34, i32* %j, align 4
-  %23 = load i32, i32* %i, align 4
+  store i32 %inc34, ptr %j, align 4
+  %23 = load i32, ptr %i, align 4
   %idxprom35 = sext i32 %23 to i64
-  %arrayidx36 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom35
-  store i8* bitcast (double* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 0, i64 0) to i8*), i8** %arrayidx36, align 8
-  %24 = load i32, i32* %i, align 4
+  %arrayidx36 = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom35
+  store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 0, i64 0), ptr %arrayidx36, align 8
+  %24 = load i32, ptr %i, align 4
   %idxprom37 = sext i32 %24 to i64
-  %arrayidx38 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom37
-  store i64 8, i64* %arrayidx38, align 8
-  %25 = load i32, i32* %i, align 4
+  %arrayidx38 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom37
+  store i64 8, ptr %arrayidx38, align 8
+  %25 = load i32, ptr %i, align 4
   %idxprom39 = sext i32 %25 to i64
-  %arrayidx40 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom39
-  store i64 8, i64* %arrayidx40, align 8
-  store double 0xC0FC765780000000, double* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 0, i64 0), align 8
-  store double 0xC1025CD7A0000000, double* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 0, i64 0), align 8
-  %26 = load i32, i32* %i, align 4
+  %arrayidx40 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom39
+  store i64 8, ptr %arrayidx40, align 8
+  store double 0xC0FC765780000000, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 0, i64 0), align 8
+  store double 0xC1025CD7A0000000, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 0, i64 0), align 8
+  %26 = load i32, ptr %i, align 4
   %inc41 = add nsw i32 %26, 1
-  store i32 %inc41, i32* %i, align 4
-  %bf.load = load i32, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 1), align 8
+  store i32 %inc41, ptr %i, align 4
+  %bf.load = load i32, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 1), align 8
   %bf.clear = and i32 %bf.load, 7
   %bf.set = or i32 %bf.clear, 16
-  store i32 %bf.set, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 1), align 8
-  %bf.load42 = load i32, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 1), align 8
+  store i32 %bf.set, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 1), align 8
+  %bf.load42 = load i32, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 1), align 8
   %bf.clear43 = and i32 %bf.load42, 7
   %bf.set44 = or i32 %bf.clear43, 24
-  store i32 %bf.set44, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 1), align 8
-  %27 = load i32, i32* %j, align 4
+  store i32 %bf.set44, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 1), align 8
+  %27 = load i32, ptr %j, align 4
   %inc45 = add nsw i32 %27, 1
-  store i32 %inc45, i32* %j, align 4
-  %bf.load46 = load i16, i16* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 2), align 4
+  store i32 %inc45, ptr %j, align 4
+  %bf.load46 = load i16, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 2), align 4
   %bf.clear47 = and i16 %bf.load46, 127
-  store i16 %bf.clear47, i16* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 2), align 4
-  %bf.load48 = load i16, i16* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 2), align 4
+  store i16 %bf.clear47, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 2), align 4
+  %bf.load48 = load i16, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 2), align 4
   %bf.clear49 = and i16 %bf.load48, 127
-  store i16 %bf.clear49, i16* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 2), align 4
-  %28 = load i32, i32* %j, align 4
+  store i16 %bf.clear49, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 2), align 4
+  %28 = load i32, ptr %j, align 4
   %inc50 = add nsw i32 %28, 1
-  store i32 %inc50, i32* %j, align 4
-  %bf.load51 = load i32, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 3), align 8
+  store i32 %inc50, ptr %j, align 4
+  %bf.load51 = load i32, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 3), align 8
   %bf.clear52 = and i32 %bf.load51, 63
-  store i32 %bf.clear52, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 3), align 8
-  %bf.load53 = load i32, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 3), align 8
+  store i32 %bf.clear52, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 3), align 8
+  %bf.load53 = load i32, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 3), align 8
   %bf.clear54 = and i32 %bf.load53, 63
   %bf.set55 = or i32 %bf.clear54, 64
-  store i32 %bf.set55, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 3), align 8
-  %29 = load i32, i32* %j, align 4
+  store i32 %bf.set55, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 3), align 8
+  %29 = load i32, ptr %j, align 4
   %inc56 = add nsw i32 %29, 1
-  store i32 %inc56, i32* %j, align 4
-  %bf.load57 = load i24, i24* bitcast ([3 x i8]* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 4) to i24*), align 4
+  store i32 %inc56, ptr %j, align 4
+  %bf.load57 = load i24, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 4), align 4
   %bf.clear58 = and i24 %bf.load57, 63
-  store i24 %bf.clear58, i24* bitcast ([3 x i8]* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 4) to i24*), align 4
-  %bf.load59 = load i24, i24* bitcast ([3 x i8]* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 4) to i24*), align 4
+  store i24 %bf.clear58, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 4), align 4
+  %bf.load59 = load i24, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 4), align 4
   %bf.clear60 = and i24 %bf.load59, 63
-  store i24 %bf.clear60, i24* bitcast ([3 x i8]* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 4) to i24*), align 4
-  %30 = load i32, i32* %j, align 4
+  store i24 %bf.clear60, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 4), align 4
+  %30 = load i32, ptr %j, align 4
   %inc61 = add nsw i32 %30, 1
-  store i32 %inc61, i32* %j, align 4
-  %31 = load i32, i32* %i, align 4
+  store i32 %inc61, ptr %j, align 4
+  %31 = load i32, ptr %i, align 4
   %idxprom62 = sext i32 %31 to i64
-  %arrayidx63 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom62
-  store i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 5), i8** %arrayidx63, align 8
-  %32 = load i32, i32* %i, align 4
+  %arrayidx63 = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom62
+  store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 5, i64 5), ptr %arrayidx63, align 8
+  %32 = load i32, ptr %i, align 4
   %idxprom64 = sext i32 %32 to i64
-  %arrayidx65 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom64
-  store i64 1, i64* %arrayidx65, align 8
-  %33 = load i32, i32* %i, align 4
+  %arrayidx65 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom64
+  store i64 1, ptr %arrayidx65, align 8
+  %33 = load i32, ptr %i, align 4
   %idxprom66 = sext i32 %33 to i64
-  %arrayidx67 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom66
-  store i64 1, i64* %arrayidx67, align 8
-  store i8 -83, i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 5), align 1
-  store i8 -67, i8* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 5, i64 5), align 1
-  %34 = load i32, i32* %i, align 4
+  %arrayidx67 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom66
+  store i64 1, ptr %arrayidx67, align 8
+  store i8 -83, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 5, i64 5), align 1
+  store i8 -67, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 5, i64 5), align 1
+  %34 = load i32, ptr %i, align 4
   %inc68 = add nsw i32 %34, 1
-  store i32 %inc68, i32* %i, align 4
-  %35 = load i32, i32* %i, align 4
+  store i32 %inc68, ptr %i, align 4
+  %35 = load i32, ptr %i, align 4
   %idxprom69 = sext i32 %35 to i64
-  %arrayidx70 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom69
-  store i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 1), i8** %arrayidx70, align 8
-  %36 = load i32, i32* %i, align 4
+  %arrayidx70 = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom69
+  store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 5, i64 1), ptr %arrayidx70, align 8
+  %36 = load i32, ptr %i, align 4
   %idxprom71 = sext i32 %36 to i64
-  %arrayidx72 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom71
-  store i64 1, i64* %arrayidx72, align 8
-  %37 = load i32, i32* %i, align 4
+  %arrayidx72 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom71
+  store i64 1, ptr %arrayidx72, align 8
+  %37 = load i32, ptr %i, align 4
   %idxprom73 = sext i32 %37 to i64
-  %arrayidx74 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom73
-  store i64 1, i64* %arrayidx74, align 8
-  store i8 34, i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 1), align 1
-  store i8 64, i8* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 5, i64 1), align 1
-  %38 = load i32, i32* %i, align 4
+  %arrayidx74 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom73
+  store i64 1, ptr %arrayidx74, align 8
+  store i8 34, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 5, i64 1), align 1
+  store i8 64, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 5, i64 1), align 1
+  %38 = load i32, ptr %i, align 4
   %inc75 = add nsw i32 %38, 1
-  store i32 %inc75, i32* %i, align 4
-  %39 = load i32, i32* %i, align 4
+  store i32 %inc75, ptr %i, align 4
+  %39 = load i32, ptr %i, align 4
   %idxprom76 = sext i32 %39 to i64
-  %arrayidx77 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom76
-  store i8* bitcast (i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 6, i64 3) to i8*), i8** %arrayidx77, align 8
-  %40 = load i32, i32* %i, align 4
+  %arrayidx77 = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom76
+  store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 6, i64 3), ptr %arrayidx77, align 8
+  %40 = load i32, ptr %i, align 4
   %idxprom78 = sext i32 %40 to i64
-  %arrayidx79 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom78
-  store i64 4, i64* %arrayidx79, align 8
-  %41 = load i32, i32* %i, align 4
+  %arrayidx79 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom78
+  store i64 4, ptr %arrayidx79, align 8
+  %41 = load i32, ptr %i, align 4
   %idxprom80 = sext i32 %41 to i64
-  %arrayidx81 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom80
-  store i64 4, i64* %arrayidx81, align 8
-  store i32 -3, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 6, i64 3), align 4
-  store i32 -3, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 6, i64 3), align 4
-  %42 = load i32, i32* %i, align 4
+  %arrayidx81 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom80
+  store i64 4, ptr %arrayidx81, align 8
+  store i32 -3, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 6, i64 3), align 4
+  store i32 -3, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 6, i64 3), align 4
+  %42 = load i32, ptr %i, align 4
   %inc82 = add nsw i32 %42, 1
-  store i32 %inc82, i32* %i, align 4
-  %43 = load i32, i32* %i, align 4
+  store i32 %inc82, ptr %i, align 4
+  %43 = load i32, ptr %i, align 4
   %idxprom83 = sext i32 %43 to i64
-  %arrayidx84 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom83
-  store i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 7), i8** %arrayidx84, align 8
-  %44 = load i32, i32* %i, align 4
+  %arrayidx84 = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom83
+  store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 7), ptr %arrayidx84, align 8
+  %44 = load i32, ptr %i, align 4
   %idxprom85 = sext i32 %44 to i64
-  %arrayidx86 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom85
-  store i64 1, i64* %arrayidx86, align 8
-  %45 = load i32, i32* %i, align 4
+  %arrayidx86 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom85
+  store i64 1, ptr %arrayidx86, align 8
+  %45 = load i32, ptr %i, align 4
   %idxprom87 = sext i32 %45 to i64
-  %arrayidx88 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom87
-  store i64 1, i64* %arrayidx88, align 8
-  store i8 106, i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 7), align 1
-  store i8 -102, i8* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 7), align 1
-  %46 = load i32, i32* %i, align 4
+  %arrayidx88 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom87
+  store i64 1, ptr %arrayidx88, align 8
+  store i8 106, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 7), align 1
+  store i8 -102, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 7), align 1
+  %46 = load i32, ptr %i, align 4
   %inc89 = add nsw i32 %46, 1
-  store i32 %inc89, i32* %i, align 4
-  %47 = load i32, i32* %i, align 4
+  store i32 %inc89, ptr %i, align 4
+  %47 = load i32, ptr %i, align 4
   %idxprom90 = sext i32 %47 to i64
-  %arrayidx91 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom90
-  store i8* bitcast (i16* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 7) to i8*), i8** %arrayidx91, align 8
-  %48 = load i32, i32* %i, align 4
+  %arrayidx91 = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom90
+  store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 7), ptr %arrayidx91, align 8
+  %48 = load i32, ptr %i, align 4
   %idxprom92 = sext i32 %48 to i64
-  %arrayidx93 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom92
-  store i64 2, i64* %arrayidx93, align 8
-  %49 = load i32, i32* %i, align 4
+  %arrayidx93 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom92
+  store i64 2, ptr %arrayidx93, align 8
+  %49 = load i32, ptr %i, align 4
   %idxprom94 = sext i32 %49 to i64
-  %arrayidx95 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom94
-  store i64 2, i64* %arrayidx95, align 8
-  store i16 29665, i16* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 7), align 2
-  store i16 7107, i16* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 7), align 2
-  %50 = load i32, i32* %i, align 4
+  %arrayidx95 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom94
+  store i64 2, ptr %arrayidx95, align 8
+  store i16 29665, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 7), align 2
+  store i16 7107, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 7), align 2
+  %50 = load i32, ptr %i, align 4
   %inc96 = add nsw i32 %50, 1
-  store i32 %inc96, i32* %i, align 4
-  %51 = load i32, i32* %i, align 4
+  store i32 %inc96, ptr %i, align 4
+  %51 = load i32, ptr %i, align 4
   %idxprom97 = sext i32 %51 to i64
-  %arrayidx98 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom97
-  store i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 8), i8** %arrayidx98, align 8
-  %52 = load i32, i32* %i, align 4
+  %arrayidx98 = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom97
+  store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 8), ptr %arrayidx98, align 8
+  %52 = load i32, ptr %i, align 4
   %idxprom99 = sext i32 %52 to i64
-  %arrayidx100 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom99
-  store i64 1, i64* %arrayidx100, align 8
-  %53 = load i32, i32* %i, align 4
+  %arrayidx100 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom99
+  store i64 1, ptr %arrayidx100, align 8
+  %53 = load i32, ptr %i, align 4
   %idxprom101 = sext i32 %53 to i64
-  %arrayidx102 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom101
-  store i64 1, i64* %arrayidx102, align 8
-  store i8 52, i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 8), align 1
-  store i8 -86, i8* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 8), align 1
-  %54 = load i32, i32* %i, align 4
+  %arrayidx102 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom101
+  store i64 1, ptr %arrayidx102, align 8
+  store i8 52, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 8), align 1
+  store i8 -86, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 8), align 1
+  %54 = load i32, ptr %i, align 4
   %inc103 = add nsw i32 %54, 1
-  store i32 %inc103, i32* %i, align 4
-  %55 = load i32, i32* %i, align 4
+  store i32 %inc103, ptr %i, align 4
+  %55 = load i32, ptr %i, align 4
   %idxprom104 = sext i32 %55 to i64
-  %arrayidx105 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom104
-  store i8* bitcast (i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 9) to i8*), i8** %arrayidx105, align 8
-  %56 = load i32, i32* %i, align 4
+  %arrayidx105 = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom104
+  store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 9), ptr %arrayidx105, align 8
+  %56 = load i32, ptr %i, align 4
   %idxprom106 = sext i32 %56 to i64
-  %arrayidx107 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom106
-  store i64 4, i64* %arrayidx107, align 8
-  %57 = load i32, i32* %i, align 4
+  %arrayidx107 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom106
+  store i64 4, ptr %arrayidx107, align 8
+  %57 = load i32, ptr %i, align 4
   %idxprom108 = sext i32 %57 to i64
-  %arrayidx109 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom108
-  store i64 4, i64* %arrayidx109, align 8
-  store i32 -54118453, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 9), align 4
-  store i32 1668755823, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 9), align 4
-  %58 = load i32, i32* %i, align 4
+  %arrayidx109 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom108
+  store i64 4, ptr %arrayidx109, align 8
+  store i32 -54118453, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 9), align 4
+  store i32 1668755823, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 9), align 4
+  %58 = load i32, ptr %i, align 4
   %inc110 = add nsw i32 %58, 1
-  store i32 %inc110, i32* %i, align 4
-  store i32 %inc110, i32* %tmp
-  %59 = load i32, i32* %tmp
-  %60 = load i32, i32* %i, align 4
-  store i32 %60, i32* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 0), align 4
-  %61 = load i32, i32* %j, align 4
-  store i32 %61, i32* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 1), align 4
-  %62 = bitcast %struct.S1998* %agg.tmp111 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %62, i8* align 16 bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i1 false)
-  %63 = bitcast %struct.S1998* %agg.tmp112 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %63, i8* align 16 bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i1 false)
-  call void @check1998(%struct.S1998* sret(%struct.S1998) %agg.tmp, %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp111, %struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 1), %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp112)
-  call void @checkx1998(%struct.S1998* byval(%struct.S1998) align 16 %agg.tmp)
-  %64 = bitcast %struct.S1998* %agg.tmp113 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %64, i8* align 16 bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i1 false)
-  %65 = bitcast %struct.S1998* %agg.tmp114 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %65, i8* align 16 bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i1 false)
-  %66 = bitcast %struct.S1998* %agg.tmp115 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %66, i8* align 16 bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i1 false)
-  call void (i32, ...) @check1998va(i32 signext 1, double 1.000000e+00, %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp113, i64 2, %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp114, %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp115)
-  %67 = bitcast %struct.S1998* %agg.tmp116 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %67, i8* align 16 bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i1 false)
-  %68 = bitcast %struct.S1998* %agg.tmp117 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %68, i8* align 16 bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i1 false)
-  %69 = bitcast %struct.S1998* %agg.tmp118 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %69, i8* align 16 bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i1 false)
-  %70 = bitcast %struct.S1998* %agg.tmp119 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %70, i8* align 16 bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i1 false)
-  call void (i32, ...) @check1998va(i32 signext 2, %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp116, %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp117, ppc_fp128 0xM40000000000000000000000000000000, %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp118, %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp119)
+  store i32 %inc110, ptr %i, align 4
+  store i32 %inc110, ptr %tmp
+  %59 = load i32, ptr %tmp
+  %60 = load i32, ptr %i, align 4
+  store i32 %60, ptr @info, align 4
+  %61 = load i32, ptr %j, align 4
+  store i32 %61, ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 1), align 4
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %agg.tmp111, ptr align 16 @s1998, i64 5168, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %agg.tmp112, ptr align 16 getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2), i64 5168, i1 false)
+  call void @check1998(ptr sret(%struct.S1998) %agg.tmp, ptr byval(%struct.S1998) align 16 %agg.tmp111, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 1), ptr byval(%struct.S1998) align 16 %agg.tmp112)
+  call void @checkx1998(ptr byval(%struct.S1998) align 16 %agg.tmp)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %agg.tmp113, ptr align 16 @s1998, i64 5168, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %agg.tmp114, ptr align 16 getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2), i64 5168, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %agg.tmp115, ptr align 16 getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2), i64 5168, i1 false)
+  call void (i32, ...) @check1998va(i32 signext 1, double 1.000000e+00, ptr byval(%struct.S1998) align 16 %agg.tmp113, i64 2, ptr byval(%struct.S1998) align 16 %agg.tmp114, ptr byval(%struct.S1998) align 16 %agg.tmp115)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %agg.tmp116, ptr align 16 @s1998, i64 5168, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %agg.tmp117, ptr align 16 @s1998, i64 5168, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %agg.tmp118, ptr align 16 getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2), i64 5168, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %agg.tmp119, ptr align 16 @s1998, i64 5168, i1 false)
+  call void (i32, ...) @check1998va(i32 signext 2, ptr byval(%struct.S1998) align 16 %agg.tmp116, ptr byval(%struct.S1998) align 16 %agg.tmp117, ppc_fp128 0xM40000000000000000000000000000000, ptr byval(%struct.S1998) align 16 %agg.tmp118, ptr byval(%struct.S1998) align 16 %agg.tmp119)
   ret void
 }
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1)
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
 
-declare void @check1998(%struct.S1998* sret(%struct.S1998), %struct.S1998* byval(%struct.S1998) align 16, %struct.S1998*, %struct.S1998* byval(%struct.S1998) align 16)
+declare void @check1998(ptr sret(%struct.S1998), ptr byval(%struct.S1998) align 16, ptr, ptr byval(%struct.S1998) align 16)
 declare void @check1998va(i32 signext, ...)
-declare void @checkx1998(%struct.S1998* byval(%struct.S1998) align 16 %arg)
+declare void @checkx1998(ptr byval(%struct.S1998) align 16 %arg)
 

diff  --git a/llvm/test/CodeGen/PowerPC/resolvefi-disp.ll b/llvm/test/CodeGen/PowerPC/resolvefi-disp.ll
index 0bbb01d51170..aaea3fd31e88 100644
--- a/llvm/test/CodeGen/PowerPC/resolvefi-disp.ll
+++ b/llvm/test/CodeGen/PowerPC/resolvefi-disp.ll
@@ -20,52 +20,43 @@ target triple = "powerpc64le-unknown-linux-gnu"
 @s2760 = external global %struct.S2760
 @fails = external global i32
 
-define void @check2760(%struct.S2760* noalias sret(%struct.S2760) %agg.result, %struct.S2760* byval(%struct.S2760) align 16, %struct.S2760* %arg1, %struct.S2760* byval(%struct.S2760) align 16) {
+define void @check2760(ptr noalias sret(%struct.S2760) %agg.result, ptr byval(%struct.S2760) align 16, ptr %arg1, ptr byval(%struct.S2760) align 16) {
 entry:
   %arg0 = alloca %struct.S2760, align 32
   %arg2 = alloca %struct.S2760, align 32
-  %arg1.addr = alloca %struct.S2760*, align 8
+  %arg1.addr = alloca ptr, align 8
   %ret = alloca %struct.S2760, align 32
   %b1 = alloca %struct.S2760, align 32
   %b2 = alloca %struct.S2760, align 32
-  %2 = bitcast %struct.S2760* %arg0 to i8*
-  %3 = bitcast %struct.S2760* %0 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %2, i8* align 16 %3, i64 11104, i1 false)
-  %4 = bitcast %struct.S2760* %arg2 to i8*
-  %5 = bitcast %struct.S2760* %1 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %4, i8* align 16 %5, i64 11104, i1 false)
-  store %struct.S2760* %arg1, %struct.S2760** %arg1.addr, align 8
-  %6 = bitcast %struct.S2760* %ret to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 32 %6, i8 0, i64 11104, i1 false)
-  %7 = bitcast %struct.S2760* %b1 to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 32 %7, i8 0, i64 11104, i1 false)
-  %8 = bitcast %struct.S2760* %b2 to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 32 %8, i8 0, i64 11104, i1 false)
-  %b = getelementptr inbounds %struct.S2760, %struct.S2760* %arg0, i32 0, i32 1
-  %g = getelementptr inbounds %struct.anon, %struct.anon* %b, i32 0, i32 1
-  %9 = load i64, i64* %g, align 8
-  %10 = load i64, i64* getelementptr inbounds (%struct.S2760, %struct.S2760* @s2760, i32 0, i32 1, i32 1), align 8
-  %cmp = icmp ne i64 %9, %10
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %arg0, ptr align 16 %0, i64 11104, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %arg2, ptr align 16 %1, i64 11104, i1 false)
+  store ptr %arg1, ptr %arg1.addr, align 8
+  call void @llvm.memset.p0.i64(ptr align 32 %ret, i8 0, i64 11104, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 32 %b1, i8 0, i64 11104, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 32 %b2, i8 0, i64 11104, i1 false)
+  %b = getelementptr inbounds %struct.S2760, ptr %arg0, i32 0, i32 1
+  %g = getelementptr inbounds %struct.anon, ptr %b, i32 0, i32 1
+  %2 = load i64, ptr %g, align 8
+  %3 = load i64, ptr getelementptr inbounds (%struct.S2760, ptr @s2760, i32 0, i32 1, i32 1), align 8
+  %cmp = icmp ne i64 %2, %3
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  %11 = load i32, i32* @fails, align 4
-  %inc = add nsw i32 %11, 1
-  store i32 %inc, i32* @fails, align 4
+  %4 = load i32, ptr @fails, align 4
+  %inc = add nsw i32 %4, 1
+  store i32 %inc, ptr @fails, align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry
-  %12 = load i64, i64* getelementptr inbounds (%struct.S2760, %struct.S2760* @s2760, i32 0, i32 1, i32 1), align 8
-  %b3 = getelementptr inbounds %struct.S2760, %struct.S2760* %ret, i32 0, i32 1
-  %g4 = getelementptr inbounds %struct.anon, %struct.anon* %b3, i32 0, i32 1
-  store i64 %12, i64* %g4, align 8
-  %13 = bitcast %struct.S2760* %agg.result to i8*
-  %14 = bitcast %struct.S2760* %ret to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 32 %13, i8* align 32 %14, i64 11104, i1 false)
+  %5 = load i64, ptr getelementptr inbounds (%struct.S2760, ptr @s2760, i32 0, i32 1, i32 1), align 8
+  %b3 = getelementptr inbounds %struct.S2760, ptr %ret, i32 0, i32 1
+  %g4 = getelementptr inbounds %struct.anon, ptr %b3, i32 0, i32 1
+  store i64 %5, ptr %g4, align 8
+  call void @llvm.memcpy.p0.p0.i64(ptr align 32 %agg.result, ptr align 32 %ret, i64 11104, i1 false)
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1)
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
 

diff  --git a/llvm/test/CodeGen/PowerPC/respect-rounding-mode.ll b/llvm/test/CodeGen/PowerPC/respect-rounding-mode.ll
index 9698ed9821be..850c82151c8a 100644
--- a/llvm/test/CodeGen/PowerPC/respect-rounding-mode.ll
+++ b/llvm/test/CodeGen/PowerPC/respect-rounding-mode.ll
@@ -15,16 +15,16 @@
 ; RUN:   -mcpu=pwr9 -ppc-asm-full-reg-names < %s | grep 'xvmuldp' | count 4
 ; RUN: llc -verify-machineinstrs --mtriple powerpc64le-unknown-linux-gnu \
 ; RUN:   -mcpu=pwr10 -ppc-asm-full-reg-names < %s | grep 'xvmuldp' | count 4
- at IndirectCallPtr = dso_local local_unnamed_addr global void (...)* null, align 8
+ at IndirectCallPtr = dso_local local_unnamed_addr global ptr null, align 8
 
 define dso_local signext i32 @func1() local_unnamed_addr #0 {
 entry:
-  tail call void bitcast (void (...)* @directCall to void ()*)() #0
+  tail call void @directCall() #0
   %0 = tail call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double> <double -9.990000e+01, double 9.990000e+01>, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
   %vecext = extractelement <2 x double> %0, i32 0
   %sub = tail call double @llvm.experimental.constrained.fsub.f64(double %vecext, double -9.900000e+01, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
   %conv = tail call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %sub, metadata !"fpexcept.ignore") #0
-  tail call void bitcast (void (...)* @directCall to void ()*)() #0
+  tail call void @directCall() #0
   %1 = tail call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double> <double -9.990000e+01, double 9.990000e+01>, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
   %vecext3 = extractelement <2 x double> %1, i32 1
   %cmp = tail call i1 @llvm.experimental.constrained.fcmp.f64(double %vecext3, double 9.900000e+01, metadata !"une", metadata !"fpexcept.ignore") #0
@@ -50,16 +50,16 @@ declare void @exit(i32 signext) local_unnamed_addr
 
 define dso_local signext i32 @func2() local_unnamed_addr #0 {
 entry:
-  %call = tail call <2 x double> bitcast (<2 x double> (...)* @getvector1 to <2 x double> ()*)() #0
-  %call1 = tail call <2 x double> bitcast (<2 x double> (...)* @getvector2 to <2 x double> ()*)() #0
-  tail call void bitcast (void (...)* @directCall to void ()*)() #0
+  %call = tail call <2 x double> @getvector1() #0
+  %call1 = tail call <2 x double> @getvector2() #0
+  tail call void @directCall() #0
   %mul = tail call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %call, <2 x double> %call1, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
   %vecext = extractelement <2 x double> %mul, i32 0
   %cmp = tail call i1 @llvm.experimental.constrained.fcmp.f64(double %vecext, double 4.000000e+00, metadata !"oeq", metadata !"fpexcept.ignore") #0
   br i1 %cmp, label %cleanup, label %if.end
 
 if.end:                                           ; preds = %entry
-  tail call void bitcast (void (...)* @directCall to void ()*)() #0
+  tail call void @directCall() #0
   %mul10 = tail call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %call, <2 x double> %call1, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
   %0 = tail call i32 @llvm.ppc.vsx.xvcmpeqdp.p(i32 2, <2 x double> %mul, <2 x double> %mul10) #0
   br label %cleanup
@@ -79,13 +79,13 @@ declare i32 @llvm.ppc.vsx.xvcmpeqdp.p(i32, <2 x double>, <2 x double>)
 
 define dso_local signext i32 @func3() local_unnamed_addr #0 {
 entry:
-  %0 = load void ()*, void ()** bitcast (void (...)** @IndirectCallPtr to void ()**), align 8
+  %0 = load ptr, ptr @IndirectCallPtr, align 8
   tail call void %0() #0
   %1 = tail call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double> <double -9.990000e+01, double 9.990000e+01>, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
   %vecext = extractelement <2 x double> %1, i32 0
   %sub = tail call double @llvm.experimental.constrained.fsub.f64(double %vecext, double -9.900000e+01, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
   %conv = tail call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %sub, metadata !"fpexcept.ignore") #0
-  %2 = load void ()*, void ()** bitcast (void (...)** @IndirectCallPtr to void ()**), align 8
+  %2 = load ptr, ptr @IndirectCallPtr, align 8
   tail call void %2() #0
   %3 = tail call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double> <double -9.990000e+01, double 9.990000e+01>, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
   %vecext4 = extractelement <2 x double> %3, i32 1
@@ -102,9 +102,9 @@ if.end:                                           ; preds = %entry
 
 define dso_local signext i32 @func4() local_unnamed_addr #0 {
 entry:
-  %call = tail call <2 x double> bitcast (<2 x double> (...)* @getvector1 to <2 x double> ()*)() #0
-  %call1 = tail call <2 x double> bitcast (<2 x double> (...)* @getvector2 to <2 x double> ()*)() #0
-  %0 = load void ()*, void ()** bitcast (void (...)** @IndirectCallPtr to void ()**), align 8
+  %call = tail call <2 x double> @getvector1() #0
+  %call1 = tail call <2 x double> @getvector2() #0
+  %0 = load ptr, ptr @IndirectCallPtr, align 8
   tail call void %0() #0
   %mul = tail call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %call, <2 x double> %call1, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
   %vecext = extractelement <2 x double> %mul, i32 0
@@ -112,7 +112,7 @@ entry:
   br i1 %cmp, label %cleanup, label %if.end
 
 if.end:                                           ; preds = %entry
-  %1 = load void ()*, void ()** bitcast (void (...)** @IndirectCallPtr to void ()**), align 8
+  %1 = load ptr, ptr @IndirectCallPtr, align 8
   tail call void %1() #0
   %mul11 = tail call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %call, <2 x double> %call1, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
   %2 = tail call i32 @llvm.ppc.vsx.xvcmpeqdp.p(i32 2, <2 x double> %mul, <2 x double> %mul11) #0

diff  --git a/llvm/test/CodeGen/PowerPC/retaddr.ll b/llvm/test/CodeGen/PowerPC/retaddr.ll
index 7cae2db9ad6d..6dd896a5f0dc 100644
--- a/llvm/test/CodeGen/PowerPC/retaddr.ll
+++ b/llvm/test/CodeGen/PowerPC/retaddr.ll
@@ -23,12 +23,12 @@
 ; PPC64: mtlr 0
 ; PPC64: blr
 
-define void @foo(i8** %X) nounwind {
+define void @foo(ptr %X) nounwind {
 entry:
-	%tmp = tail call i8* @llvm.returnaddress( i32 0 )		; <i8*> [#uses=1]
-	store i8* %tmp, i8** %X, align 4
+	%tmp = tail call ptr @llvm.returnaddress( i32 0 )		; <ptr> [#uses=1]
+	store ptr %tmp, ptr %X, align 4
 	ret void
 }
 
-declare i8* @llvm.returnaddress(i32)
+declare ptr @llvm.returnaddress(i32)
 

diff  --git a/llvm/test/CodeGen/PowerPC/retaddr2.ll b/llvm/test/CodeGen/PowerPC/retaddr2.ll
index 7fb381e6d667..9ac082e99b77 100644
--- a/llvm/test/CodeGen/PowerPC/retaddr2.ll
+++ b/llvm/test/CodeGen/PowerPC/retaddr2.ll
@@ -3,10 +3,10 @@ target datalayout = "E-m:e-i64:64-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
 ; Function Attrs: nounwind readnone
-define i8* @test1() #0 {
+define ptr @test1() #0 {
 entry:
-  %0 = tail call i8* @llvm.returnaddress(i32 0)
-  ret i8* %0
+  %0 = tail call ptr @llvm.returnaddress(i32 0)
+  ret ptr %0
 }
 
 ; CHECK-LABEL: @test1
@@ -18,7 +18,7 @@ entry:
 ; CHECK: blr
 
 ; Function Attrs: nounwind readnone
-declare i8* @llvm.returnaddress(i32) #0
+declare ptr @llvm.returnaddress(i32) #0
 
 attributes #0 = { nounwind readnone }
 

diff  --git a/llvm/test/CodeGen/PowerPC/retaddr_multi_levels.ll b/llvm/test/CodeGen/PowerPC/retaddr_multi_levels.ll
index 7e55f05f1df9..8cda3600beff 100644
--- a/llvm/test/CodeGen/PowerPC/retaddr_multi_levels.ll
+++ b/llvm/test/CodeGen/PowerPC/retaddr_multi_levels.ll
@@ -8,9 +8,9 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-aix \
 ; RUN:   -mcpu=pwr7 | FileCheck %s -check-prefix=CHECK-32B-BE
 
-declare i8* @llvm.returnaddress(i32) nounwind readnone
+declare ptr @llvm.returnaddress(i32) nounwind readnone
 
-define i8* @test0() nounwind readnone {
+define ptr @test0() nounwind readnone {
 ; CHECK-64B-LE-LABEL: test0:
 ; CHECK-64B-LE:       # %bb.0: # %entry
 ; CHECK-64B-LE-NEXT:    mflr 0
@@ -44,11 +44,11 @@ define i8* @test0() nounwind readnone {
 ; CHECK-32B-BE-NEXT:    mtlr 0
 ; CHECK-32B-BE-NEXT:    blr
 entry:
-  %0 = tail call i8* @llvm.returnaddress(i32 0);
-  ret i8* %0
+  %0 = tail call ptr @llvm.returnaddress(i32 0);
+  ret ptr %0
 }
 
-define i8* @test1() nounwind readnone {
+define ptr @test1() nounwind readnone {
 ; CHECK-64B-LE-LABEL: test1:
 ; CHECK-64B-LE:       # %bb.0: # %entry
 ; CHECK-64B-LE-NEXT:    mflr 0
@@ -88,11 +88,11 @@ define i8* @test1() nounwind readnone {
 ; CHECK-32B-BE-NEXT:    mtlr 0
 ; CHECK-32B-BE-NEXT:    blr
 entry:
-  %0 = tail call i8* @llvm.returnaddress(i32 1);
-  ret i8* %0
+  %0 = tail call ptr @llvm.returnaddress(i32 1);
+  ret ptr %0
 }
 
-define i8* @test2() nounwind readnone {
+define ptr @test2() nounwind readnone {
 ; CHECK-64B-LE-LABEL: test2:
 ; CHECK-64B-LE:       # %bb.0: # %entry
 ; CHECK-64B-LE-NEXT:    mflr 0
@@ -135,6 +135,6 @@ define i8* @test2() nounwind readnone {
 ; CHECK-32B-BE-NEXT:    mtlr 0
 ; CHECK-32B-BE-NEXT:    blr
 entry:
-  %0 = tail call i8* @llvm.returnaddress(i32 2);
-  ret i8* %0
+  %0 = tail call ptr @llvm.returnaddress(i32 2);
+  ret ptr %0
 }

diff  --git a/llvm/test/CodeGen/PowerPC/return-val-i128.ll b/llvm/test/CodeGen/PowerPC/return-val-i128.ll
index 11e5fdc10fad..379761d8385b 100644
--- a/llvm/test/CodeGen/PowerPC/return-val-i128.ll
+++ b/llvm/test/CodeGen/PowerPC/return-val-i128.ll
@@ -2,34 +2,34 @@
 
 define i128 @__fixsfdi(float %a) {
 entry:
-	%a_addr = alloca float		; <float*> [#uses=4]
-	%retval = alloca i128, align 16		; <i128*> [#uses=2]
-	%tmp = alloca i128, align 16		; <i128*> [#uses=3]
+	%a_addr = alloca float		; <ptr> [#uses=4]
+	%retval = alloca i128, align 16		; <ptr> [#uses=2]
+	%tmp = alloca i128, align 16		; <ptr> [#uses=3]
 	%"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
-	store float %a, float* %a_addr
-	%tmp1 = load float, float* %a_addr, align 4		; <float> [#uses=1]
+	store float %a, ptr %a_addr
+	%tmp1 = load float, ptr %a_addr, align 4		; <float> [#uses=1]
 	%tmp2 = fcmp olt float %tmp1, 0.000000e+00		; <i1> [#uses=1]
 	%tmp23 = zext i1 %tmp2 to i8		; <i8> [#uses=1]
 	%toBool = icmp ne i8 %tmp23, 0		; <i1> [#uses=1]
 	br i1 %toBool, label %bb, label %bb8
 bb:		; preds = %entry
-	%tmp4 = load float, float* %a_addr, align 4		; <float> [#uses=1]
+	%tmp4 = load float, ptr %a_addr, align 4		; <float> [#uses=1]
 	%tmp5 = fsub float -0.000000e+00, %tmp4		; <float> [#uses=1]
 	%tmp6 = call i128 @__fixunssfDI( float %tmp5 ) nounwind 		; <i128> [#uses=1]
 	%tmp7 = sub i128 0, %tmp6		; <i128> [#uses=1]
-	store i128 %tmp7, i128* %tmp, align 16
+	store i128 %tmp7, ptr %tmp, align 16
 	br label %bb11
 bb8:		; preds = %entry
-	%tmp9 = load float, float* %a_addr, align 4		; <float> [#uses=1]
+	%tmp9 = load float, ptr %a_addr, align 4		; <float> [#uses=1]
 	%tmp10 = call i128 @__fixunssfDI( float %tmp9 ) nounwind 		; <i128> [#uses=1]
-	store i128 %tmp10, i128* %tmp, align 16
+	store i128 %tmp10, ptr %tmp, align 16
 	br label %bb11
 bb11:		; preds = %bb8, %bb
-	%tmp12 = load i128, i128* %tmp, align 16		; <i128> [#uses=1]
-	store i128 %tmp12, i128* %retval, align 16
+	%tmp12 = load i128, ptr %tmp, align 16		; <i128> [#uses=1]
+	store i128 %tmp12, ptr %retval, align 16
 	br label %return
 return:		; preds = %bb11
-	%retval13 = load i128, i128* %retval		; <i128> [#uses=1]
+	%retval13 = load i128, ptr %retval		; <i128> [#uses=1]
 	ret i128 %retval13
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/rlwimi-and-or-bits.ll b/llvm/test/CodeGen/PowerPC/rlwimi-and-or-bits.ll
index 4d37d691d862..03082fc106fc 100644
--- a/llvm/test/CodeGen/PowerPC/rlwimi-and-or-bits.ll
+++ b/llvm/test/CodeGen/PowerPC/rlwimi-and-or-bits.ll
@@ -12,9 +12,9 @@ entry:
 ; CHECK-NOT: rlwimi
 ; CHECK: andi
 
-  %0 = load i32, i32* @m, align 4
+  %0 = load i32, ptr @m, align 4
   %or = or i32 %0, 250
-  store i32 %or, i32* @m, align 4
+  store i32 %or, ptr @m, align 4
   %and = and i32 %or, 249
   %sub.i = sub i32 %and, 0
   %sext = shl i32 %sub.i, 24

diff  --git a/llvm/test/CodeGen/PowerPC/rlwimi-and.ll b/llvm/test/CodeGen/PowerPC/rlwimi-and.ll
index d512f51a76e7..b6287ca30927 100644
--- a/llvm/test/CodeGen/PowerPC/rlwimi-and.ll
+++ b/llvm/test/CodeGen/PowerPC/rlwimi-and.ll
@@ -14,16 +14,16 @@ codeRepl12:                                       ; preds = %codeRepl4
   unreachable
 
 codeRepl17:                                       ; preds = %codeRepl4
-  %0 = load i8, i8* undef, align 2
+  %0 = load i8, ptr undef, align 2
   %1 = and i8 %0, 1
   %not.tobool.i.i.i = icmp eq i8 %1, 0
   %2 = select i1 %not.tobool.i.i.i, i16 0, i16 256
-  %3 = load i8, i8* undef, align 1
+  %3 = load i8, ptr undef, align 1
   %4 = and i8 %3, 1
   %not.tobool.i.1.i.i = icmp eq i8 %4, 0
   %rvml38.sroa.1.1.insert.ext = select i1 %not.tobool.i.1.i.i, i16 0, i16 1
   %rvml38.sroa.0.0.insert.insert = or i16 %rvml38.sroa.1.1.insert.ext, %2
-  store i16 %rvml38.sroa.0.0.insert.insert, i16* undef, align 2
+  store i16 %rvml38.sroa.0.0.insert.insert, ptr undef, align 2
   unreachable
 
 ; CHECK: @test

diff  --git a/llvm/test/CodeGen/PowerPC/rlwimi-commute.ll b/llvm/test/CodeGen/PowerPC/rlwimi-commute.ll
index e06c339584ee..d59b9f45b0b3 100644
--- a/llvm/test/CodeGen/PowerPC/rlwimi-commute.ll
+++ b/llvm/test/CodeGen/PowerPC/rlwimi-commute.ll
@@ -3,25 +3,25 @@
 
 ; Make sure there is no register-register copies here.
 
-define void @test1(i32* %A, i32* %B, i32* %D, i32* %E) {
-	%A.upgrd.1 = load i32, i32* %A		; <i32> [#uses=2]
-	%B.upgrd.2 = load i32, i32* %B		; <i32> [#uses=1]
+define void @test1(ptr %A, ptr %B, ptr %D, ptr %E) {
+	%A.upgrd.1 = load i32, ptr %A		; <i32> [#uses=2]
+	%B.upgrd.2 = load i32, ptr %B		; <i32> [#uses=1]
 	%X = and i32 %A.upgrd.1, 15		; <i32> [#uses=1]
 	%Y = and i32 %B.upgrd.2, -16		; <i32> [#uses=1]
 	%Z = or i32 %X, %Y		; <i32> [#uses=1]
-	store i32 %Z, i32* %D
-	store i32 %A.upgrd.1, i32* %E
+	store i32 %Z, ptr %D
+	store i32 %A.upgrd.1, ptr %E
 	ret void
 }
 
-define void @test2(i32* %A, i32* %B, i32* %D, i32* %E) {
-	%A.upgrd.3 = load i32, i32* %A		; <i32> [#uses=1]
-	%B.upgrd.4 = load i32, i32* %B		; <i32> [#uses=2]
+define void @test2(ptr %A, ptr %B, ptr %D, ptr %E) {
+	%A.upgrd.3 = load i32, ptr %A		; <i32> [#uses=1]
+	%B.upgrd.4 = load i32, ptr %B		; <i32> [#uses=2]
 	%X = and i32 %A.upgrd.3, 15		; <i32> [#uses=1]
 	%Y = and i32 %B.upgrd.4, -16		; <i32> [#uses=1]
 	%Z = or i32 %X, %Y		; <i32> [#uses=1]
-	store i32 %Z, i32* %D
-	store i32 %B.upgrd.4, i32* %E
+	store i32 %Z, ptr %D
+	store i32 %B.upgrd.4, ptr %E
 	ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/rlwimi-dyn-and.ll b/llvm/test/CodeGen/PowerPC/rlwimi-dyn-and.ll
index 6e2802f6ff9f..173a73f7c2dd 100644
--- a/llvm/test/CodeGen/PowerPC/rlwimi-dyn-and.ll
+++ b/llvm/test/CodeGen/PowerPC/rlwimi-dyn-and.ll
@@ -4,13 +4,13 @@ target triple = "powerpc64-unknown-linux-gnu"
 
 define i32 @test1() #0 {
 entry:
-  %conv67.reload = load i32, i32* undef
+  %conv67.reload = load i32, ptr undef
   %const = bitcast i32 65535 to i32
   br label %next
 
 next:
   %shl161 = shl nuw nsw i32 %conv67.reload, 15
-  %0 = load i8, i8* undef, align 1
+  %0 = load i8, ptr undef, align 1
   %conv169 = zext i8 %0 to i32
   %shl170 = shl nuw nsw i32 %conv169, 7
   %const_mat = add i32 %const, -32767
@@ -25,13 +25,13 @@ next:
 
 define i32 @test2() #0 {
 entry:
-  %conv67.reload = load i32, i32* undef
+  %conv67.reload = load i32, ptr undef
   %const = bitcast i32 65535 to i32
   br label %next
 
 next:
   %shl161 = shl nuw nsw i32 %conv67.reload, 15
-  %0 = load i8, i8* undef, align 1
+  %0 = load i8, ptr undef, align 1
   %conv169 = zext i8 %0 to i32
   %shl170 = shl nuw nsw i32 %conv169, 7
   %shl161.masked = and i32 %shl161, 32768

diff  --git a/llvm/test/CodeGen/PowerPC/rlwimi-keep-rsh.ll b/llvm/test/CodeGen/PowerPC/rlwimi-keep-rsh.ll
index e66d0c0770c8..fa04f4fc9941 100644
--- a/llvm/test/CodeGen/PowerPC/rlwimi-keep-rsh.ll
+++ b/llvm/test/CodeGen/PowerPC/rlwimi-keep-rsh.ll
@@ -19,7 +19,7 @@ entry:
   %tmp6 = and i32 %tmp2, %tmp5
   %tmp7 = shl i32 %c, 8
   %tmp8 = or i32 %tmp6, %tmp7
-  store i32 %tmp8, i32* @foo, align 4
+  store i32 %tmp8, ptr @foo, align 4
   br label %return
 
 return:

diff  --git a/llvm/test/CodeGen/PowerPC/rm-zext.ll b/llvm/test/CodeGen/PowerPC/rm-zext.ll
index a2e640a260c2..81c6b9931e7e 100644
--- a/llvm/test/CodeGen/PowerPC/rm-zext.ll
+++ b/llvm/test/CodeGen/PowerPC/rm-zext.ll
@@ -43,9 +43,9 @@ entry:
 declare i32 @llvm.bswap.i32(i32) #0
 
 ; Function Attrs: nounwind readonly
-define zeroext i32 @bs32(i32* nocapture readonly %x) #1 {
+define zeroext i32 @bs32(ptr nocapture readonly %x) #1 {
 entry:
-  %0 = load i32, i32* %x, align 4
+  %0 = load i32, ptr %x, align 4
   %1 = tail call i32 @llvm.bswap.i32(i32 %0)
   ret i32 %1
 
@@ -55,9 +55,9 @@ entry:
 }
 
 ; Function Attrs: nounwind readonly
-define zeroext i16 @bs16(i16* nocapture readonly %x) #1 {
+define zeroext i16 @bs16(ptr nocapture readonly %x) #1 {
 entry:
-  %0 = load i16, i16* %x, align 2
+  %0 = load i16, ptr %x, align 2
   %1 = tail call i16 @llvm.bswap.i16(i16 %0)
   ret i16 %1
 

diff  --git a/llvm/test/CodeGen/PowerPC/rs-undef-use.ll b/llvm/test/CodeGen/PowerPC/rs-undef-use.ll
index 8c902ad63429..0fccc5469f3a 100644
--- a/llvm/test/CodeGen/PowerPC/rs-undef-use.ll
+++ b/llvm/test/CodeGen/PowerPC/rs-undef-use.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 < %s
 target triple = "powerpc64-unknown-linux-gnu"
 
-define void @autogen_SD156869(i8*, i64*) {
+define void @autogen_SD156869(ptr, ptr) {
 BB:
   %A3 = alloca <2 x i1>
   %A2 = alloca <8 x i32>
@@ -11,31 +11,28 @@ CF:                                               ; preds = %CF85, %CF, %BB
   br i1 undef, label %CF, label %CF82.critedge
 
 CF82.critedge:                                    ; preds = %CF
-  store i8 -59, i8* %0
+  store i8 -59, ptr %0
   br label %CF82
 
 CF82:                                             ; preds = %CF82, %CF82.critedge
-  %L17 = load i8, i8* %0
+  %L17 = load i8, ptr %0
   %E18 = extractelement <2 x i64> undef, i32 0
-  %PC = bitcast <2 x i1>* %A3 to i64*
   br i1 undef, label %CF82, label %CF84.critedge
 
 CF84.critedge:                                    ; preds = %CF82
-  store i64 455385, i64* %PC
+  store i64 455385, ptr %A3
   br label %CF84
 
 CF84:                                             ; preds = %CF84, %CF84.critedge
-  %L40 = load i64, i64* %PC
-  store i64 -1, i64* %PC
+  %L40 = load i64, ptr %A3
+  store i64 -1, ptr %A3
   %Sl46 = select i1 undef, i1 undef, i1 false
   br i1 %Sl46, label %CF84, label %CF85
 
 CF85:                                             ; preds = %CF84
-  %L47 = load i64, i64* %PC
-  store i64 %E18, i64* %PC
-  %PC52 = bitcast <8 x i32>* %A2 to ppc_fp128*
-  store ppc_fp128 0xM4D436562A0416DE00000000000000000, ppc_fp128* %PC52
-  %PC59 = bitcast i64* %1 to i8*
+  %L47 = load i64, ptr %A3
+  store i64 %E18, ptr %A3
+  store ppc_fp128 0xM4D436562A0416DE00000000000000000, ptr %A2
   %Cmp61 = icmp slt i64 %L47, %L40
   br i1 %Cmp61, label %CF, label %CF77
 
@@ -43,6 +40,6 @@ CF77:                                             ; preds = %CF77, %CF85
   br i1 undef, label %CF77, label %CF81
 
 CF81:                                             ; preds = %CF77
-  store i8 %L17, i8* %PC59
+  store i8 %L17, ptr %1
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/s000-alias-misched.ll b/llvm/test/CodeGen/PowerPC/s000-alias-misched.ll
index 20071ea1710c..e217aaf19823 100644
--- a/llvm/test/CodeGen/PowerPC/s000-alias-misched.ll
+++ b/llvm/test/CodeGen/PowerPC/s000-alias-misched.ll
@@ -15,13 +15,13 @@ target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
 
 declare void @check(i32 signext) nounwind
 
-declare signext i32 @printf(i8* nocapture, ...) nounwind
+declare signext i32 @printf(ptr nocapture, ...) nounwind
 
-declare signext i32 @init(i8*) nounwind
+declare signext i32 @init(ptr) nounwind
 
 define signext i32 @s000() nounwind {
 entry:
-  %call = tail call signext i32 @init(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str1, i64 0, i64 0))
+  %call = tail call signext i32 @init(ptr @.str1)
   %call1 = tail call i64 @clock() nounwind
   br label %for.cond2.preheader
 
@@ -33,37 +33,29 @@ for.cond2.preheader:                              ; preds = %for.end, %entry
 
 for.body4:                                        ; preds = %for.body4, %for.cond2.preheader
   %indvars.iv = phi i64 [ 0, %for.cond2.preheader ], [ %indvars.iv.next.15, %for.body4 ]
-  %arrayidx = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv
-  %arrayidx6 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv
-  %0 = bitcast double* %arrayidx to <1 x double>*
-  %1 = load <1 x double>, <1 x double>* %0, align 32
-  %add = fadd <1 x double> %1, <double 1.000000e+00>
-  %2 = bitcast double* %arrayidx6 to <1 x double>*
-  store <1 x double> %add, <1 x double>* %2, align 32
+  %arrayidx = getelementptr inbounds [16000 x double], ptr @Y, i64 0, i64 %indvars.iv
+  %arrayidx6 = getelementptr inbounds [16000 x double], ptr @X, i64 0, i64 %indvars.iv
+  %0 = load <1 x double>, ptr %arrayidx, align 32
+  %add = fadd <1 x double> %0, <double 1.000000e+00>
+  store <1 x double> %add, ptr %arrayidx6, align 32
   %indvars.iv.next.322 = or i64 %indvars.iv, 4
-  %arrayidx.4 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.322
-  %arrayidx6.4 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.322
-  %3 = bitcast double* %arrayidx.4 to <1 x double>*
-  %4 = load <1 x double>, <1 x double>* %3, align 32
-  %add.4 = fadd <1 x double> %4, <double 1.000000e+00>
-  %5 = bitcast double* %arrayidx6.4 to <1 x double>*
-  store <1 x double> %add.4, <1 x double>* %5, align 32
+  %arrayidx.4 = getelementptr inbounds [16000 x double], ptr @Y, i64 0, i64 %indvars.iv.next.322
+  %arrayidx6.4 = getelementptr inbounds [16000 x double], ptr @X, i64 0, i64 %indvars.iv.next.322
+  %1 = load <1 x double>, ptr %arrayidx.4, align 32
+  %add.4 = fadd <1 x double> %1, <double 1.000000e+00>
+  store <1 x double> %add.4, ptr %arrayidx6.4, align 32
   %indvars.iv.next.726 = or i64 %indvars.iv, 8
-  %arrayidx.8 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.726
-  %arrayidx6.8 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.726
-  %6 = bitcast double* %arrayidx.8 to <1 x double>*
-  %7 = load <1 x double>, <1 x double>* %6, align 32
-  %add.8 = fadd <1 x double> %7, <double 1.000000e+00>
-  %8 = bitcast double* %arrayidx6.8 to <1 x double>*
-  store <1 x double> %add.8, <1 x double>* %8, align 32
+  %arrayidx.8 = getelementptr inbounds [16000 x double], ptr @Y, i64 0, i64 %indvars.iv.next.726
+  %arrayidx6.8 = getelementptr inbounds [16000 x double], ptr @X, i64 0, i64 %indvars.iv.next.726
+  %2 = load <1 x double>, ptr %arrayidx.8, align 32
+  %add.8 = fadd <1 x double> %2, <double 1.000000e+00>
+  store <1 x double> %add.8, ptr %arrayidx6.8, align 32
   %indvars.iv.next.1130 = or i64 %indvars.iv, 12
-  %arrayidx.12 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1130
-  %arrayidx6.12 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1130
-  %9 = bitcast double* %arrayidx.12 to <1 x double>*
-  %10 = load <1 x double>, <1 x double>* %9, align 32
-  %add.12 = fadd <1 x double> %10, <double 1.000000e+00>
-  %11 = bitcast double* %arrayidx6.12 to <1 x double>*
-  store <1 x double> %add.12, <1 x double>* %11, align 32
+  %arrayidx.12 = getelementptr inbounds [16000 x double], ptr @Y, i64 0, i64 %indvars.iv.next.1130
+  %arrayidx6.12 = getelementptr inbounds [16000 x double], ptr @X, i64 0, i64 %indvars.iv.next.1130
+  %3 = load <1 x double>, ptr %arrayidx.12, align 32
+  %add.12 = fadd <1 x double> %3, <double 1.000000e+00>
+  store <1 x double> %add.12, ptr %arrayidx6.12, align 32
   %indvars.iv.next.15 = add i64 %indvars.iv, 16
   %lftr.wideiv.15 = trunc i64 %indvars.iv.next.15 to i32
   %exitcond.15 = icmp eq i32 %lftr.wideiv.15, 16000
@@ -76,7 +68,7 @@ for.body4:                                        ; preds = %for.body4, %for.con
 ; CHECK: bdnz
 
 for.end:                                          ; preds = %for.body4
-  %call7 = tail call signext i32 @dummy(double* getelementptr inbounds ([16000 x double], [16000 x double]* @X, i64 0, i64 0), double* getelementptr inbounds ([16000 x double], [16000 x double]* @Y, i64 0, i64 0), double* getelementptr inbounds ([16000 x double], [16000 x double]* @Z, i64 0, i64 0), double* getelementptr inbounds ([16000 x double], [16000 x double]* @U, i64 0, i64 0), double* getelementptr inbounds ([16000 x double], [16000 x double]* @V, i64 0, i64 0), [256 x double]* getelementptr inbounds ([256 x [256 x double]], [256 x [256 x double]]* @aa, i64 0, i64 0), [256 x double]* getelementptr inbounds ([256 x [256 x double]], [256 x [256 x double]]* @bb, i64 0, i64 0), [256 x double]* getelementptr inbounds ([256 x [256 x double]], [256 x [256 x double]]* @cc, i64 0, i64 0), double 0.000000e+00) nounwind
+  %call7 = tail call signext i32 @dummy(ptr @X, ptr @Y, ptr @Z, ptr @U, ptr @V, ptr @aa, ptr @bb, ptr @cc, double 0.000000e+00) nounwind
   %inc9 = add nsw i32 %nl.018, 1
   %exitcond = icmp eq i32 %inc9, 400000
   br i1 %exitcond, label %for.end10, label %for.cond2.preheader
@@ -86,11 +78,11 @@ for.end10:                                        ; preds = %for.end
   %sub = sub nsw i64 %call11, %call1
   %conv = sitofp i64 %sub to double
   %div = fdiv double %conv, 1.000000e+06
-  %call12 = tail call signext i32 (i8*, ...) @printf(i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str137, i64 0, i64 0), double %div) nounwind
+  %call12 = tail call signext i32 (ptr, ...) @printf(ptr @.str137, double %div) nounwind
   tail call void @check(i32 signext 1)
   ret i32 0
 }
 
 declare i64 @clock() nounwind
 
-declare signext i32 @dummy(double*, double*, double*, double*, double*, [256 x double]*, [256 x double]*, [256 x double]*, double)
+declare signext i32 @dummy(ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, double)

diff  --git a/llvm/test/CodeGen/PowerPC/sat-register-clobber.ll b/llvm/test/CodeGen/PowerPC/sat-register-clobber.ll
index 5c91497b2b1d..428790bece81 100644
--- a/llvm/test/CodeGen/PowerPC/sat-register-clobber.ll
+++ b/llvm/test/CodeGen/PowerPC/sat-register-clobber.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -ppc-asm-full-reg-names -mtriple=powerpc64le-unknown-linux-gnu \
 ; RUN:   %s -o - -verify-machineinstrs -mcpu=pwr9 | FileCheck %s
 
-define <4 x i32> @test(<4 x i32> %a, <4 x i32> %b, <4 x i32> %aa, <8 x i16>* %FromVSCR) {
+define <4 x i32> @test(<4 x i32> %a, <4 x i32> %b, <4 x i32> %aa, ptr %FromVSCR) {
 ; CHECK-LABEL: test:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsumsws v5, v2, v3
@@ -24,7 +24,7 @@ entry:
   %1 = tail call <8 x i16> @llvm.ppc.altivec.vpkswus(<4 x i32> %a, <4 x i32> %b)
   %2 = bitcast <8 x i16> %1 to <4 x i32>
   %3 = tail call <8 x i16> @llvm.ppc.altivec.mfvscr()
-  store <8 x i16> %3, <8 x i16>* %FromVSCR, align 16
+  store <8 x i16> %3, ptr %FromVSCR, align 16
   %4 = tail call <8 x i16> @llvm.ppc.altivec.vpkswus(<4 x i32> %b, <4 x i32> %aa)
   %5 = bitcast <8 x i16> %4 to <4 x i32>
   %add1 = add <4 x i32> %add, %0

diff  --git a/llvm/test/CodeGen/PowerPC/save-crbp-ppc32svr4.ll b/llvm/test/CodeGen/PowerPC/save-crbp-ppc32svr4.ll
index 2d003da447da..514f96b22035 100644
--- a/llvm/test/CodeGen/PowerPC/save-crbp-ppc32svr4.ll
+++ b/llvm/test/CodeGen/PowerPC/save-crbp-ppc32svr4.ll
@@ -22,7 +22,7 @@ target triple = "powerpc-unknown-freebsd"
 define i64 @fred(double %a0) local_unnamed_addr #0 {
 b1:
   %v2 = alloca i64, align 128
-  store i64 0, i64* %v2
+  store i64 0, ptr %v2
   %a1 = tail call double asm "fadd $0, $1, $2", "=f,f,f,~{cr2}"(double %a0, double %a0)
   %v3 = fcmp olt double %a1, 0x43E0000000000000
   br i1 %v3, label %b4, label %b8
@@ -30,7 +30,7 @@ b1:
 b4:                                               ; preds = %b1
   %v5 = fcmp olt double %a0, 0xC3E0000000000000
   %v6 = fptosi double %a0 to i64
-  store i64 %v6, i64* %v2
+  store i64 %v6, ptr %v2
   %v7 = select i1 %v5, i64 -9223372036854775808, i64 %v6
   br label %b15
 
@@ -49,7 +49,7 @@ b12:                                              ; preds = %b8
 
 b15:                                              ; preds = %b12, %b10, %b4
   %v16 = phi i64 [ %v7, %b4 ], [ %v11, %b10 ], [ %v14, %b12 ]
-  %v17 = load i64, i64* %v2
+  %v17 = load i64, ptr %v2
   %v18 = add i64 %v17, %v16
   ret i64 %v18
 }

diff  --git a/llvm/test/CodeGen/PowerPC/scalar-double-ldst.ll b/llvm/test/CodeGen/PowerPC/scalar-double-ldst.ll
index 173560f830fb..62c6117e66e0 100644
--- a/llvm/test/CodeGen/PowerPC/scalar-double-ldst.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar-double-ldst.ll
@@ -33,14 +33,14 @@ define dso_local double @ld_0_double_uint8_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    xscvuxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = uitofp i8 %1 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align16_double_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align16_double_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-POSTP8-LABEL: ld_align16_double_uint8_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    addi r3, r3, 8
@@ -55,14 +55,14 @@ define dso_local double @ld_align16_double_uint8_t(i8* nocapture readonly %ptr)
 ; CHECK-P8-NEXT:    xscvuxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = uitofp i8 %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align32_double_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align32_double_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_double_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 99999000
@@ -87,14 +87,14 @@ define dso_local double @ld_align32_double_uint8_t(i8* nocapture readonly %ptr)
 ; CHECK-P8-NEXT:    xscvuxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = uitofp i8 %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align64_double_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align64_double_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_double_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -122,14 +122,14 @@ define dso_local double @ld_align64_double_uint8_t(i8* nocapture readonly %ptr)
 ; CHECK-P8-NEXT:    xscvuxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = uitofp i8 %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_reg_double_uint8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local double @ld_reg_double_uint8_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-POSTP8-LABEL: ld_reg_double_uint8_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    lxsibzx f0, r3, r4
@@ -143,8 +143,8 @@ define dso_local double @ld_reg_double_uint8_t(i8* nocapture readonly %ptr, i64
 ; CHECK-P8-NEXT:    xscvuxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = uitofp i8 %0 to double
   ret double %conv
 }
@@ -168,8 +168,8 @@ define dso_local double @ld_or_double_uint8_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv1 = uitofp i8 %1 to double
   ret double %conv1
 }
@@ -192,8 +192,8 @@ define dso_local double @ld_not_disjoint16_double_uint8_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = uitofp i8 %1 to double
   ret double %conv
 }
@@ -218,8 +218,8 @@ define dso_local double @ld_disjoint_align16_double_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 8
   %conv = uitofp i8 %1 to double
   ret double %conv
 }
@@ -244,8 +244,8 @@ define dso_local double @ld_not_disjoint32_double_uint8_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = uitofp i8 %1 to double
   ret double %conv
 }
@@ -284,8 +284,8 @@ define dso_local double @ld_disjoint_align32_double_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 16
   %conv = uitofp i8 %1 to double
   ret double %conv
 }
@@ -326,8 +326,8 @@ define dso_local double @ld_not_disjoint64_double_uint8_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = uitofp i8 %1 to double
   ret double %conv
 }
@@ -366,8 +366,8 @@ define dso_local double @ld_disjoint_align64_double_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 4096
   %conv = uitofp i8 %1 to double
   ret double %conv
 }
@@ -388,7 +388,7 @@ define dso_local double @ld_cst_align16_double_uint8_t() {
 ; CHECK-P8-NEXT:    xscvuxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+  %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = uitofp i8 %0 to double
   ret double %conv
 }
@@ -418,7 +418,7 @@ define dso_local double @ld_cst_align32_double_uint8_t() {
 ; CHECK-P8-NEXT:    xscvuxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+  %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = uitofp i8 %0 to double
   ret double %conv
 }
@@ -452,7 +452,7 @@ define dso_local double @ld_cst_align64_double_uint8_t() {
 ; CHECK-P8-NEXT:    xscvuxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+  %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = uitofp i8 %0 to double
   ret double %conv
 }
@@ -474,14 +474,14 @@ define dso_local double @ld_0_double_int8_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    xscvsxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sitofp i8 %1 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align16_double_int8_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align16_double_int8_t(ptr nocapture readonly %ptr) {
 ; CHECK-POSTP8-LABEL: ld_align16_double_int8_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    addi r3, r3, 8
@@ -498,14 +498,14 @@ define dso_local double @ld_align16_double_int8_t(i8* nocapture readonly %ptr) {
 ; CHECK-P8-NEXT:    xscvsxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sitofp i8 %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align32_double_int8_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align32_double_int8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_double_int8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 99999000
@@ -533,14 +533,14 @@ define dso_local double @ld_align32_double_int8_t(i8* nocapture readonly %ptr) {
 ; CHECK-P8-NEXT:    xscvsxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sitofp i8 %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align64_double_int8_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align64_double_int8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_double_int8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -571,14 +571,14 @@ define dso_local double @ld_align64_double_int8_t(i8* nocapture readonly %ptr) {
 ; CHECK-P8-NEXT:    xscvsxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sitofp i8 %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_reg_double_int8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local double @ld_reg_double_int8_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-POSTP8-LABEL: ld_reg_double_int8_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    lxsibzx v2, r3, r4
@@ -594,8 +594,8 @@ define dso_local double @ld_reg_double_int8_t(i8* nocapture readonly %ptr, i64 %
 ; CHECK-P8-NEXT:    xscvsxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sitofp i8 %0 to double
   ret double %conv
 }
@@ -621,8 +621,8 @@ define dso_local double @ld_or_double_int8_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv1 = sitofp i8 %1 to double
   ret double %conv1
 }
@@ -647,8 +647,8 @@ define dso_local double @ld_not_disjoint16_double_int8_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sitofp i8 %1 to double
   ret double %conv
 }
@@ -675,8 +675,8 @@ define dso_local double @ld_disjoint_align16_double_int8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 8
   %conv = sitofp i8 %1 to double
   ret double %conv
 }
@@ -703,8 +703,8 @@ define dso_local double @ld_not_disjoint32_double_int8_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sitofp i8 %1 to double
   ret double %conv
 }
@@ -746,8 +746,8 @@ define dso_local double @ld_disjoint_align32_double_int8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 16
   %conv = sitofp i8 %1 to double
   ret double %conv
 }
@@ -791,8 +791,8 @@ define dso_local double @ld_not_disjoint64_double_int8_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sitofp i8 %1 to double
   ret double %conv
 }
@@ -834,8 +834,8 @@ define dso_local double @ld_disjoint_align64_double_int8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 4096
   %conv = sitofp i8 %1 to double
   ret double %conv
 }
@@ -858,7 +858,7 @@ define dso_local double @ld_cst_align16_double_int8_t() {
 ; CHECK-P8-NEXT:    xscvsxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+  %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = sitofp i8 %0 to double
   ret double %conv
 }
@@ -891,7 +891,7 @@ define dso_local double @ld_cst_align32_double_int8_t() {
 ; CHECK-P8-NEXT:    xscvsxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+  %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = sitofp i8 %0 to double
   ret double %conv
 }
@@ -928,7 +928,7 @@ define dso_local double @ld_cst_align64_double_int8_t() {
 ; CHECK-P8-NEXT:    xscvsxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+  %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = sitofp i8 %0 to double
   ret double %conv
 }
@@ -948,14 +948,14 @@ define dso_local double @ld_0_double_uint16_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    xscvuxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = uitofp i16 %1 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align16_double_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align16_double_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-POSTP8-LABEL: ld_align16_double_uint16_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    addi r3, r3, 8
@@ -970,15 +970,14 @@ define dso_local double @ld_align16_double_uint16_t(i8* nocapture readonly %ptr)
 ; CHECK-P8-NEXT:    xscvuxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = uitofp i16 %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = uitofp i16 %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align32_double_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align32_double_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_double_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 99999000
@@ -1003,15 +1002,14 @@ define dso_local double @ld_align32_double_uint16_t(i8* nocapture readonly %ptr)
 ; CHECK-P8-NEXT:    xscvuxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = uitofp i16 %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = uitofp i16 %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align64_double_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align64_double_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_double_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -1039,15 +1037,14 @@ define dso_local double @ld_align64_double_uint16_t(i8* nocapture readonly %ptr)
 ; CHECK-P8-NEXT:    xscvuxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = uitofp i16 %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = uitofp i16 %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_reg_double_uint16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local double @ld_reg_double_uint16_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-POSTP8-LABEL: ld_reg_double_uint16_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    lxsihzx f0, r3, r4
@@ -1061,10 +1058,9 @@ define dso_local double @ld_reg_double_uint16_t(i8* nocapture readonly %ptr, i64
 ; CHECK-P8-NEXT:    xscvuxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = uitofp i16 %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = uitofp i16 %0 to double
   ret double %conv
 }
 
@@ -1087,8 +1083,8 @@ define dso_local double @ld_or_double_uint16_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv1 = uitofp i16 %1 to double
   ret double %conv1
 }
@@ -1111,8 +1107,8 @@ define dso_local double @ld_not_disjoint16_double_uint16_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = uitofp i16 %1 to double
   ret double %conv
 }
@@ -1137,8 +1133,8 @@ define dso_local double @ld_disjoint_align16_double_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 8
   %conv = uitofp i16 %1 to double
   ret double %conv
 }
@@ -1163,8 +1159,8 @@ define dso_local double @ld_not_disjoint32_double_uint16_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = uitofp i16 %1 to double
   ret double %conv
 }
@@ -1203,8 +1199,8 @@ define dso_local double @ld_disjoint_align32_double_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 16
   %conv = uitofp i16 %1 to double
   ret double %conv
 }
@@ -1245,8 +1241,8 @@ define dso_local double @ld_not_disjoint64_double_uint16_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = uitofp i16 %1 to double
   ret double %conv
 }
@@ -1285,8 +1281,8 @@ define dso_local double @ld_disjoint_align64_double_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 4096
   %conv = uitofp i16 %1 to double
   ret double %conv
 }
@@ -1307,7 +1303,7 @@ define dso_local double @ld_cst_align16_double_uint16_t() {
 ; CHECK-P8-NEXT:    xscvuxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+  %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = uitofp i16 %0 to double
   ret double %conv
 }
@@ -1337,7 +1333,7 @@ define dso_local double @ld_cst_align32_double_uint16_t() {
 ; CHECK-P8-NEXT:    xscvuxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+  %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = uitofp i16 %0 to double
   ret double %conv
 }
@@ -1371,7 +1367,7 @@ define dso_local double @ld_cst_align64_double_uint16_t() {
 ; CHECK-P8-NEXT:    xscvuxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+  %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = uitofp i16 %0 to double
   ret double %conv
 }
@@ -1392,14 +1388,14 @@ define dso_local double @ld_0_double_int16_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    xscvsxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = sitofp i16 %1 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align16_double_int16_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align16_double_int16_t(ptr nocapture readonly %ptr) {
 ; CHECK-POSTP8-LABEL: ld_align16_double_int16_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    addi r3, r3, 8
@@ -1415,15 +1411,14 @@ define dso_local double @ld_align16_double_int16_t(i8* nocapture readonly %ptr)
 ; CHECK-P8-NEXT:    xscvsxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = sitofp i16 %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = sitofp i16 %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align32_double_int16_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align32_double_int16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_double_int16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 99999000
@@ -1450,15 +1445,14 @@ define dso_local double @ld_align32_double_int16_t(i8* nocapture readonly %ptr)
 ; CHECK-P8-NEXT:    xscvsxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = sitofp i16 %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = sitofp i16 %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align64_double_int16_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align64_double_int16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_double_int16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -1488,15 +1482,14 @@ define dso_local double @ld_align64_double_int16_t(i8* nocapture readonly %ptr)
 ; CHECK-P8-NEXT:    xscvsxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = sitofp i16 %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = sitofp i16 %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_reg_double_int16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local double @ld_reg_double_int16_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-POSTP8-LABEL: ld_reg_double_int16_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    lxsihzx v2, r3, r4
@@ -1511,10 +1504,9 @@ define dso_local double @ld_reg_double_int16_t(i8* nocapture readonly %ptr, i64
 ; CHECK-P8-NEXT:    xscvsxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = sitofp i16 %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = sitofp i16 %0 to double
   ret double %conv
 }
 
@@ -1538,8 +1530,8 @@ define dso_local double @ld_or_double_int16_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv1 = sitofp i16 %1 to double
   ret double %conv1
 }
@@ -1563,8 +1555,8 @@ define dso_local double @ld_not_disjoint16_double_int16_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = sitofp i16 %1 to double
   ret double %conv
 }
@@ -1590,8 +1582,8 @@ define dso_local double @ld_disjoint_align16_double_int16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 8
   %conv = sitofp i16 %1 to double
   ret double %conv
 }
@@ -1617,8 +1609,8 @@ define dso_local double @ld_not_disjoint32_double_int16_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = sitofp i16 %1 to double
   ret double %conv
 }
@@ -1659,8 +1651,8 @@ define dso_local double @ld_disjoint_align32_double_int16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 16
   %conv = sitofp i16 %1 to double
   ret double %conv
 }
@@ -1703,8 +1695,8 @@ define dso_local double @ld_not_disjoint64_double_int16_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = sitofp i16 %1 to double
   ret double %conv
 }
@@ -1745,8 +1737,8 @@ define dso_local double @ld_disjoint_align64_double_int16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 4096
   %conv = sitofp i16 %1 to double
   ret double %conv
 }
@@ -1768,7 +1760,7 @@ define dso_local double @ld_cst_align16_double_int16_t() {
 ; CHECK-P8-NEXT:    xscvsxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+  %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = sitofp i16 %0 to double
   ret double %conv
 }
@@ -1800,7 +1792,7 @@ define dso_local double @ld_cst_align32_double_int16_t() {
 ; CHECK-P8-NEXT:    xscvsxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+  %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = sitofp i16 %0 to double
   ret double %conv
 }
@@ -1836,7 +1828,7 @@ define dso_local double @ld_cst_align64_double_int16_t() {
 ; CHECK-P8-NEXT:    xscvsxddp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+  %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = sitofp i16 %0 to double
   ret double %conv
 }
@@ -1849,14 +1841,14 @@ define dso_local double @ld_0_double_uint32_t(i64 %ptr) {
 ; CHECK-NEXT:    xscvuxddp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = uitofp i32 %1 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align16_double_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align16_double_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_double_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addi r3, r3, 8
@@ -1864,15 +1856,14 @@ define dso_local double @ld_align16_double_uint32_t(i8* nocapture readonly %ptr)
 ; CHECK-NEXT:    xscvuxddp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = uitofp i32 %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = uitofp i32 %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align32_double_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align32_double_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_double_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 99999000
@@ -1888,15 +1879,14 @@ define dso_local double @ld_align32_double_uint32_t(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    xscvuxddp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = uitofp i32 %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = uitofp i32 %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align64_double_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align64_double_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_double_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -1914,25 +1904,23 @@ define dso_local double @ld_align64_double_uint32_t(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    xscvuxddp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = uitofp i32 %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = uitofp i32 %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_reg_double_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local double @ld_reg_double_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_double_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfiwzx f0, r3, r4
 ; CHECK-NEXT:    xscvuxddp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = uitofp i32 %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = uitofp i32 %0 to double
   ret double %conv
 }
 
@@ -1947,8 +1935,8 @@ define dso_local double @ld_or_double_uint32_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv1 = uitofp i32 %1 to double
   ret double %conv1
 }
@@ -1963,8 +1951,8 @@ define dso_local double @ld_not_disjoint16_double_uint32_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = uitofp i32 %1 to double
   ret double %conv
 }
@@ -1981,8 +1969,8 @@ define dso_local double @ld_disjoint_align16_double_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 8
   %conv = uitofp i32 %1 to double
   ret double %conv
 }
@@ -1998,8 +1986,8 @@ define dso_local double @ld_not_disjoint32_double_uint32_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = uitofp i32 %1 to double
   ret double %conv
 }
@@ -2037,8 +2025,8 @@ define dso_local double @ld_disjoint_align32_double_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 16
   %conv = uitofp i32 %1 to double
   ret double %conv
 }
@@ -2067,8 +2055,8 @@ define dso_local double @ld_not_disjoint64_double_uint32_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = uitofp i32 %1 to double
   ret double %conv
 }
@@ -2096,8 +2084,8 @@ define dso_local double @ld_disjoint_align64_double_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4096
   %conv = uitofp i32 %1 to double
   ret double %conv
 }
@@ -2111,7 +2099,7 @@ define dso_local double @ld_cst_align16_double_uint32_t() {
 ; CHECK-NEXT:    xscvuxddp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 4080 to i32*), align 16
+  %0 = load i32, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = uitofp i32 %0 to double
   ret double %conv
 }
@@ -2133,7 +2121,7 @@ define dso_local double @ld_cst_align32_double_uint32_t() {
 ; CHECK-PREP10-NEXT:    xscvuxddp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 9999900 to i32*), align 4
+  %0 = load i32, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = uitofp i32 %0 to double
   ret double %conv
 }
@@ -2157,7 +2145,7 @@ define dso_local double @ld_cst_align64_double_uint32_t() {
 ; CHECK-PREP10-NEXT:    xscvuxddp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+  %0 = load i32, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = uitofp i32 %0 to double
   ret double %conv
 }
@@ -2170,14 +2158,14 @@ define dso_local double @ld_0_double_int32_t(i64 %ptr) {
 ; CHECK-NEXT:    xscvsxddp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = sitofp i32 %1 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align16_double_int32_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align16_double_int32_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_double_int32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addi r3, r3, 8
@@ -2185,15 +2173,14 @@ define dso_local double @ld_align16_double_int32_t(i8* nocapture readonly %ptr)
 ; CHECK-NEXT:    xscvsxddp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = sitofp i32 %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = sitofp i32 %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align32_double_int32_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align32_double_int32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_double_int32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 99999000
@@ -2209,15 +2196,14 @@ define dso_local double @ld_align32_double_int32_t(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    xscvsxddp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = sitofp i32 %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = sitofp i32 %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align64_double_int32_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align64_double_int32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_double_int32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -2235,25 +2221,23 @@ define dso_local double @ld_align64_double_int32_t(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    xscvsxddp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = sitofp i32 %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = sitofp i32 %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_reg_double_int32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local double @ld_reg_double_int32_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_double_int32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfiwax f0, r3, r4
 ; CHECK-NEXT:    xscvsxddp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = sitofp i32 %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = sitofp i32 %0 to double
   ret double %conv
 }
 
@@ -2268,8 +2252,8 @@ define dso_local double @ld_or_double_int32_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv1 = sitofp i32 %1 to double
   ret double %conv1
 }
@@ -2284,8 +2268,8 @@ define dso_local double @ld_not_disjoint16_double_int32_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = sitofp i32 %1 to double
   ret double %conv
 }
@@ -2302,8 +2286,8 @@ define dso_local double @ld_disjoint_align16_double_int32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 8
   %conv = sitofp i32 %1 to double
   ret double %conv
 }
@@ -2319,8 +2303,8 @@ define dso_local double @ld_not_disjoint32_double_int32_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = sitofp i32 %1 to double
   ret double %conv
 }
@@ -2358,8 +2342,8 @@ define dso_local double @ld_disjoint_align32_double_int32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 16
   %conv = sitofp i32 %1 to double
   ret double %conv
 }
@@ -2388,8 +2372,8 @@ define dso_local double @ld_not_disjoint64_double_int32_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = sitofp i32 %1 to double
   ret double %conv
 }
@@ -2417,8 +2401,8 @@ define dso_local double @ld_disjoint_align64_double_int32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4096
   %conv = sitofp i32 %1 to double
   ret double %conv
 }
@@ -2432,7 +2416,7 @@ define dso_local double @ld_cst_align16_double_int32_t() {
 ; CHECK-NEXT:    xscvsxddp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 4080 to i32*), align 16
+  %0 = load i32, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = sitofp i32 %0 to double
   ret double %conv
 }
@@ -2454,7 +2438,7 @@ define dso_local double @ld_cst_align32_double_int32_t() {
 ; CHECK-PREP10-NEXT:    xscvsxddp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 9999900 to i32*), align 4
+  %0 = load i32, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = sitofp i32 %0 to double
   ret double %conv
 }
@@ -2478,7 +2462,7 @@ define dso_local double @ld_cst_align64_double_int32_t() {
 ; CHECK-PREP10-NEXT:    xscvsxddp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+  %0 = load i32, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = sitofp i32 %0 to double
   ret double %conv
 }
@@ -2491,29 +2475,28 @@ define dso_local double @ld_0_double_uint64_t(i64 %ptr) {
 ; CHECK-NEXT:    xscvuxddp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = uitofp i64 %1 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align16_double_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align16_double_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_double_uint64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd f0, 8(r3)
 ; CHECK-NEXT:    xscvuxddp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = uitofp i64 %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = uitofp i64 %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align32_double_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align32_double_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_double_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfd f0, 99999000(r3), 0
@@ -2528,15 +2511,14 @@ define dso_local double @ld_align32_double_uint64_t(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    xscvuxddp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = uitofp i64 %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = uitofp i64 %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align64_double_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align64_double_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_double_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -2554,25 +2536,23 @@ define dso_local double @ld_align64_double_uint64_t(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    xscvuxddp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = uitofp i64 %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = uitofp i64 %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_reg_double_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local double @ld_reg_double_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_double_uint64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfdx f0, r3, r4
 ; CHECK-NEXT:    xscvuxddp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = uitofp i64 %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = uitofp i64 %0 to double
   ret double %conv
 }
 
@@ -2587,8 +2567,8 @@ define dso_local double @ld_or_double_uint64_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv1 = uitofp i64 %1 to double
   ret double %conv1
 }
@@ -2603,8 +2583,8 @@ define dso_local double @ld_not_disjoint16_double_uint64_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = uitofp i64 %1 to double
   ret double %conv
 }
@@ -2620,8 +2600,8 @@ define dso_local double @ld_disjoint_align16_double_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = uitofp i64 %1 to double
   ret double %conv
 }
@@ -2637,8 +2617,8 @@ define dso_local double @ld_not_disjoint32_double_uint64_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = uitofp i64 %1 to double
   ret double %conv
 }
@@ -2675,8 +2655,8 @@ define dso_local double @ld_disjoint_align32_double_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 16
   %conv = uitofp i64 %1 to double
   ret double %conv
 }
@@ -2705,8 +2685,8 @@ define dso_local double @ld_not_disjoint64_double_uint64_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = uitofp i64 %1 to double
   ret double %conv
 }
@@ -2734,8 +2714,8 @@ define dso_local double @ld_disjoint_align64_double_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 4096
   %conv = uitofp i64 %1 to double
   ret double %conv
 }
@@ -2748,7 +2728,7 @@ define dso_local double @ld_cst_align16_double_uint64_t() {
 ; CHECK-NEXT:    xscvuxddp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 4080 to i64*), align 16
+  %0 = load i64, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = uitofp i64 %0 to double
   ret double %conv
 }
@@ -2762,7 +2742,7 @@ define dso_local double @ld_cst_align32_double_uint64_t() {
 ; CHECK-NEXT:    xscvuxddp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 9999900 to i64*), align 8
+  %0 = load i64, ptr inttoptr (i64 9999900 to ptr), align 8
   %conv = uitofp i64 %0 to double
   ret double %conv
 }
@@ -2786,7 +2766,7 @@ define dso_local double @ld_cst_align64_double_uint64_t() {
 ; CHECK-PREP10-NEXT:    xscvuxddp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+  %0 = load i64, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = uitofp i64 %0 to double
   ret double %conv
 }
@@ -2799,29 +2779,28 @@ define dso_local double @ld_0_double_int64_t(i64 %ptr) {
 ; CHECK-NEXT:    xscvsxddp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = sitofp i64 %1 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align16_double_int64_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align16_double_int64_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_double_int64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd f0, 8(r3)
 ; CHECK-NEXT:    xscvsxddp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = sitofp i64 %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = sitofp i64 %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align32_double_int64_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align32_double_int64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_double_int64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfd f0, 99999000(r3), 0
@@ -2836,15 +2815,14 @@ define dso_local double @ld_align32_double_int64_t(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    xscvsxddp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = sitofp i64 %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = sitofp i64 %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align64_double_int64_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align64_double_int64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_double_int64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -2862,25 +2840,23 @@ define dso_local double @ld_align64_double_int64_t(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    xscvsxddp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = sitofp i64 %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = sitofp i64 %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_reg_double_int64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local double @ld_reg_double_int64_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_double_int64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfdx f0, r3, r4
 ; CHECK-NEXT:    xscvsxddp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = sitofp i64 %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = sitofp i64 %0 to double
   ret double %conv
 }
 
@@ -2895,8 +2871,8 @@ define dso_local double @ld_or_double_int64_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv1 = sitofp i64 %1 to double
   ret double %conv1
 }
@@ -2911,8 +2887,8 @@ define dso_local double @ld_not_disjoint16_double_int64_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = sitofp i64 %1 to double
   ret double %conv
 }
@@ -2928,8 +2904,8 @@ define dso_local double @ld_disjoint_align16_double_int64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = sitofp i64 %1 to double
   ret double %conv
 }
@@ -2945,8 +2921,8 @@ define dso_local double @ld_not_disjoint32_double_int64_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = sitofp i64 %1 to double
   ret double %conv
 }
@@ -2983,8 +2959,8 @@ define dso_local double @ld_disjoint_align32_double_int64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 16
   %conv = sitofp i64 %1 to double
   ret double %conv
 }
@@ -3013,8 +2989,8 @@ define dso_local double @ld_not_disjoint64_double_int64_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = sitofp i64 %1 to double
   ret double %conv
 }
@@ -3042,8 +3018,8 @@ define dso_local double @ld_disjoint_align64_double_int64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 4096
   %conv = sitofp i64 %1 to double
   ret double %conv
 }
@@ -3056,7 +3032,7 @@ define dso_local double @ld_cst_align16_double_int64_t() {
 ; CHECK-NEXT:    xscvsxddp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 4080 to i64*), align 16
+  %0 = load i64, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = sitofp i64 %0 to double
   ret double %conv
 }
@@ -3070,7 +3046,7 @@ define dso_local double @ld_cst_align32_double_int64_t() {
 ; CHECK-NEXT:    xscvsxddp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 9999900 to i64*), align 8
+  %0 = load i64, ptr inttoptr (i64 9999900 to ptr), align 8
   %conv = sitofp i64 %0 to double
   ret double %conv
 }
@@ -3094,7 +3070,7 @@ define dso_local double @ld_cst_align64_double_int64_t() {
 ; CHECK-PREP10-NEXT:    xscvsxddp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+  %0 = load i64, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = sitofp i64 %0 to double
   ret double %conv
 }
@@ -3106,28 +3082,27 @@ define dso_local double @ld_0_double_float(i64 %ptr) {
 ; CHECK-NEXT:    lfs f1, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fpext float %1 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align16_double_float(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align16_double_float(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_double_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs f1, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fpext float %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fpext float %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align32_double_float(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align32_double_float(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_double_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfs f1, 99999000(r3), 0
@@ -3140,15 +3115,14 @@ define dso_local double @ld_align32_double_float(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    lfsx f1, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fpext float %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fpext float %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align64_double_float(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align64_double_float(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_double_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -3164,24 +3138,22 @@ define dso_local double @ld_align64_double_float(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    lfsx f1, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fpext float %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fpext float %0 to double
   ret double %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_reg_double_float(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local double @ld_reg_double_float(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_double_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfsx f1, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fpext float %1 to double
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fpext float %0 to double
   ret double %conv
 }
 
@@ -3195,8 +3167,8 @@ define dso_local double @ld_or_double_float(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv1 = fpext float %1 to double
   ret double %conv1
 }
@@ -3210,8 +3182,8 @@ define dso_local double @ld_not_disjoint16_double_float(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fpext float %1 to double
   ret double %conv
 }
@@ -3226,8 +3198,8 @@ define dso_local double @ld_disjoint_align16_double_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 8
   %conv = fpext float %1 to double
   ret double %conv
 }
@@ -3242,8 +3214,8 @@ define dso_local double @ld_not_disjoint32_double_float(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fpext float %1 to double
   ret double %conv
 }
@@ -3277,8 +3249,8 @@ define dso_local double @ld_disjoint_align32_double_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 16
   %conv = fpext float %1 to double
   ret double %conv
 }
@@ -3305,8 +3277,8 @@ define dso_local double @ld_not_disjoint64_double_float(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fpext float %1 to double
   ret double %conv
 }
@@ -3332,8 +3304,8 @@ define dso_local double @ld_disjoint_align64_double_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4096
   %conv = fpext float %1 to double
   ret double %conv
 }
@@ -3345,7 +3317,7 @@ define dso_local double @ld_cst_align16_double_float() {
 ; CHECK-NEXT:    lfs f1, 4080(0)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 4080 to float*), align 16
+  %0 = load float, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = fpext float %0 to double
   ret double %conv
 }
@@ -3358,7 +3330,7 @@ define dso_local double @ld_cst_align32_double_float() {
 ; CHECK-NEXT:    lfs f1, -27108(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 9999900 to float*), align 4
+  %0 = load float, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = fpext float %0 to double
   ret double %conv
 }
@@ -3380,7 +3352,7 @@ define dso_local double @ld_cst_align64_double_float() {
 ; CHECK-PREP10-NEXT:    lfs f1, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 1000000000000 to float*), align 4096
+  %0 = load float, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = fpext float %0 to double
   ret double %conv
 }
@@ -3392,26 +3364,25 @@ define dso_local double @ld_0_double_double(i64 %ptr) {
 ; CHECK-NEXT:    lfd f1, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load double, ptr %0, align 8
   ret double %1
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align16_double_double(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align16_double_double(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_double_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd f1, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  ret double %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load double, ptr %add.ptr, align 8
+  ret double %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align32_double_double(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align32_double_double(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_double_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfd f1, 99999000(r3), 0
@@ -3424,14 +3395,13 @@ define dso_local double @ld_align32_double_double(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    lfdx f1, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  ret double %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load double, ptr %add.ptr, align 8
+  ret double %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align64_double_double(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align64_double_double(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_double_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -3447,23 +3417,21 @@ define dso_local double @ld_align64_double_double(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    lfdx f1, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  ret double %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load double, ptr %add.ptr, align 8
+  ret double %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_reg_double_double(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local double @ld_reg_double_double(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_double_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfdx f1, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  ret double %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load double, ptr %add.ptr, align 8
+  ret double %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
@@ -3476,8 +3444,8 @@ define dso_local double @ld_or_double_double(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   ret double %1
 }
 
@@ -3490,8 +3458,8 @@ define dso_local double @ld_not_disjoint16_double_double(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   ret double %1
 }
 
@@ -3505,8 +3473,8 @@ define dso_local double @ld_disjoint_align16_double_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   ret double %1
 }
 
@@ -3520,8 +3488,8 @@ define dso_local double @ld_not_disjoint32_double_double(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   ret double %1
 }
 
@@ -3554,8 +3522,8 @@ define dso_local double @ld_disjoint_align32_double_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 16
   ret double %1
 }
 
@@ -3581,8 +3549,8 @@ define dso_local double @ld_not_disjoint64_double_double(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   ret double %1
 }
 
@@ -3607,8 +3575,8 @@ define dso_local double @ld_disjoint_align64_double_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 4096
   ret double %1
 }
 
@@ -3619,7 +3587,7 @@ define dso_local double @ld_cst_align16_double_double() {
 ; CHECK-NEXT:    lfd f1, 4080(0)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 4080 to double*), align 16
+  %0 = load double, ptr inttoptr (i64 4080 to ptr), align 16
   ret double %0
 }
 
@@ -3631,7 +3599,7 @@ define dso_local double @ld_cst_align32_double_double() {
 ; CHECK-NEXT:    lfd f1, -27108(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 9999900 to double*), align 8
+  %0 = load double, ptr inttoptr (i64 9999900 to ptr), align 8
   ret double %0
 }
 
@@ -3652,7 +3620,7 @@ define dso_local double @ld_cst_align64_double_double() {
 ; CHECK-PREP10-NEXT:    lfd f1, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 1000000000000 to double*), align 4096
+  %0 = load double, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret double %0
 }
 
@@ -3672,13 +3640,13 @@ define dso_local void @st_0_double_uint8_t(i64 %ptr, double %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui double %str to i8
-  %0 = inttoptr i64 %ptr to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %ptr to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_double_uint8_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align16_double_uint8_t(ptr nocapture %ptr, double %str) {
 ; CHECK-POSTP8-LABEL: st_align16_double_uint8_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    xscvdpuxws f0, f1
@@ -3694,13 +3662,13 @@ define dso_local void @st_align16_double_uint8_t(i8* nocapture %ptr, double %str
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui double %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_double_uint8_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align32_double_uint8_t(ptr nocapture %ptr, double %str) {
 ; CHECK-P10-LABEL: st_align32_double_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpuxws f0, f1
@@ -3726,13 +3694,13 @@ define dso_local void @st_align32_double_uint8_t(i8* nocapture %ptr, double %str
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui double %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_double_uint8_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align64_double_uint8_t(ptr nocapture %ptr, double %str) {
 ; CHECK-P10-LABEL: st_align64_double_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpuxws f0, f1
@@ -3761,13 +3729,13 @@ define dso_local void @st_align64_double_uint8_t(i8* nocapture %ptr, double %str
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui double %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_double_uint8_t(i8* nocapture %ptr, i64 %off, double %str) {
+define dso_local void @st_reg_double_uint8_t(ptr nocapture %ptr, i64 %off, double %str) {
 ; CHECK-POSTP8-LABEL: st_reg_double_uint8_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    xscvdpuxws f0, f1
@@ -3782,8 +3750,8 @@ define dso_local void @st_reg_double_uint8_t(i8* nocapture %ptr, i64 %off, doubl
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui double %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
@@ -3807,8 +3775,8 @@ entry:
   %conv = fptoui double %str to i8
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -3831,8 +3799,8 @@ define dso_local void @st_not_disjoint16_double_uint8_t(i64 %ptr, double %str) {
 entry:
   %conv = fptoui double %str to i8
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -3857,8 +3825,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = fptoui double %str to i8
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 8
   ret void
 }
 
@@ -3883,8 +3851,8 @@ define dso_local void @st_not_disjoint32_double_uint8_t(i64 %ptr, double %str) {
 entry:
   %conv = fptoui double %str to i8
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -3923,8 +3891,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = fptoui double %str to i8
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 16
   ret void
 }
 
@@ -3965,8 +3933,8 @@ define dso_local void @st_not_disjoint64_double_uint8_t(i64 %ptr, double %str) {
 entry:
   %conv = fptoui double %str to i8
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -4005,8 +3973,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = fptoui double %str to i8
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -4027,7 +3995,7 @@ define dso_local void @st_cst_align16_double_uint8_t(double %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui double %str to i8
-  store i8 %conv, i8* inttoptr (i64 4080 to i8*), align 16
+  store i8 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -4057,7 +4025,7 @@ define dso_local void @st_cst_align32_double_uint8_t(double %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui double %str to i8
-  store i8 %conv, i8* inttoptr (i64 9999900 to i8*), align 4
+  store i8 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -4091,7 +4059,7 @@ define dso_local void @st_cst_align64_double_uint8_t(double %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui double %str to i8
-  store i8 %conv, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+  store i8 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -4111,13 +4079,13 @@ define dso_local void @st_0_double_int8_t(i64 %ptr, double %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi double %str to i8
-  %0 = inttoptr i64 %ptr to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %ptr to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_double_int8_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align16_double_int8_t(ptr nocapture %ptr, double %str) {
 ; CHECK-POSTP8-LABEL: st_align16_double_int8_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    xscvdpsxws f0, f1
@@ -4133,13 +4101,13 @@ define dso_local void @st_align16_double_int8_t(i8* nocapture %ptr, double %str)
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi double %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_double_int8_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align32_double_int8_t(ptr nocapture %ptr, double %str) {
 ; CHECK-P10-LABEL: st_align32_double_int8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpsxws f0, f1
@@ -4165,13 +4133,13 @@ define dso_local void @st_align32_double_int8_t(i8* nocapture %ptr, double %str)
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi double %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_double_int8_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align64_double_int8_t(ptr nocapture %ptr, double %str) {
 ; CHECK-P10-LABEL: st_align64_double_int8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpsxws f0, f1
@@ -4200,13 +4168,13 @@ define dso_local void @st_align64_double_int8_t(i8* nocapture %ptr, double %str)
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi double %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_double_int8_t(i8* nocapture %ptr, i64 %off, double %str) {
+define dso_local void @st_reg_double_int8_t(ptr nocapture %ptr, i64 %off, double %str) {
 ; CHECK-POSTP8-LABEL: st_reg_double_int8_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    xscvdpsxws f0, f1
@@ -4221,8 +4189,8 @@ define dso_local void @st_reg_double_int8_t(i8* nocapture %ptr, i64 %off, double
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi double %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
@@ -4246,8 +4214,8 @@ entry:
   %conv = fptosi double %str to i8
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -4270,8 +4238,8 @@ define dso_local void @st_not_disjoint16_double_int8_t(i64 %ptr, double %str) {
 entry:
   %conv = fptosi double %str to i8
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -4296,8 +4264,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = fptosi double %str to i8
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 8
   ret void
 }
 
@@ -4322,8 +4290,8 @@ define dso_local void @st_not_disjoint32_double_int8_t(i64 %ptr, double %str) {
 entry:
   %conv = fptosi double %str to i8
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -4362,8 +4330,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = fptosi double %str to i8
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 16
   ret void
 }
 
@@ -4404,8 +4372,8 @@ define dso_local void @st_not_disjoint64_double_int8_t(i64 %ptr, double %str) {
 entry:
   %conv = fptosi double %str to i8
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -4444,8 +4412,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = fptosi double %str to i8
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -4466,7 +4434,7 @@ define dso_local void @st_cst_align16_double_int8_t(double %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi double %str to i8
-  store i8 %conv, i8* inttoptr (i64 4080 to i8*), align 16
+  store i8 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -4496,7 +4464,7 @@ define dso_local void @st_cst_align32_double_int8_t(double %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi double %str to i8
-  store i8 %conv, i8* inttoptr (i64 9999900 to i8*), align 4
+  store i8 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -4530,7 +4498,7 @@ define dso_local void @st_cst_align64_double_int8_t(double %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi double %str to i8
-  store i8 %conv, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+  store i8 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -4550,13 +4518,13 @@ define dso_local void @st_0_double_uint16_t(i64 %ptr, double %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui double %str to i16
-  %0 = inttoptr i64 %ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %ptr to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_double_uint16_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align16_double_uint16_t(ptr nocapture %ptr, double %str) {
 ; CHECK-POSTP8-LABEL: st_align16_double_uint16_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    xscvdpuxws f0, f1
@@ -4572,14 +4540,13 @@ define dso_local void @st_align16_double_uint16_t(i8* nocapture %ptr, double %st
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui double %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_double_uint16_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align32_double_uint16_t(ptr nocapture %ptr, double %str) {
 ; CHECK-P10-LABEL: st_align32_double_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpuxws f0, f1
@@ -4605,14 +4572,13 @@ define dso_local void @st_align32_double_uint16_t(i8* nocapture %ptr, double %st
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui double %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_double_uint16_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align64_double_uint16_t(ptr nocapture %ptr, double %str) {
 ; CHECK-P10-LABEL: st_align64_double_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpuxws f0, f1
@@ -4641,14 +4607,13 @@ define dso_local void @st_align64_double_uint16_t(i8* nocapture %ptr, double %st
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui double %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_double_uint16_t(i8* nocapture %ptr, i64 %off, double %str) {
+define dso_local void @st_reg_double_uint16_t(ptr nocapture %ptr, i64 %off, double %str) {
 ; CHECK-POSTP8-LABEL: st_reg_double_uint16_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    xscvdpuxws f0, f1
@@ -4663,9 +4628,8 @@ define dso_local void @st_reg_double_uint16_t(i8* nocapture %ptr, i64 %off, doub
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui double %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
@@ -4689,8 +4653,8 @@ entry:
   %conv = fptoui double %str to i16
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -4713,8 +4677,8 @@ define dso_local void @st_not_disjoint16_double_uint16_t(i64 %ptr, double %str)
 entry:
   %conv = fptoui double %str to i16
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -4739,8 +4703,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = fptoui double %str to i16
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 8
   ret void
 }
 
@@ -4765,8 +4729,8 @@ define dso_local void @st_not_disjoint32_double_uint16_t(i64 %ptr, double %str)
 entry:
   %conv = fptoui double %str to i16
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -4805,8 +4769,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = fptoui double %str to i16
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 16
   ret void
 }
 
@@ -4847,8 +4811,8 @@ define dso_local void @st_not_disjoint64_double_uint16_t(i64 %ptr, double %str)
 entry:
   %conv = fptoui double %str to i16
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -4887,8 +4851,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = fptoui double %str to i16
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -4909,7 +4873,7 @@ define dso_local void @st_cst_align16_double_uint16_t(double %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui double %str to i16
-  store i16 %conv, i16* inttoptr (i64 4080 to i16*), align 16
+  store i16 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -4939,7 +4903,7 @@ define dso_local void @st_cst_align32_double_uint16_t(double %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui double %str to i16
-  store i16 %conv, i16* inttoptr (i64 9999900 to i16*), align 4
+  store i16 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -4973,7 +4937,7 @@ define dso_local void @st_cst_align64_double_uint16_t(double %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui double %str to i16
-  store i16 %conv, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+  store i16 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -4993,13 +4957,13 @@ define dso_local void @st_0_double_int16_t(i64 %ptr, double %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi double %str to i16
-  %0 = inttoptr i64 %ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %ptr to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_double_int16_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align16_double_int16_t(ptr nocapture %ptr, double %str) {
 ; CHECK-POSTP8-LABEL: st_align16_double_int16_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    xscvdpsxws f0, f1
@@ -5015,14 +4979,13 @@ define dso_local void @st_align16_double_int16_t(i8* nocapture %ptr, double %str
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi double %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_double_int16_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align32_double_int16_t(ptr nocapture %ptr, double %str) {
 ; CHECK-P10-LABEL: st_align32_double_int16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpsxws f0, f1
@@ -5048,14 +5011,13 @@ define dso_local void @st_align32_double_int16_t(i8* nocapture %ptr, double %str
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi double %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_double_int16_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align64_double_int16_t(ptr nocapture %ptr, double %str) {
 ; CHECK-P10-LABEL: st_align64_double_int16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpsxws f0, f1
@@ -5084,14 +5046,13 @@ define dso_local void @st_align64_double_int16_t(i8* nocapture %ptr, double %str
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi double %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_double_int16_t(i8* nocapture %ptr, i64 %off, double %str) {
+define dso_local void @st_reg_double_int16_t(ptr nocapture %ptr, i64 %off, double %str) {
 ; CHECK-POSTP8-LABEL: st_reg_double_int16_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    xscvdpsxws f0, f1
@@ -5106,9 +5067,8 @@ define dso_local void @st_reg_double_int16_t(i8* nocapture %ptr, i64 %off, doubl
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi double %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
@@ -5132,8 +5092,8 @@ entry:
   %conv = fptosi double %str to i16
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -5156,8 +5116,8 @@ define dso_local void @st_not_disjoint16_double_int16_t(i64 %ptr, double %str) {
 entry:
   %conv = fptosi double %str to i16
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -5182,8 +5142,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = fptosi double %str to i16
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 8
   ret void
 }
 
@@ -5208,8 +5168,8 @@ define dso_local void @st_not_disjoint32_double_int16_t(i64 %ptr, double %str) {
 entry:
   %conv = fptosi double %str to i16
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -5248,8 +5208,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = fptosi double %str to i16
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 16
   ret void
 }
 
@@ -5290,8 +5250,8 @@ define dso_local void @st_not_disjoint64_double_int16_t(i64 %ptr, double %str) {
 entry:
   %conv = fptosi double %str to i16
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -5330,8 +5290,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = fptosi double %str to i16
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -5352,7 +5312,7 @@ define dso_local void @st_cst_align16_double_int16_t(double %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi double %str to i16
-  store i16 %conv, i16* inttoptr (i64 4080 to i16*), align 16
+  store i16 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -5382,7 +5342,7 @@ define dso_local void @st_cst_align32_double_int16_t(double %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi double %str to i16
-  store i16 %conv, i16* inttoptr (i64 9999900 to i16*), align 4
+  store i16 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -5416,7 +5376,7 @@ define dso_local void @st_cst_align64_double_int16_t(double %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi double %str to i16
-  store i16 %conv, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+  store i16 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -5429,13 +5389,13 @@ define dso_local void @st_0_double_uint32_t(i64 %ptr, double %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptoui double %str to i32
-  %0 = inttoptr i64 %ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_double_uint32_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align16_double_uint32_t(ptr nocapture %ptr, double %str) {
 ; CHECK-LABEL: st_align16_double_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xscvdpuxws f0, f1
@@ -5444,14 +5404,13 @@ define dso_local void @st_align16_double_uint32_t(i8* nocapture %ptr, double %st
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptoui double %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_double_uint32_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align32_double_uint32_t(ptr nocapture %ptr, double %str) {
 ; CHECK-P10-LABEL: st_align32_double_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpuxws f0, f1
@@ -5468,14 +5427,13 @@ define dso_local void @st_align32_double_uint32_t(i8* nocapture %ptr, double %st
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptoui double %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_double_uint32_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align64_double_uint32_t(ptr nocapture %ptr, double %str) {
 ; CHECK-P10-LABEL: st_align64_double_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpuxws f0, f1
@@ -5494,14 +5452,13 @@ define dso_local void @st_align64_double_uint32_t(i8* nocapture %ptr, double %st
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptoui double %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_double_uint32_t(i8* nocapture %ptr, i64 %off, double %str) {
+define dso_local void @st_reg_double_uint32_t(ptr nocapture %ptr, i64 %off, double %str) {
 ; CHECK-LABEL: st_reg_double_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xscvdpuxws f0, f1
@@ -5509,9 +5466,8 @@ define dso_local void @st_reg_double_uint32_t(i8* nocapture %ptr, i64 %off, doub
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptoui double %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
@@ -5527,8 +5483,8 @@ entry:
   %conv = fptoui double %str to i32
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -5543,8 +5499,8 @@ define dso_local void @st_not_disjoint16_double_uint32_t(i64 %ptr, double %str)
 entry:
   %conv = fptoui double %str to i32
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -5561,8 +5517,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = fptoui double %str to i32
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 8
   ret void
 }
 
@@ -5578,8 +5534,8 @@ define dso_local void @st_not_disjoint32_double_uint32_t(i64 %ptr, double %str)
 entry:
   %conv = fptoui double %str to i32
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -5617,8 +5573,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = fptoui double %str to i32
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 16
   ret void
 }
 
@@ -5658,8 +5614,8 @@ define dso_local void @st_not_disjoint64_double_uint32_t(i64 %ptr, double %str)
 entry:
   %conv = fptoui double %str to i32
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -5687,8 +5643,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = fptoui double %str to i32
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -5702,7 +5658,7 @@ define dso_local void @st_cst_align16_double_uint32_t(double %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptoui double %str to i32
-  store i32 %conv, i32* inttoptr (i64 4080 to i32*), align 16
+  store i32 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -5724,7 +5680,7 @@ define dso_local void @st_cst_align32_double_uint32_t(double %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptoui double %str to i32
-  store i32 %conv, i32* inttoptr (i64 9999900 to i32*), align 4
+  store i32 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -5748,7 +5704,7 @@ define dso_local void @st_cst_align64_double_uint32_t(double %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptoui double %str to i32
-  store i32 %conv, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+  store i32 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -5761,13 +5717,13 @@ define dso_local void @st_0_double_int32_t(i64 %ptr, double %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptosi double %str to i32
-  %0 = inttoptr i64 %ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_double_int32_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align16_double_int32_t(ptr nocapture %ptr, double %str) {
 ; CHECK-LABEL: st_align16_double_int32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xscvdpsxws f0, f1
@@ -5776,14 +5732,13 @@ define dso_local void @st_align16_double_int32_t(i8* nocapture %ptr, double %str
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptosi double %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_double_int32_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align32_double_int32_t(ptr nocapture %ptr, double %str) {
 ; CHECK-P10-LABEL: st_align32_double_int32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpsxws f0, f1
@@ -5800,14 +5755,13 @@ define dso_local void @st_align32_double_int32_t(i8* nocapture %ptr, double %str
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptosi double %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_double_int32_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align64_double_int32_t(ptr nocapture %ptr, double %str) {
 ; CHECK-P10-LABEL: st_align64_double_int32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpsxws f0, f1
@@ -5826,14 +5780,13 @@ define dso_local void @st_align64_double_int32_t(i8* nocapture %ptr, double %str
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptosi double %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_double_int32_t(i8* nocapture %ptr, i64 %off, double %str) {
+define dso_local void @st_reg_double_int32_t(ptr nocapture %ptr, i64 %off, double %str) {
 ; CHECK-LABEL: st_reg_double_int32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xscvdpsxws f0, f1
@@ -5841,9 +5794,8 @@ define dso_local void @st_reg_double_int32_t(i8* nocapture %ptr, i64 %off, doubl
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptosi double %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
@@ -5859,8 +5811,8 @@ entry:
   %conv = fptosi double %str to i32
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -5875,8 +5827,8 @@ define dso_local void @st_not_disjoint16_double_int32_t(i64 %ptr, double %str) {
 entry:
   %conv = fptosi double %str to i32
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -5893,8 +5845,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = fptosi double %str to i32
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 8
   ret void
 }
 
@@ -5910,8 +5862,8 @@ define dso_local void @st_not_disjoint32_double_int32_t(i64 %ptr, double %str) {
 entry:
   %conv = fptosi double %str to i32
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -5949,8 +5901,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = fptosi double %str to i32
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 16
   ret void
 }
 
@@ -5990,8 +5942,8 @@ define dso_local void @st_not_disjoint64_double_int32_t(i64 %ptr, double %str) {
 entry:
   %conv = fptosi double %str to i32
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -6019,8 +5971,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = fptosi double %str to i32
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -6034,7 +5986,7 @@ define dso_local void @st_cst_align16_double_int32_t(double %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptosi double %str to i32
-  store i32 %conv, i32* inttoptr (i64 4080 to i32*), align 16
+  store i32 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -6056,7 +6008,7 @@ define dso_local void @st_cst_align32_double_int32_t(double %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptosi double %str to i32
-  store i32 %conv, i32* inttoptr (i64 9999900 to i32*), align 4
+  store i32 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -6080,7 +6032,7 @@ define dso_local void @st_cst_align64_double_int32_t(double %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptosi double %str to i32
-  store i32 %conv, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+  store i32 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -6099,13 +6051,13 @@ define dso_local void @st_0_double_uint64_t(i64 %ptr, double %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui double %str to i64
-  %0 = inttoptr i64 %ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_double_uint64_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align16_double_uint64_t(ptr nocapture %ptr, double %str) {
 ; CHECK-POSTP8-LABEL: st_align16_double_uint64_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    xscvdpuxds v2, f1
@@ -6120,14 +6072,13 @@ define dso_local void @st_align16_double_uint64_t(i8* nocapture %ptr, double %st
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui double %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_double_uint64_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align32_double_uint64_t(ptr nocapture %ptr, double %str) {
 ; CHECK-P10-LABEL: st_align32_double_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpuxds v2, f1
@@ -6143,14 +6094,13 @@ define dso_local void @st_align32_double_uint64_t(i8* nocapture %ptr, double %st
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptoui double %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_double_uint64_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align64_double_uint64_t(ptr nocapture %ptr, double %str) {
 ; CHECK-P10-LABEL: st_align64_double_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpuxds f0, f1
@@ -6169,14 +6119,13 @@ define dso_local void @st_align64_double_uint64_t(i8* nocapture %ptr, double %st
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptoui double %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_double_uint64_t(i8* nocapture %ptr, i64 %off, double %str) {
+define dso_local void @st_reg_double_uint64_t(ptr nocapture %ptr, i64 %off, double %str) {
 ; CHECK-LABEL: st_reg_double_uint64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xscvdpuxds f0, f1
@@ -6184,9 +6133,8 @@ define dso_local void @st_reg_double_uint64_t(i8* nocapture %ptr, i64 %off, doub
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptoui double %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
@@ -6209,8 +6157,8 @@ entry:
   %conv = fptoui double %str to i64
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6232,8 +6180,8 @@ define dso_local void @st_not_disjoint16_double_uint64_t(i64 %ptr, double %str)
 entry:
   %conv = fptoui double %str to i64
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6257,8 +6205,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = fptoui double %str to i64
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6282,8 +6230,8 @@ define dso_local void @st_not_disjoint32_double_uint64_t(i64 %ptr, double %str)
 entry:
   %conv = fptoui double %str to i64
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6320,8 +6268,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = fptoui double %str to i64
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 16
   ret void
 }
 
@@ -6361,8 +6309,8 @@ define dso_local void @st_not_disjoint64_double_uint64_t(i64 %ptr, double %str)
 entry:
   %conv = fptoui double %str to i64
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6390,8 +6338,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = fptoui double %str to i64
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -6411,7 +6359,7 @@ define dso_local void @st_cst_align16_double_uint64_t(double %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui double %str to i64
-  store i64 %conv, i64* inttoptr (i64 4080 to i64*), align 16
+  store i64 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -6433,7 +6381,7 @@ define dso_local void @st_cst_align32_double_uint64_t(double %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui double %str to i64
-  store i64 %conv, i64* inttoptr (i64 9999900 to i64*), align 8
+  store i64 %conv, ptr inttoptr (i64 9999900 to ptr), align 8
   ret void
 }
 
@@ -6466,7 +6414,7 @@ define dso_local void @st_cst_align64_double_uint64_t(double %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui double %str to i64
-  store i64 %conv, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+  store i64 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -6485,13 +6433,13 @@ define dso_local void @st_0_double_int64_t(i64 %ptr, double %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi double %str to i64
-  %0 = inttoptr i64 %ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_double_int64_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align16_double_int64_t(ptr nocapture %ptr, double %str) {
 ; CHECK-POSTP8-LABEL: st_align16_double_int64_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    xscvdpsxds v2, f1
@@ -6506,14 +6454,13 @@ define dso_local void @st_align16_double_int64_t(i8* nocapture %ptr, double %str
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi double %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_double_int64_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align32_double_int64_t(ptr nocapture %ptr, double %str) {
 ; CHECK-P10-LABEL: st_align32_double_int64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpsxds v2, f1
@@ -6529,14 +6476,13 @@ define dso_local void @st_align32_double_int64_t(i8* nocapture %ptr, double %str
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptosi double %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_double_int64_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align64_double_int64_t(ptr nocapture %ptr, double %str) {
 ; CHECK-P10-LABEL: st_align64_double_int64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpsxds f0, f1
@@ -6555,14 +6501,13 @@ define dso_local void @st_align64_double_int64_t(i8* nocapture %ptr, double %str
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptosi double %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_double_int64_t(i8* nocapture %ptr, i64 %off, double %str) {
+define dso_local void @st_reg_double_int64_t(ptr nocapture %ptr, i64 %off, double %str) {
 ; CHECK-LABEL: st_reg_double_int64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xscvdpsxds f0, f1
@@ -6570,9 +6515,8 @@ define dso_local void @st_reg_double_int64_t(i8* nocapture %ptr, i64 %off, doubl
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptosi double %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
@@ -6595,8 +6539,8 @@ entry:
   %conv = fptosi double %str to i64
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6618,8 +6562,8 @@ define dso_local void @st_not_disjoint16_double_int64_t(i64 %ptr, double %str) {
 entry:
   %conv = fptosi double %str to i64
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6643,8 +6587,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = fptosi double %str to i64
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6668,8 +6612,8 @@ define dso_local void @st_not_disjoint32_double_int64_t(i64 %ptr, double %str) {
 entry:
   %conv = fptosi double %str to i64
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6706,8 +6650,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = fptosi double %str to i64
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 16
   ret void
 }
 
@@ -6747,8 +6691,8 @@ define dso_local void @st_not_disjoint64_double_int64_t(i64 %ptr, double %str) {
 entry:
   %conv = fptosi double %str to i64
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6776,8 +6720,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = fptosi double %str to i64
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -6797,7 +6741,7 @@ define dso_local void @st_cst_align16_double_int64_t(double %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi double %str to i64
-  store i64 %conv, i64* inttoptr (i64 4080 to i64*), align 16
+  store i64 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -6819,7 +6763,7 @@ define dso_local void @st_cst_align32_double_int64_t(double %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi double %str to i64
-  store i64 %conv, i64* inttoptr (i64 9999900 to i64*), align 8
+  store i64 %conv, ptr inttoptr (i64 9999900 to ptr), align 8
   ret void
 }
 
@@ -6852,7 +6796,7 @@ define dso_local void @st_cst_align64_double_int64_t(double %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi double %str to i64
-  store i64 %conv, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+  store i64 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -6865,13 +6809,13 @@ define dso_local void @st_0_double_float(i64 %ptr, double %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptrunc double %str to float
-  %0 = inttoptr i64 %ptr to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_double_float(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align16_double_float(ptr nocapture %ptr, double %str) {
 ; CHECK-LABEL: st_align16_double_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xsrsp f0, f1
@@ -6879,14 +6823,13 @@ define dso_local void @st_align16_double_float(i8* nocapture %ptr, double %str)
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptrunc double %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_double_float(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align32_double_float(ptr nocapture %ptr, double %str) {
 ; CHECK-P10-LABEL: st_align32_double_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xsrsp f0, f1
@@ -6902,14 +6845,13 @@ define dso_local void @st_align32_double_float(i8* nocapture %ptr, double %str)
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptrunc double %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_double_float(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align64_double_float(ptr nocapture %ptr, double %str) {
 ; CHECK-P10-LABEL: st_align64_double_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xsrsp f0, f1
@@ -6928,14 +6870,13 @@ define dso_local void @st_align64_double_float(i8* nocapture %ptr, double %str)
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptrunc double %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_double_float(i8* nocapture %ptr, i64 %off, double %str) {
+define dso_local void @st_reg_double_float(ptr nocapture %ptr, i64 %off, double %str) {
 ; CHECK-LABEL: st_reg_double_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xsrsp f0, f1
@@ -6943,9 +6884,8 @@ define dso_local void @st_reg_double_float(i8* nocapture %ptr, i64 %off, double
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptrunc double %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
@@ -6961,8 +6901,8 @@ entry:
   %conv = fptrunc double %str to float
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -6977,8 +6917,8 @@ define dso_local void @st_not_disjoint16_double_float(i64 %ptr, double %str) {
 entry:
   %conv = fptrunc double %str to float
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -6994,8 +6934,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = fptrunc double %str to float
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 8
   ret void
 }
 
@@ -7011,8 +6951,8 @@ define dso_local void @st_not_disjoint32_double_float(i64 %ptr, double %str) {
 entry:
   %conv = fptrunc double %str to float
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -7049,8 +6989,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = fptrunc double %str to float
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 16
   ret void
 }
 
@@ -7090,8 +7030,8 @@ define dso_local void @st_not_disjoint64_double_float(i64 %ptr, double %str) {
 entry:
   %conv = fptrunc double %str to float
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -7119,8 +7059,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = fptrunc double %str to float
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4096
   ret void
 }
 
@@ -7133,7 +7073,7 @@ define dso_local void @st_cst_align16_double_float(double %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptrunc double %str to float
-  store float %conv, float* inttoptr (i64 4080 to float*), align 16
+  store float %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -7147,7 +7087,7 @@ define dso_local void @st_cst_align32_double_float(double %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptrunc double %str to float
-  store float %conv, float* inttoptr (i64 9999900 to float*), align 4
+  store float %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -7171,7 +7111,7 @@ define dso_local void @st_cst_align64_double_float(double %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptrunc double %str to float
-  store float %conv, float* inttoptr (i64 1000000000000 to float*), align 4096
+  store float %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -7182,26 +7122,25 @@ define dso_local void @st_0_double_double(i64 %ptr, double %str) {
 ; CHECK-NEXT:    stfd f1, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to double*
-  store double %str, double* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  store double %str, ptr %0, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_double_double(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align16_double_double(ptr nocapture %ptr, double %str) {
 ; CHECK-LABEL: st_align16_double_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stfd f1, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to double*
-  store double %str, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store double %str, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_double_double(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align32_double_double(ptr nocapture %ptr, double %str) {
 ; CHECK-P10-LABEL: st_align32_double_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pstfd f1, 99999000(r3), 0
@@ -7214,14 +7153,13 @@ define dso_local void @st_align32_double_double(i8* nocapture %ptr, double %str)
 ; CHECK-PREP10-NEXT:    stfdx f1, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to double*
-  store double %str, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store double %str, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_double_double(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align64_double_double(ptr nocapture %ptr, double %str) {
 ; CHECK-P10-LABEL: st_align64_double_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -7237,22 +7175,20 @@ define dso_local void @st_align64_double_double(i8* nocapture %ptr, double %str)
 ; CHECK-PREP10-NEXT:    stfdx f1, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to double*
-  store double %str, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store double %str, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_double_double(i8* nocapture %ptr, i64 %off, double %str) {
+define dso_local void @st_reg_double_double(ptr nocapture %ptr, i64 %off, double %str) {
 ; CHECK-LABEL: st_reg_double_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stfdx f1, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to double*
-  store double %str, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store double %str, ptr %add.ptr, align 8
   ret void
 }
 
@@ -7266,8 +7202,8 @@ define dso_local void @st_or1_double_double(i64 %ptr, i8 zeroext %off, double %s
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to double*
-  store double %str, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %str, ptr %0, align 8
   ret void
 }
 
@@ -7280,8 +7216,8 @@ define dso_local void @st_not_disjoint16_double_double(i64 %ptr, double %str) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to double*
-  store double %str, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %str, ptr %0, align 8
   ret void
 }
 
@@ -7295,8 +7231,8 @@ define dso_local void @st_disjoint_align16_double_double(i64 %ptr, double %str)
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to double*
-  store double %str, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %str, ptr %0, align 8
   ret void
 }
 
@@ -7310,8 +7246,8 @@ define dso_local void @st_not_disjoint32_double_double(i64 %ptr, double %str) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to double*
-  store double %str, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %str, ptr %0, align 8
   ret void
 }
 
@@ -7344,8 +7280,8 @@ define dso_local void @st_disjoint_align32_double_double(i64 %ptr, double %str)
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to double*
-  store double %str, double* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store double %str, ptr %0, align 16
   ret void
 }
 
@@ -7371,8 +7307,8 @@ define dso_local void @st_not_disjoint64_double_double(i64 %ptr, double %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to double*
-  store double %str, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %str, ptr %0, align 8
   ret void
 }
 
@@ -7397,8 +7333,8 @@ define dso_local void @st_disjoint_align64_double_double(i64 %ptr, double %str)
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to double*
-  store double %str, double* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store double %str, ptr %0, align 4096
   ret void
 }
 
@@ -7409,7 +7345,7 @@ define dso_local void @st_cst_align16_double_double(double %str) {
 ; CHECK-NEXT:    stfd f1, 4080(0)
 ; CHECK-NEXT:    blr
 entry:
-  store double %str, double* inttoptr (i64 4080 to double*), align 16
+  store double %str, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -7421,7 +7357,7 @@ define dso_local void @st_cst_align32_double_double(double %str) {
 ; CHECK-NEXT:    stfd f1, -27108(r3)
 ; CHECK-NEXT:    blr
 entry:
-  store double %str, double* inttoptr (i64 9999900 to double*), align 8
+  store double %str, ptr inttoptr (i64 9999900 to ptr), align 8
   ret void
 }
 
@@ -7442,6 +7378,6 @@ define dso_local void @st_cst_align64_double_double(double %str) {
 ; CHECK-PREP10-NEXT:    stfd f1, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  store double %str, double* inttoptr (i64 1000000000000 to double*), align 4096
+  store double %str, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/scalar-float-ldst.ll b/llvm/test/CodeGen/PowerPC/scalar-float-ldst.ll
index b2c2109e930c..c3115b904218 100644
--- a/llvm/test/CodeGen/PowerPC/scalar-float-ldst.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar-float-ldst.ll
@@ -33,14 +33,14 @@ define dso_local float @ld_0_float_uint8_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    xscvuxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = uitofp i8 %1 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align16_float_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align16_float_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-POSTP8-LABEL: ld_align16_float_uint8_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    addi r3, r3, 8
@@ -55,14 +55,14 @@ define dso_local float @ld_align16_float_uint8_t(i8* nocapture readonly %ptr) {
 ; CHECK-P8-NEXT:    xscvuxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = uitofp i8 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align32_float_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align32_float_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_float_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 99999000
@@ -87,14 +87,14 @@ define dso_local float @ld_align32_float_uint8_t(i8* nocapture readonly %ptr) {
 ; CHECK-P8-NEXT:    xscvuxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = uitofp i8 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align64_float_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align64_float_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_float_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -122,14 +122,14 @@ define dso_local float @ld_align64_float_uint8_t(i8* nocapture readonly %ptr) {
 ; CHECK-P8-NEXT:    xscvuxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = uitofp i8 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_reg_float_uint8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local float @ld_reg_float_uint8_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-POSTP8-LABEL: ld_reg_float_uint8_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    lxsibzx f0, r3, r4
@@ -143,8 +143,8 @@ define dso_local float @ld_reg_float_uint8_t(i8* nocapture readonly %ptr, i64 %o
 ; CHECK-P8-NEXT:    xscvuxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = uitofp i8 %0 to float
   ret float %conv
 }
@@ -168,8 +168,8 @@ define dso_local float @ld_or_float_uint8_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv1 = uitofp i8 %1 to float
   ret float %conv1
 }
@@ -192,8 +192,8 @@ define dso_local float @ld_not_disjoint16_float_uint8_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = uitofp i8 %1 to float
   ret float %conv
 }
@@ -218,8 +218,8 @@ define dso_local float @ld_disjoint_align16_float_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 8
   %conv = uitofp i8 %1 to float
   ret float %conv
 }
@@ -244,8 +244,8 @@ define dso_local float @ld_not_disjoint32_float_uint8_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = uitofp i8 %1 to float
   ret float %conv
 }
@@ -284,8 +284,8 @@ define dso_local float @ld_disjoint_align32_float_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 16
   %conv = uitofp i8 %1 to float
   ret float %conv
 }
@@ -326,8 +326,8 @@ define dso_local float @ld_not_disjoint64_float_uint8_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = uitofp i8 %1 to float
   ret float %conv
 }
@@ -366,8 +366,8 @@ define dso_local float @ld_disjoint_align64_float_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 4096
   %conv = uitofp i8 %1 to float
   ret float %conv
 }
@@ -388,7 +388,7 @@ define dso_local float @ld_cst_align16_float_uint8_t() {
 ; CHECK-P8-NEXT:    xscvuxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+  %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = uitofp i8 %0 to float
   ret float %conv
 }
@@ -418,7 +418,7 @@ define dso_local float @ld_cst_align32_float_uint8_t() {
 ; CHECK-P8-NEXT:    xscvuxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+  %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = uitofp i8 %0 to float
   ret float %conv
 }
@@ -452,7 +452,7 @@ define dso_local float @ld_cst_align64_float_uint8_t() {
 ; CHECK-P8-NEXT:    xscvuxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+  %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = uitofp i8 %0 to float
   ret float %conv
 }
@@ -474,14 +474,14 @@ define dso_local float @ld_0_float_int8_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    xscvsxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sitofp i8 %1 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align16_float_int8_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align16_float_int8_t(ptr nocapture readonly %ptr) {
 ; CHECK-POSTP8-LABEL: ld_align16_float_int8_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    addi r3, r3, 8
@@ -498,14 +498,14 @@ define dso_local float @ld_align16_float_int8_t(i8* nocapture readonly %ptr) {
 ; CHECK-P8-NEXT:    xscvsxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sitofp i8 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align32_float_int8_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align32_float_int8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_float_int8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 99999000
@@ -533,14 +533,14 @@ define dso_local float @ld_align32_float_int8_t(i8* nocapture readonly %ptr) {
 ; CHECK-P8-NEXT:    xscvsxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sitofp i8 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align64_float_int8_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align64_float_int8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_float_int8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -571,14 +571,14 @@ define dso_local float @ld_align64_float_int8_t(i8* nocapture readonly %ptr) {
 ; CHECK-P8-NEXT:    xscvsxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sitofp i8 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_reg_float_int8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local float @ld_reg_float_int8_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-POSTP8-LABEL: ld_reg_float_int8_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    lxsibzx v2, r3, r4
@@ -594,8 +594,8 @@ define dso_local float @ld_reg_float_int8_t(i8* nocapture readonly %ptr, i64 %of
 ; CHECK-P8-NEXT:    xscvsxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sitofp i8 %0 to float
   ret float %conv
 }
@@ -621,8 +621,8 @@ define dso_local float @ld_or_float_int8_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv1 = sitofp i8 %1 to float
   ret float %conv1
 }
@@ -647,8 +647,8 @@ define dso_local float @ld_not_disjoint16_float_int8_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sitofp i8 %1 to float
   ret float %conv
 }
@@ -675,8 +675,8 @@ define dso_local float @ld_disjoint_align16_float_int8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 8
   %conv = sitofp i8 %1 to float
   ret float %conv
 }
@@ -703,8 +703,8 @@ define dso_local float @ld_not_disjoint32_float_int8_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sitofp i8 %1 to float
   ret float %conv
 }
@@ -746,8 +746,8 @@ define dso_local float @ld_disjoint_align32_float_int8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 16
   %conv = sitofp i8 %1 to float
   ret float %conv
 }
@@ -791,8 +791,8 @@ define dso_local float @ld_not_disjoint64_float_int8_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sitofp i8 %1 to float
   ret float %conv
 }
@@ -834,8 +834,8 @@ define dso_local float @ld_disjoint_align64_float_int8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 4096
   %conv = sitofp i8 %1 to float
   ret float %conv
 }
@@ -858,7 +858,7 @@ define dso_local float @ld_cst_align16_float_int8_t() {
 ; CHECK-P8-NEXT:    xscvsxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+  %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = sitofp i8 %0 to float
   ret float %conv
 }
@@ -891,7 +891,7 @@ define dso_local float @ld_cst_align32_float_int8_t() {
 ; CHECK-P8-NEXT:    xscvsxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+  %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = sitofp i8 %0 to float
   ret float %conv
 }
@@ -928,7 +928,7 @@ define dso_local float @ld_cst_align64_float_int8_t() {
 ; CHECK-P8-NEXT:    xscvsxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+  %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = sitofp i8 %0 to float
   ret float %conv
 }
@@ -948,14 +948,14 @@ define dso_local float @ld_0_float_uint16_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    xscvuxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = uitofp i16 %1 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align16_float_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align16_float_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-POSTP8-LABEL: ld_align16_float_uint16_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    addi r3, r3, 8
@@ -970,15 +970,14 @@ define dso_local float @ld_align16_float_uint16_t(i8* nocapture readonly %ptr) {
 ; CHECK-P8-NEXT:    xscvuxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = uitofp i16 %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = uitofp i16 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align32_float_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align32_float_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_float_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 99999000
@@ -1003,15 +1002,14 @@ define dso_local float @ld_align32_float_uint16_t(i8* nocapture readonly %ptr) {
 ; CHECK-P8-NEXT:    xscvuxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = uitofp i16 %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = uitofp i16 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align64_float_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align64_float_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_float_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -1039,15 +1037,14 @@ define dso_local float @ld_align64_float_uint16_t(i8* nocapture readonly %ptr) {
 ; CHECK-P8-NEXT:    xscvuxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = uitofp i16 %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = uitofp i16 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_reg_float_uint16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local float @ld_reg_float_uint16_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-POSTP8-LABEL: ld_reg_float_uint16_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    lxsihzx f0, r3, r4
@@ -1061,10 +1058,9 @@ define dso_local float @ld_reg_float_uint16_t(i8* nocapture readonly %ptr, i64 %
 ; CHECK-P8-NEXT:    xscvuxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = uitofp i16 %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = uitofp i16 %0 to float
   ret float %conv
 }
 
@@ -1087,8 +1083,8 @@ define dso_local float @ld_or_float_uint16_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv1 = uitofp i16 %1 to float
   ret float %conv1
 }
@@ -1111,8 +1107,8 @@ define dso_local float @ld_not_disjoint16_float_uint16_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = uitofp i16 %1 to float
   ret float %conv
 }
@@ -1137,8 +1133,8 @@ define dso_local float @ld_disjoint_align16_float_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 8
   %conv = uitofp i16 %1 to float
   ret float %conv
 }
@@ -1163,8 +1159,8 @@ define dso_local float @ld_not_disjoint32_float_uint16_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = uitofp i16 %1 to float
   ret float %conv
 }
@@ -1203,8 +1199,8 @@ define dso_local float @ld_disjoint_align32_float_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 16
   %conv = uitofp i16 %1 to float
   ret float %conv
 }
@@ -1245,8 +1241,8 @@ define dso_local float @ld_not_disjoint64_float_uint16_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = uitofp i16 %1 to float
   ret float %conv
 }
@@ -1285,8 +1281,8 @@ define dso_local float @ld_disjoint_align64_float_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 4096
   %conv = uitofp i16 %1 to float
   ret float %conv
 }
@@ -1307,7 +1303,7 @@ define dso_local float @ld_cst_align16_float_uint16_t() {
 ; CHECK-P8-NEXT:    xscvuxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+  %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = uitofp i16 %0 to float
   ret float %conv
 }
@@ -1337,7 +1333,7 @@ define dso_local float @ld_cst_align32_float_uint16_t() {
 ; CHECK-P8-NEXT:    xscvuxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+  %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = uitofp i16 %0 to float
   ret float %conv
 }
@@ -1371,7 +1367,7 @@ define dso_local float @ld_cst_align64_float_uint16_t() {
 ; CHECK-P8-NEXT:    xscvuxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+  %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = uitofp i16 %0 to float
   ret float %conv
 }
@@ -1392,14 +1388,14 @@ define dso_local float @ld_0_float_int16_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    xscvsxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = sitofp i16 %1 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align16_float_int16_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align16_float_int16_t(ptr nocapture readonly %ptr) {
 ; CHECK-POSTP8-LABEL: ld_align16_float_int16_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    addi r3, r3, 8
@@ -1415,15 +1411,14 @@ define dso_local float @ld_align16_float_int16_t(i8* nocapture readonly %ptr) {
 ; CHECK-P8-NEXT:    xscvsxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = sitofp i16 %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = sitofp i16 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align32_float_int16_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align32_float_int16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_float_int16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 99999000
@@ -1450,15 +1445,14 @@ define dso_local float @ld_align32_float_int16_t(i8* nocapture readonly %ptr) {
 ; CHECK-P8-NEXT:    xscvsxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = sitofp i16 %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = sitofp i16 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align64_float_int16_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align64_float_int16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_float_int16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -1488,15 +1482,14 @@ define dso_local float @ld_align64_float_int16_t(i8* nocapture readonly %ptr) {
 ; CHECK-P8-NEXT:    xscvsxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = sitofp i16 %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = sitofp i16 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_reg_float_int16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local float @ld_reg_float_int16_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-POSTP8-LABEL: ld_reg_float_int16_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    lxsihzx v2, r3, r4
@@ -1511,10 +1504,9 @@ define dso_local float @ld_reg_float_int16_t(i8* nocapture readonly %ptr, i64 %o
 ; CHECK-P8-NEXT:    xscvsxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = sitofp i16 %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = sitofp i16 %0 to float
   ret float %conv
 }
 
@@ -1538,8 +1530,8 @@ define dso_local float @ld_or_float_int16_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv1 = sitofp i16 %1 to float
   ret float %conv1
 }
@@ -1563,8 +1555,8 @@ define dso_local float @ld_not_disjoint16_float_int16_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = sitofp i16 %1 to float
   ret float %conv
 }
@@ -1590,8 +1582,8 @@ define dso_local float @ld_disjoint_align16_float_int16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 8
   %conv = sitofp i16 %1 to float
   ret float %conv
 }
@@ -1617,8 +1609,8 @@ define dso_local float @ld_not_disjoint32_float_int16_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = sitofp i16 %1 to float
   ret float %conv
 }
@@ -1659,8 +1651,8 @@ define dso_local float @ld_disjoint_align32_float_int16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 16
   %conv = sitofp i16 %1 to float
   ret float %conv
 }
@@ -1703,8 +1695,8 @@ define dso_local float @ld_not_disjoint64_float_int16_t(i64 %ptr) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = sitofp i16 %1 to float
   ret float %conv
 }
@@ -1745,8 +1737,8 @@ define dso_local float @ld_disjoint_align64_float_int16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 4096
   %conv = sitofp i16 %1 to float
   ret float %conv
 }
@@ -1768,7 +1760,7 @@ define dso_local float @ld_cst_align16_float_int16_t() {
 ; CHECK-P8-NEXT:    xscvsxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+  %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = sitofp i16 %0 to float
   ret float %conv
 }
@@ -1800,7 +1792,7 @@ define dso_local float @ld_cst_align32_float_int16_t() {
 ; CHECK-P8-NEXT:    xscvsxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+  %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = sitofp i16 %0 to float
   ret float %conv
 }
@@ -1836,7 +1828,7 @@ define dso_local float @ld_cst_align64_float_int16_t() {
 ; CHECK-P8-NEXT:    xscvsxdsp f1, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+  %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = sitofp i16 %0 to float
   ret float %conv
 }
@@ -1849,14 +1841,14 @@ define dso_local float @ld_0_float_uint32_t(i64 %ptr) {
 ; CHECK-NEXT:    xscvuxdsp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = uitofp i32 %1 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align16_float_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align16_float_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_float_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addi r3, r3, 8
@@ -1864,15 +1856,14 @@ define dso_local float @ld_align16_float_uint32_t(i8* nocapture readonly %ptr) {
 ; CHECK-NEXT:    xscvuxdsp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = uitofp i32 %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = uitofp i32 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align32_float_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align32_float_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_float_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 99999000
@@ -1888,15 +1879,14 @@ define dso_local float @ld_align32_float_uint32_t(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    xscvuxdsp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = uitofp i32 %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = uitofp i32 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align64_float_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align64_float_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_float_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -1914,25 +1904,23 @@ define dso_local float @ld_align64_float_uint32_t(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    xscvuxdsp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = uitofp i32 %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = uitofp i32 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_reg_float_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local float @ld_reg_float_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_float_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfiwzx f0, r3, r4
 ; CHECK-NEXT:    xscvuxdsp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = uitofp i32 %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = uitofp i32 %0 to float
   ret float %conv
 }
 
@@ -1947,8 +1935,8 @@ define dso_local float @ld_or_float_uint32_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv1 = uitofp i32 %1 to float
   ret float %conv1
 }
@@ -1963,8 +1951,8 @@ define dso_local float @ld_not_disjoint16_float_uint32_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = uitofp i32 %1 to float
   ret float %conv
 }
@@ -1981,8 +1969,8 @@ define dso_local float @ld_disjoint_align16_float_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 8
   %conv = uitofp i32 %1 to float
   ret float %conv
 }
@@ -1998,8 +1986,8 @@ define dso_local float @ld_not_disjoint32_float_uint32_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = uitofp i32 %1 to float
   ret float %conv
 }
@@ -2037,8 +2025,8 @@ define dso_local float @ld_disjoint_align32_float_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 16
   %conv = uitofp i32 %1 to float
   ret float %conv
 }
@@ -2067,8 +2055,8 @@ define dso_local float @ld_not_disjoint64_float_uint32_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = uitofp i32 %1 to float
   ret float %conv
 }
@@ -2096,8 +2084,8 @@ define dso_local float @ld_disjoint_align64_float_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4096
   %conv = uitofp i32 %1 to float
   ret float %conv
 }
@@ -2111,7 +2099,7 @@ define dso_local float @ld_cst_align16_float_uint32_t() {
 ; CHECK-NEXT:    xscvuxdsp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 4080 to i32*), align 16
+  %0 = load i32, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = uitofp i32 %0 to float
   ret float %conv
 }
@@ -2133,7 +2121,7 @@ define dso_local float @ld_cst_align32_float_uint32_t() {
 ; CHECK-PREP10-NEXT:    xscvuxdsp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 9999900 to i32*), align 4
+  %0 = load i32, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = uitofp i32 %0 to float
   ret float %conv
 }
@@ -2157,7 +2145,7 @@ define dso_local float @ld_cst_align64_float_uint32_t() {
 ; CHECK-PREP10-NEXT:    xscvuxdsp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+  %0 = load i32, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = uitofp i32 %0 to float
   ret float %conv
 }
@@ -2170,14 +2158,14 @@ define dso_local float @ld_0_float_int32_t(i64 %ptr) {
 ; CHECK-NEXT:    xscvsxdsp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = sitofp i32 %1 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align16_float_int32_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align16_float_int32_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_float_int32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addi r3, r3, 8
@@ -2185,15 +2173,14 @@ define dso_local float @ld_align16_float_int32_t(i8* nocapture readonly %ptr) {
 ; CHECK-NEXT:    xscvsxdsp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = sitofp i32 %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = sitofp i32 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align32_float_int32_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align32_float_int32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_float_int32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 99999000
@@ -2209,15 +2196,14 @@ define dso_local float @ld_align32_float_int32_t(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    xscvsxdsp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = sitofp i32 %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = sitofp i32 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align64_float_int32_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align64_float_int32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_float_int32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -2235,25 +2221,23 @@ define dso_local float @ld_align64_float_int32_t(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    xscvsxdsp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = sitofp i32 %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = sitofp i32 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_reg_float_int32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local float @ld_reg_float_int32_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_float_int32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfiwax f0, r3, r4
 ; CHECK-NEXT:    xscvsxdsp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = sitofp i32 %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = sitofp i32 %0 to float
   ret float %conv
 }
 
@@ -2268,8 +2252,8 @@ define dso_local float @ld_or_float_int32_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv1 = sitofp i32 %1 to float
   ret float %conv1
 }
@@ -2284,8 +2268,8 @@ define dso_local float @ld_not_disjoint16_float_int32_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = sitofp i32 %1 to float
   ret float %conv
 }
@@ -2302,8 +2286,8 @@ define dso_local float @ld_disjoint_align16_float_int32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 8
   %conv = sitofp i32 %1 to float
   ret float %conv
 }
@@ -2319,8 +2303,8 @@ define dso_local float @ld_not_disjoint32_float_int32_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = sitofp i32 %1 to float
   ret float %conv
 }
@@ -2358,8 +2342,8 @@ define dso_local float @ld_disjoint_align32_float_int32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 16
   %conv = sitofp i32 %1 to float
   ret float %conv
 }
@@ -2388,8 +2372,8 @@ define dso_local float @ld_not_disjoint64_float_int32_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = sitofp i32 %1 to float
   ret float %conv
 }
@@ -2417,8 +2401,8 @@ define dso_local float @ld_disjoint_align64_float_int32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4096
   %conv = sitofp i32 %1 to float
   ret float %conv
 }
@@ -2432,7 +2416,7 @@ define dso_local float @ld_cst_align16_float_int32_t() {
 ; CHECK-NEXT:    xscvsxdsp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 4080 to i32*), align 16
+  %0 = load i32, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = sitofp i32 %0 to float
   ret float %conv
 }
@@ -2454,7 +2438,7 @@ define dso_local float @ld_cst_align32_float_int32_t() {
 ; CHECK-PREP10-NEXT:    xscvsxdsp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 9999900 to i32*), align 4
+  %0 = load i32, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = sitofp i32 %0 to float
   ret float %conv
 }
@@ -2478,7 +2462,7 @@ define dso_local float @ld_cst_align64_float_int32_t() {
 ; CHECK-PREP10-NEXT:    xscvsxdsp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+  %0 = load i32, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = sitofp i32 %0 to float
   ret float %conv
 }
@@ -2491,29 +2475,28 @@ define dso_local float @ld_0_float_uint64_t(i64 %ptr) {
 ; CHECK-NEXT:    xscvuxdsp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = uitofp i64 %1 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align16_float_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align16_float_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_float_uint64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd f0, 8(r3)
 ; CHECK-NEXT:    xscvuxdsp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = uitofp i64 %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = uitofp i64 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align32_float_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align32_float_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_float_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfd f0, 99999000(r3), 0
@@ -2528,15 +2511,14 @@ define dso_local float @ld_align32_float_uint64_t(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    xscvuxdsp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = uitofp i64 %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = uitofp i64 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align64_float_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align64_float_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_float_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -2554,25 +2536,23 @@ define dso_local float @ld_align64_float_uint64_t(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    xscvuxdsp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = uitofp i64 %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = uitofp i64 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_reg_float_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local float @ld_reg_float_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_float_uint64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfdx f0, r3, r4
 ; CHECK-NEXT:    xscvuxdsp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = uitofp i64 %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = uitofp i64 %0 to float
   ret float %conv
 }
 
@@ -2587,8 +2567,8 @@ define dso_local float @ld_or_float_uint64_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv1 = uitofp i64 %1 to float
   ret float %conv1
 }
@@ -2603,8 +2583,8 @@ define dso_local float @ld_not_disjoint16_float_uint64_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = uitofp i64 %1 to float
   ret float %conv
 }
@@ -2620,8 +2600,8 @@ define dso_local float @ld_disjoint_align16_float_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = uitofp i64 %1 to float
   ret float %conv
 }
@@ -2637,8 +2617,8 @@ define dso_local float @ld_not_disjoint32_float_uint64_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = uitofp i64 %1 to float
   ret float %conv
 }
@@ -2675,8 +2655,8 @@ define dso_local float @ld_disjoint_align32_float_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 16
   %conv = uitofp i64 %1 to float
   ret float %conv
 }
@@ -2705,8 +2685,8 @@ define dso_local float @ld_not_disjoint64_float_uint64_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = uitofp i64 %1 to float
   ret float %conv
 }
@@ -2734,8 +2714,8 @@ define dso_local float @ld_disjoint_align64_float_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 4096
   %conv = uitofp i64 %1 to float
   ret float %conv
 }
@@ -2748,7 +2728,7 @@ define dso_local float @ld_cst_align16_float_uint64_t() {
 ; CHECK-NEXT:    xscvuxdsp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 4080 to i64*), align 16
+  %0 = load i64, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = uitofp i64 %0 to float
   ret float %conv
 }
@@ -2762,7 +2742,7 @@ define dso_local float @ld_cst_align32_float_uint64_t() {
 ; CHECK-NEXT:    xscvuxdsp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 9999900 to i64*), align 8
+  %0 = load i64, ptr inttoptr (i64 9999900 to ptr), align 8
   %conv = uitofp i64 %0 to float
   ret float %conv
 }
@@ -2786,7 +2766,7 @@ define dso_local float @ld_cst_align64_float_uint64_t() {
 ; CHECK-PREP10-NEXT:    xscvuxdsp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+  %0 = load i64, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = uitofp i64 %0 to float
   ret float %conv
 }
@@ -2799,29 +2779,28 @@ define dso_local float @ld_0_float_int64_t(i64 %ptr) {
 ; CHECK-NEXT:    xscvsxdsp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = sitofp i64 %1 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align16_float_int64_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align16_float_int64_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_float_int64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd f0, 8(r3)
 ; CHECK-NEXT:    xscvsxdsp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = sitofp i64 %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = sitofp i64 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align32_float_int64_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align32_float_int64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_float_int64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfd f0, 99999000(r3), 0
@@ -2836,15 +2815,14 @@ define dso_local float @ld_align32_float_int64_t(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    xscvsxdsp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = sitofp i64 %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = sitofp i64 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align64_float_int64_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align64_float_int64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_float_int64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -2862,25 +2840,23 @@ define dso_local float @ld_align64_float_int64_t(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    xscvsxdsp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = sitofp i64 %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = sitofp i64 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_reg_float_int64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local float @ld_reg_float_int64_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_float_int64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfdx f0, r3, r4
 ; CHECK-NEXT:    xscvsxdsp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = sitofp i64 %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = sitofp i64 %0 to float
   ret float %conv
 }
 
@@ -2895,8 +2871,8 @@ define dso_local float @ld_or_float_int64_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv1 = sitofp i64 %1 to float
   ret float %conv1
 }
@@ -2911,8 +2887,8 @@ define dso_local float @ld_not_disjoint16_float_int64_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = sitofp i64 %1 to float
   ret float %conv
 }
@@ -2928,8 +2904,8 @@ define dso_local float @ld_disjoint_align16_float_int64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = sitofp i64 %1 to float
   ret float %conv
 }
@@ -2945,8 +2921,8 @@ define dso_local float @ld_not_disjoint32_float_int64_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = sitofp i64 %1 to float
   ret float %conv
 }
@@ -2983,8 +2959,8 @@ define dso_local float @ld_disjoint_align32_float_int64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 16
   %conv = sitofp i64 %1 to float
   ret float %conv
 }
@@ -3013,8 +2989,8 @@ define dso_local float @ld_not_disjoint64_float_int64_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = sitofp i64 %1 to float
   ret float %conv
 }
@@ -3042,8 +3018,8 @@ define dso_local float @ld_disjoint_align64_float_int64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 4096
   %conv = sitofp i64 %1 to float
   ret float %conv
 }
@@ -3056,7 +3032,7 @@ define dso_local float @ld_cst_align16_float_int64_t() {
 ; CHECK-NEXT:    xscvsxdsp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 4080 to i64*), align 16
+  %0 = load i64, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = sitofp i64 %0 to float
   ret float %conv
 }
@@ -3070,7 +3046,7 @@ define dso_local float @ld_cst_align32_float_int64_t() {
 ; CHECK-NEXT:    xscvsxdsp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 9999900 to i64*), align 8
+  %0 = load i64, ptr inttoptr (i64 9999900 to ptr), align 8
   %conv = sitofp i64 %0 to float
   ret float %conv
 }
@@ -3094,7 +3070,7 @@ define dso_local float @ld_cst_align64_float_int64_t() {
 ; CHECK-PREP10-NEXT:    xscvsxdsp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+  %0 = load i64, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = sitofp i64 %0 to float
   ret float %conv
 }
@@ -3106,26 +3082,25 @@ define dso_local float @ld_0_float_float(i64 %ptr) {
 ; CHECK-NEXT:    lfs f1, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load float, ptr %0, align 4
   ret float %1
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align16_float_float(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align16_float_float(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_float_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs f1, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  ret float %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load float, ptr %add.ptr, align 4
+  ret float %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align32_float_float(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align32_float_float(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_float_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfs f1, 99999000(r3), 0
@@ -3138,14 +3113,13 @@ define dso_local float @ld_align32_float_float(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    lfsx f1, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  ret float %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load float, ptr %add.ptr, align 4
+  ret float %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align64_float_float(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align64_float_float(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_float_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -3161,23 +3135,21 @@ define dso_local float @ld_align64_float_float(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    lfsx f1, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  ret float %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load float, ptr %add.ptr, align 4
+  ret float %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_reg_float_float(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local float @ld_reg_float_float(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_float_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfsx f1, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  ret float %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load float, ptr %add.ptr, align 4
+  ret float %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
@@ -3190,8 +3162,8 @@ define dso_local float @ld_or_float_float(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   ret float %1
 }
 
@@ -3204,8 +3176,8 @@ define dso_local float @ld_not_disjoint16_float_float(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   ret float %1
 }
 
@@ -3219,8 +3191,8 @@ define dso_local float @ld_disjoint_align16_float_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 8
   ret float %1
 }
 
@@ -3234,8 +3206,8 @@ define dso_local float @ld_not_disjoint32_float_float(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   ret float %1
 }
 
@@ -3268,8 +3240,8 @@ define dso_local float @ld_disjoint_align32_float_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 16
   ret float %1
 }
 
@@ -3295,8 +3267,8 @@ define dso_local float @ld_not_disjoint64_float_float(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   ret float %1
 }
 
@@ -3321,8 +3293,8 @@ define dso_local float @ld_disjoint_align64_float_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4096
   ret float %1
 }
 
@@ -3333,7 +3305,7 @@ define dso_local float @ld_cst_align16_float_float() {
 ; CHECK-NEXT:    lfs f1, 4080(0)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 4080 to float*), align 16
+  %0 = load float, ptr inttoptr (i64 4080 to ptr), align 16
   ret float %0
 }
 
@@ -3345,7 +3317,7 @@ define dso_local float @ld_cst_align32_float_float() {
 ; CHECK-NEXT:    lfs f1, -27108(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 9999900 to float*), align 4
+  %0 = load float, ptr inttoptr (i64 9999900 to ptr), align 4
   ret float %0
 }
 
@@ -3366,7 +3338,7 @@ define dso_local float @ld_cst_align64_float_float() {
 ; CHECK-PREP10-NEXT:    lfs f1, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 1000000000000 to float*), align 4096
+  %0 = load float, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret float %0
 }
 
@@ -3378,29 +3350,28 @@ define dso_local float @ld_0_float_double(i64 %ptr) {
 ; CHECK-NEXT:    xsrsp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptrunc double %1 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align16_float_double(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align16_float_double(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_float_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd f0, 8(r3)
 ; CHECK-NEXT:    xsrsp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptrunc double %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptrunc double %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align32_float_double(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align32_float_double(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_float_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfd f0, 99999000(r3), 0
@@ -3415,15 +3386,14 @@ define dso_local float @ld_align32_float_double(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    xsrsp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptrunc double %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptrunc double %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align64_float_double(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align64_float_double(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_float_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -3441,25 +3411,23 @@ define dso_local float @ld_align64_float_double(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    xsrsp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptrunc double %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptrunc double %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_reg_float_double(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local float @ld_reg_float_double(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_float_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfdx f0, r3, r4
 ; CHECK-NEXT:    xsrsp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptrunc double %1 to float
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptrunc double %0 to float
   ret float %conv
 }
 
@@ -3474,8 +3442,8 @@ define dso_local float @ld_or_float_double(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv1 = fptrunc double %1 to float
   ret float %conv1
 }
@@ -3490,8 +3458,8 @@ define dso_local float @ld_not_disjoint16_float_double(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptrunc double %1 to float
   ret float %conv
 }
@@ -3507,8 +3475,8 @@ define dso_local float @ld_disjoint_align16_float_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptrunc double %1 to float
   ret float %conv
 }
@@ -3524,8 +3492,8 @@ define dso_local float @ld_not_disjoint32_float_double(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptrunc double %1 to float
   ret float %conv
 }
@@ -3562,8 +3530,8 @@ define dso_local float @ld_disjoint_align32_float_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 16
   %conv = fptrunc double %1 to float
   ret float %conv
 }
@@ -3592,8 +3560,8 @@ define dso_local float @ld_not_disjoint64_float_double(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptrunc double %1 to float
   ret float %conv
 }
@@ -3621,8 +3589,8 @@ define dso_local float @ld_disjoint_align64_float_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 4096
   %conv = fptrunc double %1 to float
   ret float %conv
 }
@@ -3635,7 +3603,7 @@ define dso_local float @ld_cst_align16_float_double() {
 ; CHECK-NEXT:    xsrsp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 4080 to double*), align 16
+  %0 = load double, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = fptrunc double %0 to float
   ret float %conv
 }
@@ -3649,7 +3617,7 @@ define dso_local float @ld_cst_align32_float_double() {
 ; CHECK-NEXT:    xsrsp f1, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 9999900 to double*), align 8
+  %0 = load double, ptr inttoptr (i64 9999900 to ptr), align 8
   %conv = fptrunc double %0 to float
   ret float %conv
 }
@@ -3673,7 +3641,7 @@ define dso_local float @ld_cst_align64_float_double() {
 ; CHECK-PREP10-NEXT:    xsrsp f1, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 1000000000000 to double*), align 4096
+  %0 = load double, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = fptrunc double %0 to float
   ret float %conv
 }
@@ -3694,13 +3662,13 @@ define dso_local void @st_0_float_uint8_t(i64 %ptr, float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui float %str to i8
-  %0 = inttoptr i64 %ptr to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %ptr to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_float_uint8_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align16_float_uint8_t(ptr nocapture %ptr, float %str) {
 ; CHECK-POSTP8-LABEL: st_align16_float_uint8_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    xscvdpuxws f0, f1
@@ -3716,13 +3684,13 @@ define dso_local void @st_align16_float_uint8_t(i8* nocapture %ptr, float %str)
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui float %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_float_uint8_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align32_float_uint8_t(ptr nocapture %ptr, float %str) {
 ; CHECK-P10-LABEL: st_align32_float_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpuxws f0, f1
@@ -3748,13 +3716,13 @@ define dso_local void @st_align32_float_uint8_t(i8* nocapture %ptr, float %str)
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui float %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_float_uint8_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align64_float_uint8_t(ptr nocapture %ptr, float %str) {
 ; CHECK-P10-LABEL: st_align64_float_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpuxws f0, f1
@@ -3783,13 +3751,13 @@ define dso_local void @st_align64_float_uint8_t(i8* nocapture %ptr, float %str)
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui float %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_float_uint8_t(i8* nocapture %ptr, i64 %off, float %str) {
+define dso_local void @st_reg_float_uint8_t(ptr nocapture %ptr, i64 %off, float %str) {
 ; CHECK-POSTP8-LABEL: st_reg_float_uint8_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    xscvdpuxws f0, f1
@@ -3804,8 +3772,8 @@ define dso_local void @st_reg_float_uint8_t(i8* nocapture %ptr, i64 %off, float
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui float %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
@@ -3829,8 +3797,8 @@ entry:
   %conv = fptoui float %str to i8
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -3853,8 +3821,8 @@ define dso_local void @st_not_disjoint16_float_uint8_t(i64 %ptr, float %str) {
 entry:
   %conv = fptoui float %str to i8
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -3879,8 +3847,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = fptoui float %str to i8
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 8
   ret void
 }
 
@@ -3905,8 +3873,8 @@ define dso_local void @st_not_disjoint32_float_uint8_t(i64 %ptr, float %str) {
 entry:
   %conv = fptoui float %str to i8
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -3945,8 +3913,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = fptoui float %str to i8
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 16
   ret void
 }
 
@@ -3987,8 +3955,8 @@ define dso_local void @st_not_disjoint64_float_uint8_t(i64 %ptr, float %str) {
 entry:
   %conv = fptoui float %str to i8
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -4027,8 +3995,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = fptoui float %str to i8
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -4049,7 +4017,7 @@ define dso_local void @st_cst_align16_float_uint8_t(float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui float %str to i8
-  store i8 %conv, i8* inttoptr (i64 4080 to i8*), align 16
+  store i8 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -4079,7 +4047,7 @@ define dso_local void @st_cst_align32_float_uint8_t(float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui float %str to i8
-  store i8 %conv, i8* inttoptr (i64 9999900 to i8*), align 4
+  store i8 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -4113,7 +4081,7 @@ define dso_local void @st_cst_align64_float_uint8_t(float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui float %str to i8
-  store i8 %conv, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+  store i8 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -4133,13 +4101,13 @@ define dso_local void @st_0_float_int8_t(i64 %ptr, float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi float %str to i8
-  %0 = inttoptr i64 %ptr to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %ptr to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_float_int8_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align16_float_int8_t(ptr nocapture %ptr, float %str) {
 ; CHECK-POSTP8-LABEL: st_align16_float_int8_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    xscvdpsxws f0, f1
@@ -4155,13 +4123,13 @@ define dso_local void @st_align16_float_int8_t(i8* nocapture %ptr, float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi float %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_float_int8_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align32_float_int8_t(ptr nocapture %ptr, float %str) {
 ; CHECK-P10-LABEL: st_align32_float_int8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpsxws f0, f1
@@ -4187,13 +4155,13 @@ define dso_local void @st_align32_float_int8_t(i8* nocapture %ptr, float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi float %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_float_int8_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align64_float_int8_t(ptr nocapture %ptr, float %str) {
 ; CHECK-P10-LABEL: st_align64_float_int8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpsxws f0, f1
@@ -4222,13 +4190,13 @@ define dso_local void @st_align64_float_int8_t(i8* nocapture %ptr, float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi float %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_float_int8_t(i8* nocapture %ptr, i64 %off, float %str) {
+define dso_local void @st_reg_float_int8_t(ptr nocapture %ptr, i64 %off, float %str) {
 ; CHECK-POSTP8-LABEL: st_reg_float_int8_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    xscvdpsxws f0, f1
@@ -4243,8 +4211,8 @@ define dso_local void @st_reg_float_int8_t(i8* nocapture %ptr, i64 %off, float %
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi float %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
@@ -4268,8 +4236,8 @@ entry:
   %conv = fptosi float %str to i8
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -4292,8 +4260,8 @@ define dso_local void @st_not_disjoint16_float_int8_t(i64 %ptr, float %str) {
 entry:
   %conv = fptosi float %str to i8
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -4318,8 +4286,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = fptosi float %str to i8
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 8
   ret void
 }
 
@@ -4344,8 +4312,8 @@ define dso_local void @st_not_disjoint32_float_int8_t(i64 %ptr, float %str) {
 entry:
   %conv = fptosi float %str to i8
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -4384,8 +4352,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = fptosi float %str to i8
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 16
   ret void
 }
 
@@ -4426,8 +4394,8 @@ define dso_local void @st_not_disjoint64_float_int8_t(i64 %ptr, float %str) {
 entry:
   %conv = fptosi float %str to i8
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -4466,8 +4434,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = fptosi float %str to i8
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -4488,7 +4456,7 @@ define dso_local void @st_cst_align16_float_int8_t(float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi float %str to i8
-  store i8 %conv, i8* inttoptr (i64 4080 to i8*), align 16
+  store i8 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -4518,7 +4486,7 @@ define dso_local void @st_cst_align32_float_int8_t(float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi float %str to i8
-  store i8 %conv, i8* inttoptr (i64 9999900 to i8*), align 4
+  store i8 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -4552,7 +4520,7 @@ define dso_local void @st_cst_align64_float_int8_t(float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi float %str to i8
-  store i8 %conv, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+  store i8 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -4572,13 +4540,13 @@ define dso_local void @st_0_float_uint16_t(i64 %ptr, float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui float %str to i16
-  %0 = inttoptr i64 %ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %ptr to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_float_uint16_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align16_float_uint16_t(ptr nocapture %ptr, float %str) {
 ; CHECK-POSTP8-LABEL: st_align16_float_uint16_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    xscvdpuxws f0, f1
@@ -4594,14 +4562,13 @@ define dso_local void @st_align16_float_uint16_t(i8* nocapture %ptr, float %str)
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui float %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_float_uint16_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align32_float_uint16_t(ptr nocapture %ptr, float %str) {
 ; CHECK-P10-LABEL: st_align32_float_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpuxws f0, f1
@@ -4627,14 +4594,13 @@ define dso_local void @st_align32_float_uint16_t(i8* nocapture %ptr, float %str)
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui float %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_float_uint16_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align64_float_uint16_t(ptr nocapture %ptr, float %str) {
 ; CHECK-P10-LABEL: st_align64_float_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpuxws f0, f1
@@ -4663,14 +4629,13 @@ define dso_local void @st_align64_float_uint16_t(i8* nocapture %ptr, float %str)
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui float %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_float_uint16_t(i8* nocapture %ptr, i64 %off, float %str) {
+define dso_local void @st_reg_float_uint16_t(ptr nocapture %ptr, i64 %off, float %str) {
 ; CHECK-POSTP8-LABEL: st_reg_float_uint16_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    xscvdpuxws f0, f1
@@ -4685,9 +4650,8 @@ define dso_local void @st_reg_float_uint16_t(i8* nocapture %ptr, i64 %off, float
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui float %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
@@ -4711,8 +4675,8 @@ entry:
   %conv = fptoui float %str to i16
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -4735,8 +4699,8 @@ define dso_local void @st_not_disjoint16_float_uint16_t(i64 %ptr, float %str) {
 entry:
   %conv = fptoui float %str to i16
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -4761,8 +4725,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = fptoui float %str to i16
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 8
   ret void
 }
 
@@ -4787,8 +4751,8 @@ define dso_local void @st_not_disjoint32_float_uint16_t(i64 %ptr, float %str) {
 entry:
   %conv = fptoui float %str to i16
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -4827,8 +4791,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = fptoui float %str to i16
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 16
   ret void
 }
 
@@ -4869,8 +4833,8 @@ define dso_local void @st_not_disjoint64_float_uint16_t(i64 %ptr, float %str) {
 entry:
   %conv = fptoui float %str to i16
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -4909,8 +4873,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = fptoui float %str to i16
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -4931,7 +4895,7 @@ define dso_local void @st_cst_align16_float_uint16_t(float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui float %str to i16
-  store i16 %conv, i16* inttoptr (i64 4080 to i16*), align 16
+  store i16 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -4961,7 +4925,7 @@ define dso_local void @st_cst_align32_float_uint16_t(float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui float %str to i16
-  store i16 %conv, i16* inttoptr (i64 9999900 to i16*), align 4
+  store i16 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -4995,7 +4959,7 @@ define dso_local void @st_cst_align64_float_uint16_t(float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui float %str to i16
-  store i16 %conv, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+  store i16 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -5015,13 +4979,13 @@ define dso_local void @st_0_float_int16_t(i64 %ptr, float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi float %str to i16
-  %0 = inttoptr i64 %ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %ptr to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_float_int16_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align16_float_int16_t(ptr nocapture %ptr, float %str) {
 ; CHECK-POSTP8-LABEL: st_align16_float_int16_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    xscvdpsxws f0, f1
@@ -5037,14 +5001,13 @@ define dso_local void @st_align16_float_int16_t(i8* nocapture %ptr, float %str)
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi float %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_float_int16_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align32_float_int16_t(ptr nocapture %ptr, float %str) {
 ; CHECK-P10-LABEL: st_align32_float_int16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpsxws f0, f1
@@ -5070,14 +5033,13 @@ define dso_local void @st_align32_float_int16_t(i8* nocapture %ptr, float %str)
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi float %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_float_int16_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align64_float_int16_t(ptr nocapture %ptr, float %str) {
 ; CHECK-P10-LABEL: st_align64_float_int16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpsxws f0, f1
@@ -5106,14 +5068,13 @@ define dso_local void @st_align64_float_int16_t(i8* nocapture %ptr, float %str)
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi float %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_float_int16_t(i8* nocapture %ptr, i64 %off, float %str) {
+define dso_local void @st_reg_float_int16_t(ptr nocapture %ptr, i64 %off, float %str) {
 ; CHECK-POSTP8-LABEL: st_reg_float_int16_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    xscvdpsxws f0, f1
@@ -5128,9 +5089,8 @@ define dso_local void @st_reg_float_int16_t(i8* nocapture %ptr, i64 %off, float
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi float %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
@@ -5154,8 +5114,8 @@ entry:
   %conv = fptosi float %str to i16
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -5178,8 +5138,8 @@ define dso_local void @st_not_disjoint16_float_int16_t(i64 %ptr, float %str) {
 entry:
   %conv = fptosi float %str to i16
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -5204,8 +5164,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = fptosi float %str to i16
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 8
   ret void
 }
 
@@ -5230,8 +5190,8 @@ define dso_local void @st_not_disjoint32_float_int16_t(i64 %ptr, float %str) {
 entry:
   %conv = fptosi float %str to i16
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -5270,8 +5230,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = fptosi float %str to i16
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 16
   ret void
 }
 
@@ -5312,8 +5272,8 @@ define dso_local void @st_not_disjoint64_float_int16_t(i64 %ptr, float %str) {
 entry:
   %conv = fptosi float %str to i16
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -5352,8 +5312,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = fptosi float %str to i16
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -5374,7 +5334,7 @@ define dso_local void @st_cst_align16_float_int16_t(float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi float %str to i16
-  store i16 %conv, i16* inttoptr (i64 4080 to i16*), align 16
+  store i16 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -5404,7 +5364,7 @@ define dso_local void @st_cst_align32_float_int16_t(float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi float %str to i16
-  store i16 %conv, i16* inttoptr (i64 9999900 to i16*), align 4
+  store i16 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -5438,7 +5398,7 @@ define dso_local void @st_cst_align64_float_int16_t(float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi float %str to i16
-  store i16 %conv, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+  store i16 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -5451,13 +5411,13 @@ define dso_local void @st_0_float_uint32_t(i64 %ptr, float %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptoui float %str to i32
-  %0 = inttoptr i64 %ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_float_uint32_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align16_float_uint32_t(ptr nocapture %ptr, float %str) {
 ; CHECK-LABEL: st_align16_float_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xscvdpuxws f0, f1
@@ -5466,14 +5426,13 @@ define dso_local void @st_align16_float_uint32_t(i8* nocapture %ptr, float %str)
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptoui float %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_float_uint32_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align32_float_uint32_t(ptr nocapture %ptr, float %str) {
 ; CHECK-P10-LABEL: st_align32_float_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpuxws f0, f1
@@ -5490,14 +5449,13 @@ define dso_local void @st_align32_float_uint32_t(i8* nocapture %ptr, float %str)
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptoui float %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_float_uint32_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align64_float_uint32_t(ptr nocapture %ptr, float %str) {
 ; CHECK-P10-LABEL: st_align64_float_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpuxws f0, f1
@@ -5516,14 +5474,13 @@ define dso_local void @st_align64_float_uint32_t(i8* nocapture %ptr, float %str)
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptoui float %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_float_uint32_t(i8* nocapture %ptr, i64 %off, float %str) {
+define dso_local void @st_reg_float_uint32_t(ptr nocapture %ptr, i64 %off, float %str) {
 ; CHECK-LABEL: st_reg_float_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xscvdpuxws f0, f1
@@ -5531,9 +5488,8 @@ define dso_local void @st_reg_float_uint32_t(i8* nocapture %ptr, i64 %off, float
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptoui float %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
@@ -5549,8 +5505,8 @@ entry:
   %conv = fptoui float %str to i32
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -5565,8 +5521,8 @@ define dso_local void @st_not_disjoint16_float_uint32_t(i64 %ptr, float %str) {
 entry:
   %conv = fptoui float %str to i32
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -5583,8 +5539,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = fptoui float %str to i32
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 8
   ret void
 }
 
@@ -5600,8 +5556,8 @@ define dso_local void @st_not_disjoint32_float_uint32_t(i64 %ptr, float %str) {
 entry:
   %conv = fptoui float %str to i32
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -5639,8 +5595,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = fptoui float %str to i32
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 16
   ret void
 }
 
@@ -5680,8 +5636,8 @@ define dso_local void @st_not_disjoint64_float_uint32_t(i64 %ptr, float %str) {
 entry:
   %conv = fptoui float %str to i32
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -5709,8 +5665,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = fptoui float %str to i32
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -5724,7 +5680,7 @@ define dso_local void @st_cst_align16_float_uint32_t(float %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptoui float %str to i32
-  store i32 %conv, i32* inttoptr (i64 4080 to i32*), align 16
+  store i32 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -5746,7 +5702,7 @@ define dso_local void @st_cst_align32_float_uint32_t(float %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptoui float %str to i32
-  store i32 %conv, i32* inttoptr (i64 9999900 to i32*), align 4
+  store i32 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -5770,7 +5726,7 @@ define dso_local void @st_cst_align64_float_uint32_t(float %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptoui float %str to i32
-  store i32 %conv, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+  store i32 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -5783,13 +5739,13 @@ define dso_local void @st_0_float_int32_t(i64 %ptr, float %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptosi float %str to i32
-  %0 = inttoptr i64 %ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_float_int32_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align16_float_int32_t(ptr nocapture %ptr, float %str) {
 ; CHECK-LABEL: st_align16_float_int32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xscvdpsxws f0, f1
@@ -5798,14 +5754,13 @@ define dso_local void @st_align16_float_int32_t(i8* nocapture %ptr, float %str)
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptosi float %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_float_int32_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align32_float_int32_t(ptr nocapture %ptr, float %str) {
 ; CHECK-P10-LABEL: st_align32_float_int32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpsxws f0, f1
@@ -5822,14 +5777,13 @@ define dso_local void @st_align32_float_int32_t(i8* nocapture %ptr, float %str)
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptosi float %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_float_int32_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align64_float_int32_t(ptr nocapture %ptr, float %str) {
 ; CHECK-P10-LABEL: st_align64_float_int32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpsxws f0, f1
@@ -5848,14 +5802,13 @@ define dso_local void @st_align64_float_int32_t(i8* nocapture %ptr, float %str)
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptosi float %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_float_int32_t(i8* nocapture %ptr, i64 %off, float %str) {
+define dso_local void @st_reg_float_int32_t(ptr nocapture %ptr, i64 %off, float %str) {
 ; CHECK-LABEL: st_reg_float_int32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xscvdpsxws f0, f1
@@ -5863,9 +5816,8 @@ define dso_local void @st_reg_float_int32_t(i8* nocapture %ptr, i64 %off, float
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptosi float %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
@@ -5881,8 +5833,8 @@ entry:
   %conv = fptosi float %str to i32
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -5897,8 +5849,8 @@ define dso_local void @st_not_disjoint16_float_int32_t(i64 %ptr, float %str) {
 entry:
   %conv = fptosi float %str to i32
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -5915,8 +5867,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = fptosi float %str to i32
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 8
   ret void
 }
 
@@ -5932,8 +5884,8 @@ define dso_local void @st_not_disjoint32_float_int32_t(i64 %ptr, float %str) {
 entry:
   %conv = fptosi float %str to i32
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -5971,8 +5923,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = fptosi float %str to i32
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 16
   ret void
 }
 
@@ -6012,8 +5964,8 @@ define dso_local void @st_not_disjoint64_float_int32_t(i64 %ptr, float %str) {
 entry:
   %conv = fptosi float %str to i32
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -6041,8 +5993,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = fptosi float %str to i32
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -6056,7 +6008,7 @@ define dso_local void @st_cst_align16_float_int32_t(float %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptosi float %str to i32
-  store i32 %conv, i32* inttoptr (i64 4080 to i32*), align 16
+  store i32 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -6078,7 +6030,7 @@ define dso_local void @st_cst_align32_float_int32_t(float %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptosi float %str to i32
-  store i32 %conv, i32* inttoptr (i64 9999900 to i32*), align 4
+  store i32 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -6102,7 +6054,7 @@ define dso_local void @st_cst_align64_float_int32_t(float %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptosi float %str to i32
-  store i32 %conv, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+  store i32 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -6121,13 +6073,13 @@ define dso_local void @st_0_float_uint64_t(i64 %ptr, float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui float %str to i64
-  %0 = inttoptr i64 %ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_float_uint64_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align16_float_uint64_t(ptr nocapture %ptr, float %str) {
 ; CHECK-POSTP8-LABEL: st_align16_float_uint64_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    xscvdpuxds v2, f1
@@ -6142,14 +6094,13 @@ define dso_local void @st_align16_float_uint64_t(i8* nocapture %ptr, float %str)
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui float %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_float_uint64_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align32_float_uint64_t(ptr nocapture %ptr, float %str) {
 ; CHECK-P10-LABEL: st_align32_float_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpuxds v2, f1
@@ -6165,14 +6116,13 @@ define dso_local void @st_align32_float_uint64_t(i8* nocapture %ptr, float %str)
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptoui float %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_float_uint64_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align64_float_uint64_t(ptr nocapture %ptr, float %str) {
 ; CHECK-P10-LABEL: st_align64_float_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpuxds f0, f1
@@ -6191,14 +6141,13 @@ define dso_local void @st_align64_float_uint64_t(i8* nocapture %ptr, float %str)
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptoui float %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_float_uint64_t(i8* nocapture %ptr, i64 %off, float %str) {
+define dso_local void @st_reg_float_uint64_t(ptr nocapture %ptr, i64 %off, float %str) {
 ; CHECK-LABEL: st_reg_float_uint64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xscvdpuxds f0, f1
@@ -6206,9 +6155,8 @@ define dso_local void @st_reg_float_uint64_t(i8* nocapture %ptr, i64 %off, float
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptoui float %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
@@ -6231,8 +6179,8 @@ entry:
   %conv = fptoui float %str to i64
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6254,8 +6202,8 @@ define dso_local void @st_not_disjoint16_float_uint64_t(i64 %ptr, float %str) {
 entry:
   %conv = fptoui float %str to i64
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6279,8 +6227,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = fptoui float %str to i64
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6304,8 +6252,8 @@ define dso_local void @st_not_disjoint32_float_uint64_t(i64 %ptr, float %str) {
 entry:
   %conv = fptoui float %str to i64
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6342,8 +6290,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = fptoui float %str to i64
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 16
   ret void
 }
 
@@ -6383,8 +6331,8 @@ define dso_local void @st_not_disjoint64_float_uint64_t(i64 %ptr, float %str) {
 entry:
   %conv = fptoui float %str to i64
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6412,8 +6360,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = fptoui float %str to i64
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -6433,7 +6381,7 @@ define dso_local void @st_cst_align16_float_uint64_t(float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui float %str to i64
-  store i64 %conv, i64* inttoptr (i64 4080 to i64*), align 16
+  store i64 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -6455,7 +6403,7 @@ define dso_local void @st_cst_align32_float_uint64_t(float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui float %str to i64
-  store i64 %conv, i64* inttoptr (i64 9999900 to i64*), align 8
+  store i64 %conv, ptr inttoptr (i64 9999900 to ptr), align 8
   ret void
 }
 
@@ -6488,7 +6436,7 @@ define dso_local void @st_cst_align64_float_uint64_t(float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptoui float %str to i64
-  store i64 %conv, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+  store i64 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -6507,13 +6455,13 @@ define dso_local void @st_0_float_int64_t(i64 %ptr, float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi float %str to i64
-  %0 = inttoptr i64 %ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_float_int64_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align16_float_int64_t(ptr nocapture %ptr, float %str) {
 ; CHECK-POSTP8-LABEL: st_align16_float_int64_t:
 ; CHECK-POSTP8:       # %bb.0: # %entry
 ; CHECK-POSTP8-NEXT:    xscvdpsxds v2, f1
@@ -6528,14 +6476,13 @@ define dso_local void @st_align16_float_int64_t(i8* nocapture %ptr, float %str)
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi float %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_float_int64_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align32_float_int64_t(ptr nocapture %ptr, float %str) {
 ; CHECK-P10-LABEL: st_align32_float_int64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpsxds v2, f1
@@ -6551,14 +6498,13 @@ define dso_local void @st_align32_float_int64_t(i8* nocapture %ptr, float %str)
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptosi float %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_float_int64_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align64_float_int64_t(ptr nocapture %ptr, float %str) {
 ; CHECK-P10-LABEL: st_align64_float_int64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    xscvdpsxds f0, f1
@@ -6577,14 +6523,13 @@ define dso_local void @st_align64_float_int64_t(i8* nocapture %ptr, float %str)
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fptosi float %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_float_int64_t(i8* nocapture %ptr, i64 %off, float %str) {
+define dso_local void @st_reg_float_int64_t(ptr nocapture %ptr, i64 %off, float %str) {
 ; CHECK-LABEL: st_reg_float_int64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xscvdpsxds f0, f1
@@ -6592,9 +6537,8 @@ define dso_local void @st_reg_float_int64_t(i8* nocapture %ptr, i64 %off, float
 ; CHECK-NEXT:    blr
 entry:
   %conv = fptosi float %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
@@ -6617,8 +6561,8 @@ entry:
   %conv = fptosi float %str to i64
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6640,8 +6584,8 @@ define dso_local void @st_not_disjoint16_float_int64_t(i64 %ptr, float %str) {
 entry:
   %conv = fptosi float %str to i64
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6665,8 +6609,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = fptosi float %str to i64
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6690,8 +6634,8 @@ define dso_local void @st_not_disjoint32_float_int64_t(i64 %ptr, float %str) {
 entry:
   %conv = fptosi float %str to i64
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6728,8 +6672,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = fptosi float %str to i64
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 16
   ret void
 }
 
@@ -6769,8 +6713,8 @@ define dso_local void @st_not_disjoint64_float_int64_t(i64 %ptr, float %str) {
 entry:
   %conv = fptosi float %str to i64
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6798,8 +6742,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = fptosi float %str to i64
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -6819,7 +6763,7 @@ define dso_local void @st_cst_align16_float_int64_t(float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi float %str to i64
-  store i64 %conv, i64* inttoptr (i64 4080 to i64*), align 16
+  store i64 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -6841,7 +6785,7 @@ define dso_local void @st_cst_align32_float_int64_t(float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi float %str to i64
-  store i64 %conv, i64* inttoptr (i64 9999900 to i64*), align 8
+  store i64 %conv, ptr inttoptr (i64 9999900 to ptr), align 8
   ret void
 }
 
@@ -6874,7 +6818,7 @@ define dso_local void @st_cst_align64_float_int64_t(float %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = fptosi float %str to i64
-  store i64 %conv, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+  store i64 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -6885,26 +6829,25 @@ define dso_local void @st_0_float_float(i64 %ptr, float %str) {
 ; CHECK-NEXT:    stfs f1, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to float*
-  store float %str, float* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  store float %str, ptr %0, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_float_float(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align16_float_float(ptr nocapture %ptr, float %str) {
 ; CHECK-LABEL: st_align16_float_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stfs f1, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to float*
-  store float %str, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store float %str, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_float_float(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align32_float_float(ptr nocapture %ptr, float %str) {
 ; CHECK-P10-LABEL: st_align32_float_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pstfs f1, 99999000(r3), 0
@@ -6917,14 +6860,13 @@ define dso_local void @st_align32_float_float(i8* nocapture %ptr, float %str) {
 ; CHECK-PREP10-NEXT:    stfsx f1, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to float*
-  store float %str, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store float %str, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_float_float(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align64_float_float(ptr nocapture %ptr, float %str) {
 ; CHECK-P10-LABEL: st_align64_float_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -6940,22 +6882,20 @@ define dso_local void @st_align64_float_float(i8* nocapture %ptr, float %str) {
 ; CHECK-PREP10-NEXT:    stfsx f1, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to float*
-  store float %str, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store float %str, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_float_float(i8* nocapture %ptr, i64 %off, float %str) {
+define dso_local void @st_reg_float_float(ptr nocapture %ptr, i64 %off, float %str) {
 ; CHECK-LABEL: st_reg_float_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stfsx f1, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to float*
-  store float %str, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store float %str, ptr %add.ptr, align 4
   ret void
 }
 
@@ -6969,8 +6909,8 @@ define dso_local void @st_or1_float_float(i64 %ptr, i8 zeroext %off, float %str)
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to float*
-  store float %str, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %str, ptr %0, align 4
   ret void
 }
 
@@ -6983,8 +6923,8 @@ define dso_local void @st_not_disjoint16_float_float(i64 %ptr, float %str) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to float*
-  store float %str, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %str, ptr %0, align 4
   ret void
 }
 
@@ -6998,8 +6938,8 @@ define dso_local void @st_disjoint_align16_float_float(i64 %ptr, float %str) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to float*
-  store float %str, float* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store float %str, ptr %0, align 8
   ret void
 }
 
@@ -7013,8 +6953,8 @@ define dso_local void @st_not_disjoint32_float_float(i64 %ptr, float %str) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to float*
-  store float %str, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %str, ptr %0, align 4
   ret void
 }
 
@@ -7047,8 +6987,8 @@ define dso_local void @st_disjoint_align32_float_float(i64 %ptr, float %str) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to float*
-  store float %str, float* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store float %str, ptr %0, align 16
   ret void
 }
 
@@ -7074,8 +7014,8 @@ define dso_local void @st_not_disjoint64_float_float(i64 %ptr, float %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to float*
-  store float %str, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %str, ptr %0, align 4
   ret void
 }
 
@@ -7100,8 +7040,8 @@ define dso_local void @st_disjoint_align64_float_float(i64 %ptr, float %str) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to float*
-  store float %str, float* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store float %str, ptr %0, align 4096
   ret void
 }
 
@@ -7112,7 +7052,7 @@ define dso_local void @st_cst_align16_float_float(float %str) {
 ; CHECK-NEXT:    stfs f1, 4080(0)
 ; CHECK-NEXT:    blr
 entry:
-  store float %str, float* inttoptr (i64 4080 to float*), align 16
+  store float %str, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -7124,7 +7064,7 @@ define dso_local void @st_cst_align32_float_float(float %str) {
 ; CHECK-NEXT:    stfs f1, -27108(r3)
 ; CHECK-NEXT:    blr
 entry:
-  store float %str, float* inttoptr (i64 9999900 to float*), align 4
+  store float %str, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -7145,7 +7085,7 @@ define dso_local void @st_cst_align64_float_float(float %str) {
 ; CHECK-PREP10-NEXT:    stfs f1, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  store float %str, float* inttoptr (i64 1000000000000 to float*), align 4096
+  store float %str, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -7157,27 +7097,26 @@ define dso_local void @st_0_float_double(i64 %ptr, float %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = fpext float %str to double
-  %0 = inttoptr i64 %ptr to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_float_double(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align16_float_double(ptr nocapture %ptr, float %str) {
 ; CHECK-LABEL: st_align16_float_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stfd f1, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
   %conv = fpext float %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_float_double(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align32_float_double(ptr nocapture %ptr, float %str) {
 ; CHECK-P10-LABEL: st_align32_float_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pstfd f1, 99999000(r3), 0
@@ -7191,14 +7130,13 @@ define dso_local void @st_align32_float_double(i8* nocapture %ptr, float %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fpext float %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_float_double(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align64_float_double(ptr nocapture %ptr, float %str) {
 ; CHECK-P10-LABEL: st_align64_float_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -7215,23 +7153,21 @@ define dso_local void @st_align64_float_double(i8* nocapture %ptr, float %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fpext float %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_float_double(i8* nocapture %ptr, i64 %off, float %str) {
+define dso_local void @st_reg_float_double(ptr nocapture %ptr, i64 %off, float %str) {
 ; CHECK-LABEL: st_reg_float_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stfdx f1, r3, r4
 ; CHECK-NEXT:    blr
 entry:
   %conv = fpext float %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
@@ -7246,8 +7182,8 @@ entry:
   %conv = fpext float %str to double
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -7261,8 +7197,8 @@ define dso_local void @st_not_disjoint16_float_double(i64 %ptr, float %str) {
 entry:
   %conv = fpext float %str to double
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -7277,8 +7213,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = fpext float %str to double
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -7293,8 +7229,8 @@ define dso_local void @st_not_disjoint32_float_double(i64 %ptr, float %str) {
 entry:
   %conv = fpext float %str to double
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -7328,8 +7264,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = fpext float %str to double
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 16
   ret void
 }
 
@@ -7356,8 +7292,8 @@ define dso_local void @st_not_disjoint64_float_double(i64 %ptr, float %str) {
 entry:
   %conv = fpext float %str to double
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -7383,8 +7319,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = fpext float %str to double
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 4096
   ret void
 }
 
@@ -7396,7 +7332,7 @@ define dso_local void @st_cst_align16_float_double(float %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = fpext float %str to double
-  store double %conv, double* inttoptr (i64 4080 to double*), align 16
+  store double %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -7409,7 +7345,7 @@ define dso_local void @st_cst_align32_float_double(float %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = fpext float %str to double
-  store double %conv, double* inttoptr (i64 9999900 to double*), align 8
+  store double %conv, ptr inttoptr (i64 9999900 to ptr), align 8
   ret void
 }
 
@@ -7431,6 +7367,6 @@ define dso_local void @st_cst_align64_float_double(float %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = fpext float %str to double
-  store double %conv, double* inttoptr (i64 1000000000000 to double*), align 4096
+  store double %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/scalar-i16-ldst.ll b/llvm/test/CodeGen/PowerPC/scalar-i16-ldst.ll
index f45611f18346..5f11d98c253b 100644
--- a/llvm/test/CodeGen/PowerPC/scalar-i16-ldst.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar-i16-ldst.ll
@@ -26,28 +26,28 @@ define dso_local signext i16 @ld_0_int16_t_int8_t(i64 %ptr) {
 ; CHECK-NEXT:    extsb r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sext i8 %1 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_align16_int16_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align16_int16_t_int8_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_int16_t_int8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lbz r3, 8(r3)
 ; CHECK-NEXT:    extsb r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sext i8 %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_align32_int16_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align32_int16_t_int8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_int16_t_int8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plbz r3, 99999000(r3), 0
@@ -62,14 +62,14 @@ define dso_local signext i16 @ld_align32_int16_t_int8_t(i8* nocapture readonly %
 ; CHECK-PREP10-NEXT:    extsb r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sext i8 %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_align64_int16_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align64_int16_t_int8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_int16_t_int8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -87,22 +87,22 @@ define dso_local signext i16 @ld_align64_int16_t_int8_t(i8* nocapture readonly %
 ; CHECK-PREP10-NEXT:    extsb r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sext i8 %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_reg_int16_t_int8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i16 @ld_reg_int16_t_int8_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_int16_t_int8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lbzx r3, r3, r4
 ; CHECK-NEXT:    extsb r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sext i8 %0 to i16
   ret i16 %conv
 }
@@ -118,8 +118,8 @@ define dso_local signext i16 @ld_or_int16_t_int8_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv1 = sext i8 %1 to i16
   ret i16 %conv1
 }
@@ -134,8 +134,8 @@ define dso_local signext i16 @ld_not_disjoint16_int16_t_int8_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sext i8 %1 to i16
   ret i16 %conv
 }
@@ -151,8 +151,8 @@ define dso_local signext i16 @ld_disjoint_align16_int16_t_int8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 8
   %conv = sext i8 %1 to i16
   ret i16 %conv
 }
@@ -168,8 +168,8 @@ define dso_local signext i16 @ld_not_disjoint32_int16_t_int8_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sext i8 %1 to i16
   ret i16 %conv
 }
@@ -206,8 +206,8 @@ define dso_local signext i16 @ld_disjoint_align32_int16_t_int8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 16
   %conv = sext i8 %1 to i16
   ret i16 %conv
 }
@@ -236,8 +236,8 @@ define dso_local signext i16 @ld_not_disjoint64_int16_t_int8_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sext i8 %1 to i16
   ret i16 %conv
 }
@@ -265,8 +265,8 @@ define dso_local signext i16 @ld_disjoint_align64_int16_t_int8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 4096
   %conv = sext i8 %1 to i16
   ret i16 %conv
 }
@@ -279,7 +279,7 @@ define dso_local signext i16 @ld_cst_align16_int16_t_int8_t() {
 ; CHECK-NEXT:    extsb r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+  %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = sext i8 %0 to i16
   ret i16 %conv
 }
@@ -293,7 +293,7 @@ define dso_local signext i16 @ld_cst_align32_int16_t_int8_t() {
 ; CHECK-NEXT:    extsb r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+  %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = sext i8 %0 to i16
   ret i16 %conv
 }
@@ -317,7 +317,7 @@ define dso_local signext i16 @ld_cst_align64_int16_t_int8_t() {
 ; CHECK-PREP10-NEXT:    extsb r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+  %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = sext i8 %0 to i16
   ret i16 %conv
 }
@@ -329,26 +329,25 @@ define dso_local signext i16 @ld_0_int16_t_uint16_t(i64 %ptr) {
 ; CHECK-NEXT:    lha r3, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i16, ptr %0, align 2
   ret i16 %1
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_align16_int16_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align16_int16_t_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_int16_t_uint16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lha r3, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  ret i16 %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i16, ptr %add.ptr, align 2
+  ret i16 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_align32_int16_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align32_int16_t_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_int16_t_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plha r3, 99999000(r3), 0
@@ -361,14 +360,13 @@ define dso_local signext i16 @ld_align32_int16_t_uint16_t(i8* nocapture readonly
 ; CHECK-PREP10-NEXT:    lhax r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  ret i16 %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i16, ptr %add.ptr, align 2
+  ret i16 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_align64_int16_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align64_int16_t_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_int16_t_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -384,23 +382,21 @@ define dso_local signext i16 @ld_align64_int16_t_uint16_t(i8* nocapture readonly
 ; CHECK-PREP10-NEXT:    lhax r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  ret i16 %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i16, ptr %add.ptr, align 2
+  ret i16 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_reg_int16_t_uint16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i16 @ld_reg_int16_t_uint16_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_int16_t_uint16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lhax r3, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  ret i16 %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i16, ptr %add.ptr, align 2
+  ret i16 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
@@ -413,8 +409,8 @@ define dso_local signext i16 @ld_or_int16_t_uint16_t(i64 %ptr, i8 zeroext %off)
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   ret i16 %1
 }
 
@@ -427,8 +423,8 @@ define dso_local signext i16 @ld_not_disjoint16_int16_t_uint16_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   ret i16 %1
 }
 
@@ -442,8 +438,8 @@ define dso_local signext i16 @ld_disjoint_align16_int16_t_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 8
   ret i16 %1
 }
 
@@ -457,8 +453,8 @@ define dso_local signext i16 @ld_not_disjoint32_int16_t_uint16_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   ret i16 %1
 }
 
@@ -491,8 +487,8 @@ define dso_local signext i16 @ld_disjoint_align32_int16_t_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 16
   ret i16 %1
 }
 
@@ -518,8 +514,8 @@ define dso_local signext i16 @ld_not_disjoint64_int16_t_uint16_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   ret i16 %1
 }
 
@@ -544,8 +540,8 @@ define dso_local signext i16 @ld_disjoint_align64_int16_t_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 4096
   ret i16 %1
 }
 
@@ -556,7 +552,7 @@ define dso_local signext i16 @ld_cst_align16_int16_t_uint16_t() {
 ; CHECK-NEXT:    lha r3, 4080(0)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+  %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
   ret i16 %0
 }
 
@@ -568,7 +564,7 @@ define dso_local signext i16 @ld_cst_align32_int16_t_uint16_t() {
 ; CHECK-NEXT:    lha r3, -27108(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+  %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
   ret i16 %0
 }
 
@@ -589,7 +585,7 @@ define dso_local signext i16 @ld_cst_align64_int16_t_uint16_t() {
 ; CHECK-PREP10-NEXT:    lha r3, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+  %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret i16 %0
 }
 
@@ -605,14 +601,14 @@ define dso_local signext i16 @ld_0_int16_t_uint32_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    lha r3, 2(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = trunc i32 %1 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_align16_int16_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align16_int16_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-LE-LABEL: ld_align16_int16_t_uint32_t:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    lha r3, 8(r3)
@@ -623,15 +619,14 @@ define dso_local signext i16 @ld_align16_int16_t_uint32_t(i8* nocapture readonly
 ; CHECK-BE-NEXT:    lha r3, 10(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = trunc i32 %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = trunc i32 %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_align32_int16_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align32_int16_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_align32_int16_t_uint32_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    plha r3, 99999000(r3), 0
@@ -670,15 +665,14 @@ define dso_local signext i16 @ld_align32_int16_t_uint32_t(i8* nocapture readonly
 ; CHECK-P8-BE-NEXT:    lhax r3, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = trunc i32 %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = trunc i32 %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_align64_int16_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align64_int16_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_align64_int16_t_uint32_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    pli r4, 244140625
@@ -728,15 +722,14 @@ define dso_local signext i16 @ld_align64_int16_t_uint32_t(i8* nocapture readonly
 ; CHECK-P8-BE-NEXT:    lhax r3, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = trunc i32 %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = trunc i32 %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_reg_int16_t_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i16 @ld_reg_int16_t_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LE-LABEL: ld_reg_int16_t_uint32_t:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    lhax r3, r3, r4
@@ -748,10 +741,9 @@ define dso_local signext i16 @ld_reg_int16_t_uint32_t(i8* nocapture readonly %pt
 ; CHECK-BE-NEXT:    lha r3, 2(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = trunc i32 %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = trunc i32 %0 to i16
   ret i16 %conv
 }
 
@@ -771,8 +763,8 @@ define dso_local signext i16 @ld_or_int16_t_uint32_t(i64 %ptr, i8 zeroext %off)
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv1 = trunc i32 %1 to i16
   ret i16 %conv1
 }
@@ -792,8 +784,8 @@ define dso_local signext i16 @ld_not_disjoint16_int16_t_uint32_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = trunc i32 %1 to i16
   ret i16 %conv
 }
@@ -814,8 +806,8 @@ define dso_local signext i16 @ld_disjoint_align16_int16_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 8
   %conv = trunc i32 %1 to i16
   ret i16 %conv
 }
@@ -837,8 +829,8 @@ define dso_local signext i16 @ld_not_disjoint32_int16_t_uint32_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = trunc i32 %1 to i16
   ret i16 %conv
 }
@@ -897,8 +889,8 @@ define dso_local signext i16 @ld_disjoint_align32_int16_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 16
   %conv = trunc i32 %1 to i16
   ret i16 %conv
 }
@@ -964,8 +956,8 @@ define dso_local signext i16 @ld_not_disjoint64_int16_t_uint32_t(i64 %ptr) {
 ; CHECK-P8-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = trunc i32 %1 to i16
   ret i16 %conv
 }
@@ -1029,8 +1021,8 @@ define dso_local signext i16 @ld_disjoint_align64_int16_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4096
   %conv = trunc i32 %1 to i16
   ret i16 %conv
 }
@@ -1047,7 +1039,7 @@ define dso_local signext i16 @ld_cst_align16_int16_t_uint32_t() {
 ; CHECK-BE-NEXT:    lha r3, 4082(0)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 4080 to i32*), align 16
+  %0 = load i32, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = trunc i32 %0 to i16
   ret i16 %conv
 }
@@ -1066,7 +1058,7 @@ define dso_local signext i16 @ld_cst_align32_int16_t_uint32_t() {
 ; CHECK-BE-NEXT:    lha r3, -27106(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 9999900 to i32*), align 4
+  %0 = load i32, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = trunc i32 %0 to i16
   ret i16 %conv
 }
@@ -1122,7 +1114,7 @@ define dso_local signext i16 @ld_cst_align64_int16_t_uint32_t() {
 ; CHECK-P8-BE-NEXT:    lha r3, 0(r3)
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+  %0 = load i32, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = trunc i32 %0 to i16
   ret i16 %conv
 }
@@ -1139,14 +1131,14 @@ define dso_local signext i16 @ld_0_int16_t_uint64_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    lha r3, 6(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_align16_int16_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align16_int16_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-LE-LABEL: ld_align16_int16_t_uint64_t:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    lha r3, 8(r3)
@@ -1157,15 +1149,14 @@ define dso_local signext i16 @ld_align16_int16_t_uint64_t(i8* nocapture readonly
 ; CHECK-BE-NEXT:    lha r3, 14(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_align32_int16_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align32_int16_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_align32_int16_t_uint64_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    plha r3, 99999000(r3), 0
@@ -1204,15 +1195,14 @@ define dso_local signext i16 @ld_align32_int16_t_uint64_t(i8* nocapture readonly
 ; CHECK-P8-BE-NEXT:    lhax r3, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_align64_int16_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align64_int16_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_align64_int16_t_uint64_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    pli r4, 244140625
@@ -1262,15 +1252,14 @@ define dso_local signext i16 @ld_align64_int16_t_uint64_t(i8* nocapture readonly
 ; CHECK-P8-BE-NEXT:    lhax r3, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_reg_int16_t_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i16 @ld_reg_int16_t_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LE-LABEL: ld_reg_int16_t_uint64_t:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    lhax r3, r3, r4
@@ -1282,10 +1271,9 @@ define dso_local signext i16 @ld_reg_int16_t_uint64_t(i8* nocapture readonly %pt
 ; CHECK-BE-NEXT:    lha r3, 6(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i16
   ret i16 %conv
 }
 
@@ -1305,8 +1293,8 @@ define dso_local signext i16 @ld_or_int16_t_uint64_t(i64 %ptr, i8 zeroext %off)
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv1 = trunc i64 %1 to i16
   ret i16 %conv1
 }
@@ -1326,8 +1314,8 @@ define dso_local signext i16 @ld_not_disjoint16_int16_t_uint64_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i16
   ret i16 %conv
 }
@@ -1348,8 +1336,8 @@ define dso_local signext i16 @ld_disjoint_align16_int16_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i16
   ret i16 %conv
 }
@@ -1371,8 +1359,8 @@ define dso_local signext i16 @ld_not_disjoint32_int16_t_uint64_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i16
   ret i16 %conv
 }
@@ -1431,8 +1419,8 @@ define dso_local signext i16 @ld_disjoint_align32_int16_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 16
   %conv = trunc i64 %1 to i16
   ret i16 %conv
 }
@@ -1498,8 +1486,8 @@ define dso_local signext i16 @ld_not_disjoint64_int16_t_uint64_t(i64 %ptr) {
 ; CHECK-P8-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i16
   ret i16 %conv
 }
@@ -1563,8 +1551,8 @@ define dso_local signext i16 @ld_disjoint_align64_int16_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 4096
   %conv = trunc i64 %1 to i16
   ret i16 %conv
 }
@@ -1581,7 +1569,7 @@ define dso_local signext i16 @ld_cst_align16_int16_t_uint64_t() {
 ; CHECK-BE-NEXT:    lha r3, 4086(0)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 4080 to i64*), align 16
+  %0 = load i64, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = trunc i64 %0 to i16
   ret i16 %conv
 }
@@ -1600,7 +1588,7 @@ define dso_local signext i16 @ld_cst_align32_int16_t_uint64_t() {
 ; CHECK-BE-NEXT:    lha r3, -27102(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 9999900 to i64*), align 8
+  %0 = load i64, ptr inttoptr (i64 9999900 to ptr), align 8
   %conv = trunc i64 %0 to i16
   ret i16 %conv
 }
@@ -1656,7 +1644,7 @@ define dso_local signext i16 @ld_cst_align64_int16_t_uint64_t() {
 ; CHECK-P8-BE-NEXT:    lha r3, 0(r3)
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+  %0 = load i64, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = trunc i64 %0 to i16
   ret i16 %conv
 }
@@ -1671,14 +1659,14 @@ define dso_local signext i16 @ld_0_int16_t_float(i64 %ptr) {
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptosi float %1 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_align16_int16_t_float(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align16_int16_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_int16_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs f0, 8(r3)
@@ -1687,15 +1675,14 @@ define dso_local signext i16 @ld_align16_int16_t_float(i8* nocapture readonly %p
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptosi float %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptosi float %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_align32_int16_t_float(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align32_int16_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_int16_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfs f0, 99999000(r3), 0
@@ -1714,15 +1701,14 @@ define dso_local signext i16 @ld_align32_int16_t_float(i8* nocapture readonly %p
 ; CHECK-PREP10-NEXT:    extsw r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptosi float %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptosi float %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_align64_int16_t_float(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align64_int16_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_int16_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -1744,15 +1730,14 @@ define dso_local signext i16 @ld_align64_int16_t_float(i8* nocapture readonly %p
 ; CHECK-PREP10-NEXT:    extsw r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptosi float %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptosi float %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_reg_int16_t_float(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i16 @ld_reg_int16_t_float(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_int16_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfsx f0, r3, r4
@@ -1761,10 +1746,9 @@ define dso_local signext i16 @ld_reg_int16_t_float(i8* nocapture readonly %ptr,
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptosi float %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptosi float %0 to i16
   ret i16 %conv
 }
 
@@ -1781,8 +1765,8 @@ define dso_local signext i16 @ld_or_int16_t_float(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv1 = fptosi float %1 to i16
   ret i16 %conv1
 }
@@ -1799,8 +1783,8 @@ define dso_local signext i16 @ld_not_disjoint16_int16_t_float(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptosi float %1 to i16
   ret i16 %conv
 }
@@ -1818,8 +1802,8 @@ define dso_local signext i16 @ld_disjoint_align16_int16_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 8
   %conv = fptosi float %1 to i16
   ret i16 %conv
 }
@@ -1837,8 +1821,8 @@ define dso_local signext i16 @ld_not_disjoint32_int16_t_float(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptosi float %1 to i16
   ret i16 %conv
 }
@@ -1881,8 +1865,8 @@ define dso_local signext i16 @ld_disjoint_align32_int16_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 16
   %conv = fptosi float %1 to i16
   ret i16 %conv
 }
@@ -1915,8 +1899,8 @@ define dso_local signext i16 @ld_not_disjoint64_int16_t_float(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptosi float %1 to i16
   ret i16 %conv
 }
@@ -1949,8 +1933,8 @@ define dso_local signext i16 @ld_disjoint_align64_int16_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4096
   %conv = fptosi float %1 to i16
   ret i16 %conv
 }
@@ -1965,7 +1949,7 @@ define dso_local signext i16 @ld_cst_align16_int16_t_float() {
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 4080 to float*), align 16
+  %0 = load float, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = fptosi float %0 to i16
   ret i16 %conv
 }
@@ -1981,7 +1965,7 @@ define dso_local signext i16 @ld_cst_align32_int16_t_float() {
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 9999900 to float*), align 4
+  %0 = load float, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = fptosi float %0 to i16
   ret i16 %conv
 }
@@ -2009,7 +1993,7 @@ define dso_local signext i16 @ld_cst_align64_int16_t_float() {
 ; CHECK-PREP10-NEXT:    extsw r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 1000000000000 to float*), align 4096
+  %0 = load float, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = fptosi float %0 to i16
   ret i16 %conv
 }
@@ -2024,14 +2008,14 @@ define dso_local signext i16 @ld_0_int16_t_double(i64 %ptr) {
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptosi double %1 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_align16_int16_t_double(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align16_int16_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_int16_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd f0, 8(r3)
@@ -2040,15 +2024,14 @@ define dso_local signext i16 @ld_align16_int16_t_double(i8* nocapture readonly %
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptosi double %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptosi double %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_align32_int16_t_double(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align32_int16_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_int16_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfd f0, 99999000(r3), 0
@@ -2067,15 +2050,14 @@ define dso_local signext i16 @ld_align32_int16_t_double(i8* nocapture readonly %
 ; CHECK-PREP10-NEXT:    extsw r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptosi double %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptosi double %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_align64_int16_t_double(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align64_int16_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_int16_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -2097,15 +2079,14 @@ define dso_local signext i16 @ld_align64_int16_t_double(i8* nocapture readonly %
 ; CHECK-PREP10-NEXT:    extsw r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptosi double %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptosi double %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_reg_int16_t_double(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i16 @ld_reg_int16_t_double(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_int16_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfdx f0, r3, r4
@@ -2114,10 +2095,9 @@ define dso_local signext i16 @ld_reg_int16_t_double(i8* nocapture readonly %ptr,
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptosi double %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptosi double %0 to i16
   ret i16 %conv
 }
 
@@ -2134,8 +2114,8 @@ define dso_local signext i16 @ld_or_int16_t_double(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv1 = fptosi double %1 to i16
   ret i16 %conv1
 }
@@ -2152,8 +2132,8 @@ define dso_local signext i16 @ld_not_disjoint16_int16_t_double(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptosi double %1 to i16
   ret i16 %conv
 }
@@ -2171,8 +2151,8 @@ define dso_local signext i16 @ld_disjoint_align16_int16_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptosi double %1 to i16
   ret i16 %conv
 }
@@ -2190,8 +2170,8 @@ define dso_local signext i16 @ld_not_disjoint32_int16_t_double(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptosi double %1 to i16
   ret i16 %conv
 }
@@ -2234,8 +2214,8 @@ define dso_local signext i16 @ld_disjoint_align32_int16_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 16
   %conv = fptosi double %1 to i16
   ret i16 %conv
 }
@@ -2268,8 +2248,8 @@ define dso_local signext i16 @ld_not_disjoint64_int16_t_double(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptosi double %1 to i16
   ret i16 %conv
 }
@@ -2301,8 +2281,8 @@ define dso_local signext i16 @ld_disjoint_align64_int16_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 4096
   %conv = fptosi double %1 to i16
   ret i16 %conv
 }
@@ -2317,7 +2297,7 @@ define dso_local signext i16 @ld_cst_align16_int16_t_double() {
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 4080 to double*), align 16
+  %0 = load double, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = fptosi double %0 to i16
   ret i16 %conv
 }
@@ -2333,7 +2313,7 @@ define dso_local signext i16 @ld_cst_align32_int16_t_double() {
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 9999900 to double*), align 8
+  %0 = load double, ptr inttoptr (i64 9999900 to ptr), align 8
   %conv = fptosi double %0 to i16
   ret i16 %conv
 }
@@ -2361,7 +2341,7 @@ define dso_local signext i16 @ld_cst_align64_int16_t_double() {
 ; CHECK-PREP10-NEXT:    extsw r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 1000000000000 to double*), align 4096
+  %0 = load double, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = fptosi double %0 to i16
   ret i16 %conv
 }
@@ -2373,27 +2353,27 @@ define dso_local zeroext i16 @ld_0_uint16_t_uint8_t(i64 %ptr) {
 ; CHECK-NEXT:    lbz r3, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = zext i8 %1 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align16_uint16_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align16_uint16_t_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_uint16_t_uint8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lbz r3, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = zext i8 %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align32_uint16_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align32_uint16_t_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_uint16_t_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plbz r3, 99999000(r3), 0
@@ -2406,14 +2386,14 @@ define dso_local zeroext i16 @ld_align32_uint16_t_uint8_t(i8* nocapture readonly
 ; CHECK-PREP10-NEXT:    lbzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = zext i8 %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align64_uint16_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align64_uint16_t_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_uint16_t_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -2429,21 +2409,21 @@ define dso_local zeroext i16 @ld_align64_uint16_t_uint8_t(i8* nocapture readonly
 ; CHECK-PREP10-NEXT:    lbzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = zext i8 %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_reg_uint16_t_uint8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i16 @ld_reg_uint16_t_uint8_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_uint16_t_uint8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lbzx r3, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = zext i8 %0 to i16
   ret i16 %conv
 }
@@ -2458,8 +2438,8 @@ define dso_local zeroext i16 @ld_or_uint16_t_uint8_t(i64 %ptr, i8 zeroext %off)
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv1 = zext i8 %1 to i16
   ret i16 %conv1
 }
@@ -2473,8 +2453,8 @@ define dso_local zeroext i16 @ld_not_disjoint16_uint16_t_uint8_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = zext i8 %1 to i16
   ret i16 %conv
 }
@@ -2489,8 +2469,8 @@ define dso_local zeroext i16 @ld_disjoint_align16_uint16_t_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 8
   %conv = zext i8 %1 to i16
   ret i16 %conv
 }
@@ -2505,8 +2485,8 @@ define dso_local zeroext i16 @ld_not_disjoint32_uint16_t_uint8_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = zext i8 %1 to i16
   ret i16 %conv
 }
@@ -2540,8 +2520,8 @@ define dso_local zeroext i16 @ld_disjoint_align32_uint16_t_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 16
   %conv = zext i8 %1 to i16
   ret i16 %conv
 }
@@ -2568,8 +2548,8 @@ define dso_local zeroext i16 @ld_not_disjoint64_uint16_t_uint8_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = zext i8 %1 to i16
   ret i16 %conv
 }
@@ -2595,8 +2575,8 @@ define dso_local zeroext i16 @ld_disjoint_align64_uint16_t_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 4096
   %conv = zext i8 %1 to i16
   ret i16 %conv
 }
@@ -2608,7 +2588,7 @@ define dso_local zeroext i16 @ld_cst_align16_uint16_t_uint8_t() {
 ; CHECK-NEXT:    lbz r3, 4080(0)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+  %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = zext i8 %0 to i16
   ret i16 %conv
 }
@@ -2621,7 +2601,7 @@ define dso_local zeroext i16 @ld_cst_align32_uint16_t_uint8_t() {
 ; CHECK-NEXT:    lbz r3, -27108(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+  %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = zext i8 %0 to i16
   ret i16 %conv
 }
@@ -2643,7 +2623,7 @@ define dso_local zeroext i16 @ld_cst_align64_uint16_t_uint8_t() {
 ; CHECK-PREP10-NEXT:    lbz r3, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+  %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = zext i8 %0 to i16
   ret i16 %conv
 }
@@ -2657,14 +2637,14 @@ define dso_local zeroext i16 @ld_0_uint16_t_int8_t(i64 %ptr) {
 ; CHECK-NEXT:    clrldi r3, r3, 48
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sext i8 %1 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align16_uint16_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align16_uint16_t_int8_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_uint16_t_int8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lbz r3, 8(r3)
@@ -2672,14 +2652,14 @@ define dso_local zeroext i16 @ld_align16_uint16_t_int8_t(i8* nocapture readonly
 ; CHECK-NEXT:    clrldi r3, r3, 48
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sext i8 %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align32_uint16_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align32_uint16_t_int8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_uint16_t_int8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plbz r3, 99999000(r3), 0
@@ -2696,14 +2676,14 @@ define dso_local zeroext i16 @ld_align32_uint16_t_int8_t(i8* nocapture readonly
 ; CHECK-PREP10-NEXT:    clrldi r3, r3, 48
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sext i8 %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align64_uint16_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align64_uint16_t_int8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_uint16_t_int8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -2723,14 +2703,14 @@ define dso_local zeroext i16 @ld_align64_uint16_t_int8_t(i8* nocapture readonly
 ; CHECK-PREP10-NEXT:    clrldi r3, r3, 48
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sext i8 %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_reg_uint16_t_int8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i16 @ld_reg_uint16_t_int8_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_uint16_t_int8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lbzx r3, r3, r4
@@ -2738,8 +2718,8 @@ define dso_local zeroext i16 @ld_reg_uint16_t_int8_t(i8* nocapture readonly %ptr
 ; CHECK-NEXT:    clrldi r3, r3, 48
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sext i8 %0 to i16
   ret i16 %conv
 }
@@ -2756,8 +2736,8 @@ define dso_local zeroext i16 @ld_or_uint16_t_int8_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv1 = sext i8 %1 to i16
   ret i16 %conv1
 }
@@ -2773,8 +2753,8 @@ define dso_local zeroext i16 @ld_not_disjoint16_uint16_t_int8_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sext i8 %1 to i16
   ret i16 %conv
 }
@@ -2791,8 +2771,8 @@ define dso_local zeroext i16 @ld_disjoint_align16_uint16_t_int8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 8
   %conv = sext i8 %1 to i16
   ret i16 %conv
 }
@@ -2809,8 +2789,8 @@ define dso_local zeroext i16 @ld_not_disjoint32_uint16_t_int8_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sext i8 %1 to i16
   ret i16 %conv
 }
@@ -2850,8 +2830,8 @@ define dso_local zeroext i16 @ld_disjoint_align32_uint16_t_int8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 16
   %conv = sext i8 %1 to i16
   ret i16 %conv
 }
@@ -2882,8 +2862,8 @@ define dso_local zeroext i16 @ld_not_disjoint64_uint16_t_int8_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sext i8 %1 to i16
   ret i16 %conv
 }
@@ -2913,8 +2893,8 @@ define dso_local zeroext i16 @ld_disjoint_align64_uint16_t_int8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 4096
   %conv = sext i8 %1 to i16
   ret i16 %conv
 }
@@ -2928,7 +2908,7 @@ define dso_local zeroext i16 @ld_cst_align16_uint16_t_int8_t() {
 ; CHECK-NEXT:    clrldi r3, r3, 48
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+  %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = sext i8 %0 to i16
   ret i16 %conv
 }
@@ -2943,7 +2923,7 @@ define dso_local zeroext i16 @ld_cst_align32_uint16_t_int8_t() {
 ; CHECK-NEXT:    clrldi r3, r3, 48
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+  %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = sext i8 %0 to i16
   ret i16 %conv
 }
@@ -2969,7 +2949,7 @@ define dso_local zeroext i16 @ld_cst_align64_uint16_t_int8_t() {
 ; CHECK-PREP10-NEXT:    clrldi r3, r3, 48
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+  %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = sext i8 %0 to i16
   ret i16 %conv
 }
@@ -2981,26 +2961,25 @@ define dso_local zeroext i16 @ld_0_uint16_t_uint16_t(i64 %ptr) {
 ; CHECK-NEXT:    lhz r3, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i16, ptr %0, align 2
   ret i16 %1
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align16_uint16_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align16_uint16_t_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_uint16_t_uint16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lhz r3, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  ret i16 %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i16, ptr %add.ptr, align 2
+  ret i16 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align32_uint16_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align32_uint16_t_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_uint16_t_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plhz r3, 99999000(r3), 0
@@ -3013,14 +2992,13 @@ define dso_local zeroext i16 @ld_align32_uint16_t_uint16_t(i8* nocapture readonl
 ; CHECK-PREP10-NEXT:    lhzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  ret i16 %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i16, ptr %add.ptr, align 2
+  ret i16 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align64_uint16_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align64_uint16_t_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_uint16_t_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -3036,23 +3014,21 @@ define dso_local zeroext i16 @ld_align64_uint16_t_uint16_t(i8* nocapture readonl
 ; CHECK-PREP10-NEXT:    lhzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  ret i16 %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i16, ptr %add.ptr, align 2
+  ret i16 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_reg_uint16_t_uint16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i16 @ld_reg_uint16_t_uint16_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_uint16_t_uint16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lhzx r3, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  ret i16 %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i16, ptr %add.ptr, align 2
+  ret i16 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
@@ -3065,8 +3041,8 @@ define dso_local zeroext i16 @ld_or_uint16_t_uint16_t(i64 %ptr, i8 zeroext %off)
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   ret i16 %1
 }
 
@@ -3079,8 +3055,8 @@ define dso_local zeroext i16 @ld_not_disjoint16_uint16_t_uint16_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   ret i16 %1
 }
 
@@ -3094,8 +3070,8 @@ define dso_local zeroext i16 @ld_disjoint_align16_uint16_t_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 8
   ret i16 %1
 }
 
@@ -3109,8 +3085,8 @@ define dso_local zeroext i16 @ld_not_disjoint32_uint16_t_uint16_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   ret i16 %1
 }
 
@@ -3143,8 +3119,8 @@ define dso_local zeroext i16 @ld_disjoint_align32_uint16_t_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 16
   ret i16 %1
 }
 
@@ -3170,8 +3146,8 @@ define dso_local zeroext i16 @ld_not_disjoint64_uint16_t_uint16_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   ret i16 %1
 }
 
@@ -3196,8 +3172,8 @@ define dso_local zeroext i16 @ld_disjoint_align64_uint16_t_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 4096
   ret i16 %1
 }
 
@@ -3208,7 +3184,7 @@ define dso_local zeroext i16 @ld_cst_align16_uint16_t_uint16_t() {
 ; CHECK-NEXT:    lhz r3, 4080(0)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+  %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
   ret i16 %0
 }
 
@@ -3220,7 +3196,7 @@ define dso_local zeroext i16 @ld_cst_align32_uint16_t_uint16_t() {
 ; CHECK-NEXT:    lhz r3, -27108(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+  %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
   ret i16 %0
 }
 
@@ -3241,7 +3217,7 @@ define dso_local zeroext i16 @ld_cst_align64_uint16_t_uint16_t() {
 ; CHECK-PREP10-NEXT:    lhz r3, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+  %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret i16 %0
 }
 
@@ -3257,14 +3233,14 @@ define dso_local zeroext i16 @ld_0_uint16_t_uint32_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    lhz r3, 2(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = trunc i32 %1 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align16_uint16_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align16_uint16_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-LE-LABEL: ld_align16_uint16_t_uint32_t:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    lhz r3, 8(r3)
@@ -3275,15 +3251,14 @@ define dso_local zeroext i16 @ld_align16_uint16_t_uint32_t(i8* nocapture readonl
 ; CHECK-BE-NEXT:    lhz r3, 10(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = trunc i32 %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = trunc i32 %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align32_uint16_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align32_uint16_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_align32_uint16_t_uint32_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    plhz r3, 99999000(r3), 0
@@ -3322,15 +3297,14 @@ define dso_local zeroext i16 @ld_align32_uint16_t_uint32_t(i8* nocapture readonl
 ; CHECK-P8-BE-NEXT:    lhzx r3, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = trunc i32 %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = trunc i32 %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align64_uint16_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align64_uint16_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_align64_uint16_t_uint32_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    pli r4, 244140625
@@ -3380,15 +3354,14 @@ define dso_local zeroext i16 @ld_align64_uint16_t_uint32_t(i8* nocapture readonl
 ; CHECK-P8-BE-NEXT:    lhzx r3, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = trunc i32 %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = trunc i32 %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_reg_uint16_t_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i16 @ld_reg_uint16_t_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LE-LABEL: ld_reg_uint16_t_uint32_t:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    lhzx r3, r3, r4
@@ -3400,10 +3373,9 @@ define dso_local zeroext i16 @ld_reg_uint16_t_uint32_t(i8* nocapture readonly %p
 ; CHECK-BE-NEXT:    lhz r3, 2(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = trunc i32 %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = trunc i32 %0 to i16
   ret i16 %conv
 }
 
@@ -3423,8 +3395,8 @@ define dso_local zeroext i16 @ld_or_uint16_t_uint32_t(i64 %ptr, i8 zeroext %off)
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv1 = trunc i32 %1 to i16
   ret i16 %conv1
 }
@@ -3444,8 +3416,8 @@ define dso_local zeroext i16 @ld_not_disjoint16_uint16_t_uint32_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = trunc i32 %1 to i16
   ret i16 %conv
 }
@@ -3466,8 +3438,8 @@ define dso_local zeroext i16 @ld_disjoint_align16_uint16_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 8
   %conv = trunc i32 %1 to i16
   ret i16 %conv
 }
@@ -3489,8 +3461,8 @@ define dso_local zeroext i16 @ld_not_disjoint32_uint16_t_uint32_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = trunc i32 %1 to i16
   ret i16 %conv
 }
@@ -3549,8 +3521,8 @@ define dso_local zeroext i16 @ld_disjoint_align32_uint16_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 16
   %conv = trunc i32 %1 to i16
   ret i16 %conv
 }
@@ -3616,8 +3588,8 @@ define dso_local zeroext i16 @ld_not_disjoint64_uint16_t_uint32_t(i64 %ptr) {
 ; CHECK-P8-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = trunc i32 %1 to i16
   ret i16 %conv
 }
@@ -3681,8 +3653,8 @@ define dso_local zeroext i16 @ld_disjoint_align64_uint16_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4096
   %conv = trunc i32 %1 to i16
   ret i16 %conv
 }
@@ -3699,7 +3671,7 @@ define dso_local zeroext i16 @ld_cst_align16_uint16_t_uint32_t() {
 ; CHECK-BE-NEXT:    lhz r3, 4082(0)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 4080 to i32*), align 16
+  %0 = load i32, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = trunc i32 %0 to i16
   ret i16 %conv
 }
@@ -3718,7 +3690,7 @@ define dso_local zeroext i16 @ld_cst_align32_uint16_t_uint32_t() {
 ; CHECK-BE-NEXT:    lhz r3, -27106(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 9999900 to i32*), align 4
+  %0 = load i32, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = trunc i32 %0 to i16
   ret i16 %conv
 }
@@ -3774,7 +3746,7 @@ define dso_local zeroext i16 @ld_cst_align64_uint16_t_uint32_t() {
 ; CHECK-P8-BE-NEXT:    lhz r3, 0(r3)
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+  %0 = load i32, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = trunc i32 %0 to i16
   ret i16 %conv
 }
@@ -3791,14 +3763,14 @@ define dso_local zeroext i16 @ld_0_uint16_t_uint64_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    lhz r3, 6(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align16_uint16_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align16_uint16_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-LE-LABEL: ld_align16_uint16_t_uint64_t:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    lhz r3, 8(r3)
@@ -3809,15 +3781,14 @@ define dso_local zeroext i16 @ld_align16_uint16_t_uint64_t(i8* nocapture readonl
 ; CHECK-BE-NEXT:    lhz r3, 14(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align32_uint16_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align32_uint16_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_align32_uint16_t_uint64_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    plhz r3, 99999000(r3), 0
@@ -3856,15 +3827,14 @@ define dso_local zeroext i16 @ld_align32_uint16_t_uint64_t(i8* nocapture readonl
 ; CHECK-P8-BE-NEXT:    lhzx r3, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align64_uint16_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align64_uint16_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_align64_uint16_t_uint64_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    pli r4, 244140625
@@ -3914,15 +3884,14 @@ define dso_local zeroext i16 @ld_align64_uint16_t_uint64_t(i8* nocapture readonl
 ; CHECK-P8-BE-NEXT:    lhzx r3, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_reg_uint16_t_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i16 @ld_reg_uint16_t_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LE-LABEL: ld_reg_uint16_t_uint64_t:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    lhzx r3, r3, r4
@@ -3934,10 +3903,9 @@ define dso_local zeroext i16 @ld_reg_uint16_t_uint64_t(i8* nocapture readonly %p
 ; CHECK-BE-NEXT:    lhz r3, 6(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i16
   ret i16 %conv
 }
 
@@ -3957,8 +3925,8 @@ define dso_local zeroext i16 @ld_or_uint16_t_uint64_t(i64 %ptr, i8 zeroext %off)
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv1 = trunc i64 %1 to i16
   ret i16 %conv1
 }
@@ -3978,8 +3946,8 @@ define dso_local zeroext i16 @ld_not_disjoint16_uint16_t_uint64_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i16
   ret i16 %conv
 }
@@ -4000,8 +3968,8 @@ define dso_local zeroext i16 @ld_disjoint_align16_uint16_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i16
   ret i16 %conv
 }
@@ -4023,8 +3991,8 @@ define dso_local zeroext i16 @ld_not_disjoint32_uint16_t_uint64_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i16
   ret i16 %conv
 }
@@ -4083,8 +4051,8 @@ define dso_local zeroext i16 @ld_disjoint_align32_uint16_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 16
   %conv = trunc i64 %1 to i16
   ret i16 %conv
 }
@@ -4150,8 +4118,8 @@ define dso_local zeroext i16 @ld_not_disjoint64_uint16_t_uint64_t(i64 %ptr) {
 ; CHECK-P8-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i16
   ret i16 %conv
 }
@@ -4215,8 +4183,8 @@ define dso_local zeroext i16 @ld_disjoint_align64_uint16_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 4096
   %conv = trunc i64 %1 to i16
   ret i16 %conv
 }
@@ -4233,7 +4201,7 @@ define dso_local zeroext i16 @ld_cst_align16_uint16_t_uint64_t() {
 ; CHECK-BE-NEXT:    lhz r3, 4086(0)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 4080 to i64*), align 16
+  %0 = load i64, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = trunc i64 %0 to i16
   ret i16 %conv
 }
@@ -4252,7 +4220,7 @@ define dso_local zeroext i16 @ld_cst_align32_uint16_t_uint64_t() {
 ; CHECK-BE-NEXT:    lhz r3, -27102(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 9999900 to i64*), align 8
+  %0 = load i64, ptr inttoptr (i64 9999900 to ptr), align 8
   %conv = trunc i64 %0 to i16
   ret i16 %conv
 }
@@ -4308,13 +4276,13 @@ define dso_local zeroext i16 @ld_cst_align64_uint16_t_uint64_t() {
 ; CHECK-P8-BE-NEXT:    lhz r3, 0(r3)
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+  %0 = load i64, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = trunc i64 %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align16_uint16_t_float(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align16_uint16_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_uint16_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs f0, 8(r3)
@@ -4322,15 +4290,14 @@ define dso_local zeroext i16 @ld_align16_uint16_t_float(i8* nocapture readonly %
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptoui float %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptoui float %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align32_uint16_t_float(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align32_uint16_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_uint16_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfs f0, 99999000(r3), 0
@@ -4347,15 +4314,14 @@ define dso_local zeroext i16 @ld_align32_uint16_t_float(i8* nocapture readonly %
 ; CHECK-PREP10-NEXT:    mffprwz r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptoui float %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptoui float %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align64_uint16_t_float(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align64_uint16_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_uint16_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -4375,15 +4341,14 @@ define dso_local zeroext i16 @ld_align64_uint16_t_float(i8* nocapture readonly %
 ; CHECK-PREP10-NEXT:    mffprwz r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptoui float %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptoui float %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_reg_uint16_t_float(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i16 @ld_reg_uint16_t_float(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_uint16_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfsx f0, r3, r4
@@ -4391,10 +4356,9 @@ define dso_local zeroext i16 @ld_reg_uint16_t_float(i8* nocapture readonly %ptr,
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptoui float %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptoui float %0 to i16
   ret i16 %conv
 }
 
@@ -4410,8 +4374,8 @@ define dso_local zeroext i16 @ld_or_uint16_t_float(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv1 = fptoui float %1 to i16
   ret i16 %conv1
 }
@@ -4427,8 +4391,8 @@ define dso_local zeroext i16 @ld_not_disjoint16_uint16_t_float(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptoui float %1 to i16
   ret i16 %conv
 }
@@ -4445,8 +4409,8 @@ define dso_local zeroext i16 @ld_disjoint_align16_uint16_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 8
   %conv = fptoui float %1 to i16
   ret i16 %conv
 }
@@ -4463,8 +4427,8 @@ define dso_local zeroext i16 @ld_not_disjoint32_uint16_t_float(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptoui float %1 to i16
   ret i16 %conv
 }
@@ -4504,8 +4468,8 @@ define dso_local zeroext i16 @ld_disjoint_align32_uint16_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 16
   %conv = fptoui float %1 to i16
   ret i16 %conv
 }
@@ -4536,8 +4500,8 @@ define dso_local zeroext i16 @ld_not_disjoint64_uint16_t_float(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptoui float %1 to i16
   ret i16 %conv
 }
@@ -4567,8 +4531,8 @@ define dso_local zeroext i16 @ld_disjoint_align64_uint16_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4096
   %conv = fptoui float %1 to i16
   ret i16 %conv
 }
@@ -4582,7 +4546,7 @@ define dso_local zeroext i16 @ld_cst_align16_uint16_t_float() {
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 4080 to float*), align 16
+  %0 = load float, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = fptoui float %0 to i16
   ret i16 %conv
 }
@@ -4597,7 +4561,7 @@ define dso_local zeroext i16 @ld_cst_align32_uint16_t_float() {
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 9999900 to float*), align 4
+  %0 = load float, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = fptoui float %0 to i16
   ret i16 %conv
 }
@@ -4623,7 +4587,7 @@ define dso_local zeroext i16 @ld_cst_align64_uint16_t_float() {
 ; CHECK-PREP10-NEXT:    mffprwz r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 1000000000000 to float*), align 4096
+  %0 = load float, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = fptoui float %0 to i16
   ret i16 %conv
 }
@@ -4637,14 +4601,14 @@ define dso_local zeroext i16 @ld_0_uint16_t_double(i64 %ptr) {
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptoui double %1 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align16_uint16_t_double(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align16_uint16_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_uint16_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd f0, 8(r3)
@@ -4652,15 +4616,14 @@ define dso_local zeroext i16 @ld_align16_uint16_t_double(i8* nocapture readonly
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptoui double %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptoui double %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align32_uint16_t_double(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align32_uint16_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_uint16_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfd f0, 99999000(r3), 0
@@ -4677,15 +4640,14 @@ define dso_local zeroext i16 @ld_align32_uint16_t_double(i8* nocapture readonly
 ; CHECK-PREP10-NEXT:    mffprwz r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptoui double %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptoui double %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align64_uint16_t_double(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align64_uint16_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_uint16_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -4705,15 +4667,14 @@ define dso_local zeroext i16 @ld_align64_uint16_t_double(i8* nocapture readonly
 ; CHECK-PREP10-NEXT:    mffprwz r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptoui double %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptoui double %0 to i16
   ret i16 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_reg_uint16_t_double(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i16 @ld_reg_uint16_t_double(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_uint16_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfdx f0, r3, r4
@@ -4721,10 +4682,9 @@ define dso_local zeroext i16 @ld_reg_uint16_t_double(i8* nocapture readonly %ptr
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptoui double %1 to i16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptoui double %0 to i16
   ret i16 %conv
 }
 
@@ -4740,8 +4700,8 @@ define dso_local zeroext i16 @ld_or_uint16_t_double(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv1 = fptoui double %1 to i16
   ret i16 %conv1
 }
@@ -4757,8 +4717,8 @@ define dso_local zeroext i16 @ld_not_disjoint16_uint16_t_double(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptoui double %1 to i16
   ret i16 %conv
 }
@@ -4775,8 +4735,8 @@ define dso_local zeroext i16 @ld_disjoint_align16_uint16_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptoui double %1 to i16
   ret i16 %conv
 }
@@ -4793,8 +4753,8 @@ define dso_local zeroext i16 @ld_not_disjoint32_uint16_t_double(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptoui double %1 to i16
   ret i16 %conv
 }
@@ -4834,8 +4794,8 @@ define dso_local zeroext i16 @ld_disjoint_align32_uint16_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 16
   %conv = fptoui double %1 to i16
   ret i16 %conv
 }
@@ -4866,8 +4826,8 @@ define dso_local zeroext i16 @ld_not_disjoint64_uint16_t_double(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptoui double %1 to i16
   ret i16 %conv
 }
@@ -4897,8 +4857,8 @@ define dso_local zeroext i16 @ld_disjoint_align64_uint16_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 4096
   %conv = fptoui double %1 to i16
   ret i16 %conv
 }
@@ -4912,7 +4872,7 @@ define dso_local zeroext i16 @ld_cst_align16_uint16_t_double() {
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 4080 to double*), align 16
+  %0 = load double, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = fptoui double %0 to i16
   ret i16 %conv
 }
@@ -4927,7 +4887,7 @@ define dso_local zeroext i16 @ld_cst_align32_uint16_t_double() {
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 9999900 to double*), align 8
+  %0 = load double, ptr inttoptr (i64 9999900 to ptr), align 8
   %conv = fptoui double %0 to i16
   ret i16 %conv
 }
@@ -4953,7 +4913,7 @@ define dso_local zeroext i16 @ld_cst_align64_uint16_t_double() {
 ; CHECK-PREP10-NEXT:    mffprwz r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 1000000000000 to double*), align 4096
+  %0 = load double, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = fptoui double %0 to i16
   ret i16 %conv
 }
@@ -4966,26 +4926,26 @@ define dso_local void @st_0_uint16_t_uint8_t(i64 %ptr, i16 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i16 %str to i8
-  %0 = inttoptr i64 %ptr to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %ptr to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint16_t_uint8_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align16_uint16_t_uint8_t(ptr nocapture %ptr, i16 zeroext %str) {
 ; CHECK-LABEL: st_align16_uint16_t_uint8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stb r4, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i16 %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint16_t_uint8_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align32_uint16_t_uint8_t(ptr nocapture %ptr, i16 zeroext %str) {
 ; CHECK-P10-LABEL: st_align32_uint16_t_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pstb r4, 99999000(r3), 0
@@ -4999,13 +4959,13 @@ define dso_local void @st_align32_uint16_t_uint8_t(i8* nocapture %ptr, i16 zeroe
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = trunc i16 %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint16_t_uint8_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align64_uint16_t_uint8_t(ptr nocapture %ptr, i16 zeroext %str) {
 ; CHECK-P10-LABEL: st_align64_uint16_t_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r5, 244140625
@@ -5022,21 +4982,21 @@ define dso_local void @st_align64_uint16_t_uint8_t(i8* nocapture %ptr, i16 zeroe
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = trunc i16 %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint16_t_uint8_t(i8* nocapture %ptr, i64 %off, i16 zeroext %str) {
+define dso_local void @st_reg_uint16_t_uint8_t(ptr nocapture %ptr, i64 %off, i16 zeroext %str) {
 ; CHECK-LABEL: st_reg_uint16_t_uint8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stbx r5, r3, r4
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i16 %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
@@ -5051,8 +5011,8 @@ entry:
   %conv = trunc i16 %str to i8
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -5066,8 +5026,8 @@ define dso_local void @st_not_disjoint16_uint16_t_uint8_t(i64 %ptr, i16 zeroext
 entry:
   %conv = trunc i16 %str to i8
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -5082,8 +5042,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = trunc i16 %str to i8
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 8
   ret void
 }
 
@@ -5098,8 +5058,8 @@ define dso_local void @st_not_disjoint32_uint16_t_uint8_t(i64 %ptr, i16 zeroext
 entry:
   %conv = trunc i16 %str to i8
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -5133,8 +5093,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = trunc i16 %str to i8
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 16
   ret void
 }
 
@@ -5161,8 +5121,8 @@ define dso_local void @st_not_disjoint64_uint16_t_uint8_t(i64 %ptr, i16 zeroext
 entry:
   %conv = trunc i16 %str to i8
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -5188,8 +5148,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = trunc i16 %str to i8
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -5201,7 +5161,7 @@ define dso_local void @st_cst_align16_uint16_t_uint8_t(i16 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i16 %str to i8
-  store i8 %conv, i8* inttoptr (i64 4080 to i8*), align 16
+  store i8 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -5214,7 +5174,7 @@ define dso_local void @st_cst_align32_uint16_t_uint8_t(i16 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i16 %str to i8
-  store i8 %conv, i8* inttoptr (i64 9999900 to i8*), align 4
+  store i8 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -5236,7 +5196,7 @@ define dso_local void @st_cst_align64_uint16_t_uint8_t(i16 zeroext %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = trunc i16 %str to i8
-  store i8 %conv, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+  store i8 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -5247,26 +5207,25 @@ define dso_local void @st_0_uint16_t_uint16_t(i64 %ptr, i16 zeroext %str) {
 ; CHECK-NEXT:    sth r4, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i16*
-  store i16 %str, i16* %0, align 2
+  %0 = inttoptr i64 %ptr to ptr
+  store i16 %str, ptr %0, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint16_t_uint16_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align16_uint16_t_uint16_t(ptr nocapture %ptr, i16 zeroext %str) {
 ; CHECK-LABEL: st_align16_uint16_t_uint16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sth r4, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %str, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i16 %str, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint16_t_uint16_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align32_uint16_t_uint16_t(ptr nocapture %ptr, i16 zeroext %str) {
 ; CHECK-P10-LABEL: st_align32_uint16_t_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    psth r4, 99999000(r3), 0
@@ -5279,14 +5238,13 @@ define dso_local void @st_align32_uint16_t_uint16_t(i8* nocapture %ptr, i16 zero
 ; CHECK-PREP10-NEXT:    sthx r4, r3, r5
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %str, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i16 %str, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint16_t_uint16_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align64_uint16_t_uint16_t(ptr nocapture %ptr, i16 zeroext %str) {
 ; CHECK-P10-LABEL: st_align64_uint16_t_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r5, 244140625
@@ -5302,22 +5260,20 @@ define dso_local void @st_align64_uint16_t_uint16_t(i8* nocapture %ptr, i16 zero
 ; CHECK-PREP10-NEXT:    sthx r4, r3, r5
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %str, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i16 %str, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint16_t_uint16_t(i8* nocapture %ptr, i64 %off, i16 zeroext %str) {
+define dso_local void @st_reg_uint16_t_uint16_t(ptr nocapture %ptr, i64 %off, i16 zeroext %str) {
 ; CHECK-LABEL: st_reg_uint16_t_uint16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sthx r5, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %str, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i16 %str, ptr %add.ptr, align 2
   ret void
 }
 
@@ -5331,8 +5287,8 @@ define dso_local void @st_or1_uint16_t_uint16_t(i64 %ptr, i8 zeroext %off, i16 z
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i16*
-  store i16 %str, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %str, ptr %0, align 2
   ret void
 }
 
@@ -5345,8 +5301,8 @@ define dso_local void @st_not_disjoint16_uint16_t_uint16_t(i64 %ptr, i16 zeroext
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i16*
-  store i16 %str, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %str, ptr %0, align 2
   ret void
 }
 
@@ -5360,8 +5316,8 @@ define dso_local void @st_disjoint_align16_uint16_t_uint16_t(i64 %ptr, i16 zeroe
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i16*
-  store i16 %str, i16* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i16 %str, ptr %0, align 8
   ret void
 }
 
@@ -5375,8 +5331,8 @@ define dso_local void @st_not_disjoint32_uint16_t_uint16_t(i64 %ptr, i16 zeroext
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i16*
-  store i16 %str, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %str, ptr %0, align 2
   ret void
 }
 
@@ -5409,8 +5365,8 @@ define dso_local void @st_disjoint_align32_uint16_t_uint16_t(i64 %ptr, i16 zeroe
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i16*
-  store i16 %str, i16* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i16 %str, ptr %0, align 16
   ret void
 }
 
@@ -5436,8 +5392,8 @@ define dso_local void @st_not_disjoint64_uint16_t_uint16_t(i64 %ptr, i16 zeroext
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  store i16 %str, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %str, ptr %0, align 2
   ret void
 }
 
@@ -5462,8 +5418,8 @@ define dso_local void @st_disjoint_align64_uint16_t_uint16_t(i64 %ptr, i16 zeroe
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i16*
-  store i16 %str, i16* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i16 %str, ptr %0, align 4096
   ret void
 }
 
@@ -5474,7 +5430,7 @@ define dso_local void @st_cst_align16_uint16_t_uint16_t(i16 zeroext %str) {
 ; CHECK-NEXT:    sth r3, 4080(0)
 ; CHECK-NEXT:    blr
 entry:
-  store i16 %str, i16* inttoptr (i64 4080 to i16*), align 16
+  store i16 %str, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -5486,7 +5442,7 @@ define dso_local void @st_cst_align32_uint16_t_uint16_t(i16 zeroext %str) {
 ; CHECK-NEXT:    sth r3, -27108(r4)
 ; CHECK-NEXT:    blr
 entry:
-  store i16 %str, i16* inttoptr (i64 9999900 to i16*), align 4
+  store i16 %str, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -5507,7 +5463,7 @@ define dso_local void @st_cst_align64_uint16_t_uint16_t(i16 zeroext %str) {
 ; CHECK-PREP10-NEXT:    sth r3, 0(r4)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  store i16 %str, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+  store i16 %str, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -5519,27 +5475,26 @@ define dso_local void @st_0_uint16_t_uint32_t(i64 %ptr, i16 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i16 %str to i32
-  %0 = inttoptr i64 %ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint16_t_uint32_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align16_uint16_t_uint32_t(ptr nocapture %ptr, i16 zeroext %str) {
 ; CHECK-LABEL: st_align16_uint16_t_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stw r4, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i16 %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint16_t_uint32_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align32_uint16_t_uint32_t(ptr nocapture %ptr, i16 zeroext %str) {
 ; CHECK-P10-LABEL: st_align32_uint16_t_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pstw r4, 99999000(r3), 0
@@ -5553,14 +5508,13 @@ define dso_local void @st_align32_uint16_t_uint32_t(i8* nocapture %ptr, i16 zero
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = zext i16 %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint16_t_uint32_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align64_uint16_t_uint32_t(ptr nocapture %ptr, i16 zeroext %str) {
 ; CHECK-P10-LABEL: st_align64_uint16_t_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r5, 244140625
@@ -5577,23 +5531,21 @@ define dso_local void @st_align64_uint16_t_uint32_t(i8* nocapture %ptr, i16 zero
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = zext i16 %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint16_t_uint32_t(i8* nocapture %ptr, i64 %off, i16 zeroext %str) {
+define dso_local void @st_reg_uint16_t_uint32_t(ptr nocapture %ptr, i64 %off, i16 zeroext %str) {
 ; CHECK-LABEL: st_reg_uint16_t_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stwx r5, r3, r4
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i16 %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
@@ -5608,8 +5560,8 @@ entry:
   %conv = zext i16 %str to i32
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -5623,8 +5575,8 @@ define dso_local void @st_not_disjoint16_uint16_t_uint32_t(i64 %ptr, i16 zeroext
 entry:
   %conv = zext i16 %str to i32
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -5639,8 +5591,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = zext i16 %str to i32
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 8
   ret void
 }
 
@@ -5655,8 +5607,8 @@ define dso_local void @st_not_disjoint32_uint16_t_uint32_t(i64 %ptr, i16 zeroext
 entry:
   %conv = zext i16 %str to i32
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -5690,8 +5642,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = zext i16 %str to i32
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 16
   ret void
 }
 
@@ -5718,8 +5670,8 @@ define dso_local void @st_not_disjoint64_uint16_t_uint32_t(i64 %ptr, i16 zeroext
 entry:
   %conv = zext i16 %str to i32
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -5745,8 +5697,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = zext i16 %str to i32
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -5758,7 +5710,7 @@ define dso_local void @st_cst_align16_uint16_t_uint32_t(i16 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i16 %str to i32
-  store i32 %conv, i32* inttoptr (i64 4080 to i32*), align 16
+  store i32 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -5771,7 +5723,7 @@ define dso_local void @st_cst_align32_uint16_t_uint32_t(i16 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i16 %str to i32
-  store i32 %conv, i32* inttoptr (i64 9999900 to i32*), align 4
+  store i32 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -5793,7 +5745,7 @@ define dso_local void @st_cst_align64_uint16_t_uint32_t(i16 zeroext %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = zext i16 %str to i32
-  store i32 %conv, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+  store i32 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -5805,27 +5757,26 @@ define dso_local void @st_0_uint16_t_uint64_t(i64 %ptr, i16 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i16 %str to i64
-  %0 = inttoptr i64 %ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint16_t_uint64_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align16_uint16_t_uint64_t(ptr nocapture %ptr, i16 zeroext %str) {
 ; CHECK-LABEL: st_align16_uint16_t_uint64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    std r4, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i16 %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint16_t_uint64_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align32_uint16_t_uint64_t(ptr nocapture %ptr, i16 zeroext %str) {
 ; CHECK-P10-LABEL: st_align32_uint16_t_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pstd r4, 99999000(r3), 0
@@ -5839,14 +5790,13 @@ define dso_local void @st_align32_uint16_t_uint64_t(i8* nocapture %ptr, i16 zero
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = zext i16 %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint16_t_uint64_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align64_uint16_t_uint64_t(ptr nocapture %ptr, i16 zeroext %str) {
 ; CHECK-P10-LABEL: st_align64_uint16_t_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r5, 244140625
@@ -5863,23 +5813,21 @@ define dso_local void @st_align64_uint16_t_uint64_t(i8* nocapture %ptr, i16 zero
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = zext i16 %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint16_t_uint64_t(i8* nocapture %ptr, i64 %off, i16 zeroext %str) {
+define dso_local void @st_reg_uint16_t_uint64_t(ptr nocapture %ptr, i64 %off, i16 zeroext %str) {
 ; CHECK-LABEL: st_reg_uint16_t_uint64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stdx r5, r3, r4
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i16 %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
@@ -5894,8 +5842,8 @@ entry:
   %conv = zext i16 %str to i64
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -5909,8 +5857,8 @@ define dso_local void @st_not_disjoint16_uint16_t_uint64_t(i64 %ptr, i16 zeroext
 entry:
   %conv = zext i16 %str to i64
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -5925,8 +5873,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = zext i16 %str to i64
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -5941,8 +5889,8 @@ define dso_local void @st_not_disjoint32_uint16_t_uint64_t(i64 %ptr, i16 zeroext
 entry:
   %conv = zext i16 %str to i64
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -5976,8 +5924,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = zext i16 %str to i64
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 16
   ret void
 }
 
@@ -6004,8 +5952,8 @@ define dso_local void @st_not_disjoint64_uint16_t_uint64_t(i64 %ptr, i16 zeroext
 entry:
   %conv = zext i16 %str to i64
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6031,8 +5979,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = zext i16 %str to i64
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -6044,7 +5992,7 @@ define dso_local void @st_cst_align16_uint16_t_uint64_t(i16 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i16 %str to i64
-  store i64 %conv, i64* inttoptr (i64 4080 to i64*), align 16
+  store i64 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -6057,7 +6005,7 @@ define dso_local void @st_cst_align32_uint16_t_uint64_t(i16 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i16 %str to i64
-  store i64 %conv, i64* inttoptr (i64 9999900 to i64*), align 8
+  store i64 %conv, ptr inttoptr (i64 9999900 to ptr), align 8
   ret void
 }
 
@@ -6079,7 +6027,7 @@ define dso_local void @st_cst_align64_uint16_t_uint64_t(i16 zeroext %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = zext i16 %str to i64
-  store i64 %conv, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+  store i64 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -6093,13 +6041,13 @@ define dso_local void @st_0_uint16_t_float(i64 %ptr, i16 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i16 %str to float
-  %0 = inttoptr i64 %ptr to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint16_t_float(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align16_uint16_t_float(ptr nocapture %ptr, i16 zeroext %str) {
 ; CHECK-LABEL: st_align16_uint16_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprwz f0, r4
@@ -6108,14 +6056,13 @@ define dso_local void @st_align16_uint16_t_float(i8* nocapture %ptr, i16 zeroext
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i16 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint16_t_float(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align32_uint16_t_float(ptr nocapture %ptr, i16 zeroext %str) {
 ; CHECK-P10-LABEL: st_align32_uint16_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprwz f0, r4
@@ -6142,14 +6089,13 @@ define dso_local void @st_align32_uint16_t_float(i8* nocapture %ptr, i16 zeroext
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i16 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint16_t_float(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align64_uint16_t_float(ptr nocapture %ptr, i16 zeroext %str) {
 ; CHECK-P10-LABEL: st_align64_uint16_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprwz f0, r4
@@ -6180,14 +6126,13 @@ define dso_local void @st_align64_uint16_t_float(i8* nocapture %ptr, i16 zeroext
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i16 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint16_t_float(i8* nocapture %ptr, i64 %off, i16 zeroext %str) {
+define dso_local void @st_reg_uint16_t_float(ptr nocapture %ptr, i64 %off, i16 zeroext %str) {
 ; CHECK-LABEL: st_reg_uint16_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprwz f0, r5
@@ -6196,9 +6141,8 @@ define dso_local void @st_reg_uint16_t_float(i8* nocapture %ptr, i64 %off, i16 z
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i16 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
@@ -6215,8 +6159,8 @@ entry:
   %conv = uitofp i16 %str to float
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -6232,8 +6176,8 @@ define dso_local void @st_not_disjoint16_uint16_t_float(i64 %ptr, i16 zeroext %s
 entry:
   %conv = uitofp i16 %str to float
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -6250,8 +6194,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = uitofp i16 %str to float
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 8
   ret void
 }
 
@@ -6286,8 +6230,8 @@ define dso_local void @st_not_disjoint32_uint16_t_float(i64 %ptr, i16 zeroext %s
 entry:
   %conv = uitofp i16 %str to float
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -6327,8 +6271,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = uitofp i16 %str to float
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 16
   ret void
 }
 
@@ -6359,8 +6303,8 @@ define dso_local void @st_not_disjoint64_uint16_t_float(i64 %ptr, i16 zeroext %s
 entry:
   %conv = uitofp i16 %str to float
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -6401,8 +6345,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = uitofp i16 %str to float
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4096
   ret void
 }
 
@@ -6416,7 +6360,7 @@ define dso_local void @st_cst_align16_uint16_t_float(i16 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i16 %str to float
-  store float %conv, float* inttoptr (i64 4080 to float*), align 16
+  store float %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -6431,7 +6375,7 @@ define dso_local void @st_cst_align32_uint16_t_float(i16 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i16 %str to float
-  store float %conv, float* inttoptr (i64 9999900 to float*), align 4
+  store float %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -6467,7 +6411,7 @@ define dso_local void @st_cst_align64_uint16_t_float(i16 zeroext %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i16 %str to float
-  store float %conv, float* inttoptr (i64 1000000000000 to float*), align 4096
+  store float %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -6481,13 +6425,13 @@ define dso_local void @st_0_uint16_t_double(i64 %ptr, i16 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i16 %str to double
-  %0 = inttoptr i64 %ptr to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint16_t_double(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align16_uint16_t_double(ptr nocapture %ptr, i16 zeroext %str) {
 ; CHECK-LABEL: st_align16_uint16_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprwz f0, r4
@@ -6496,14 +6440,13 @@ define dso_local void @st_align16_uint16_t_double(i8* nocapture %ptr, i16 zeroex
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i16 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint16_t_double(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align32_uint16_t_double(ptr nocapture %ptr, i16 zeroext %str) {
 ; CHECK-P10-LABEL: st_align32_uint16_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprwz f0, r4
@@ -6530,14 +6473,13 @@ define dso_local void @st_align32_uint16_t_double(i8* nocapture %ptr, i16 zeroex
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i16 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint16_t_double(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align64_uint16_t_double(ptr nocapture %ptr, i16 zeroext %str) {
 ; CHECK-P10-LABEL: st_align64_uint16_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprwz f0, r4
@@ -6568,14 +6510,13 @@ define dso_local void @st_align64_uint16_t_double(i8* nocapture %ptr, i16 zeroex
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i16 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint16_t_double(i8* nocapture %ptr, i64 %off, i16 zeroext %str) {
+define dso_local void @st_reg_uint16_t_double(ptr nocapture %ptr, i64 %off, i16 zeroext %str) {
 ; CHECK-LABEL: st_reg_uint16_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprwz f0, r5
@@ -6584,9 +6525,8 @@ define dso_local void @st_reg_uint16_t_double(i8* nocapture %ptr, i64 %off, i16
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i16 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
@@ -6603,8 +6543,8 @@ entry:
   %conv = uitofp i16 %str to double
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -6620,8 +6560,8 @@ define dso_local void @st_not_disjoint16_uint16_t_double(i64 %ptr, i16 zeroext %
 entry:
   %conv = uitofp i16 %str to double
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -6638,8 +6578,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = uitofp i16 %str to double
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -6674,8 +6614,8 @@ define dso_local void @st_not_disjoint32_uint16_t_double(i64 %ptr, i16 zeroext %
 entry:
   %conv = uitofp i16 %str to double
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -6715,8 +6655,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = uitofp i16 %str to double
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 16
   ret void
 }
 
@@ -6747,8 +6687,8 @@ define dso_local void @st_not_disjoint64_uint16_t_double(i64 %ptr, i16 zeroext %
 entry:
   %conv = uitofp i16 %str to double
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -6789,8 +6729,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = uitofp i16 %str to double
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 4096
   ret void
 }
 
@@ -6804,7 +6744,7 @@ define dso_local void @st_cst_align16_uint16_t_double(i16 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i16 %str to double
-  store double %conv, double* inttoptr (i64 4080 to double*), align 16
+  store double %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -6819,7 +6759,7 @@ define dso_local void @st_cst_align32_uint16_t_double(i16 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i16 %str to double
-  store double %conv, double* inttoptr (i64 9999900 to double*), align 8
+  store double %conv, ptr inttoptr (i64 9999900 to ptr), align 8
   ret void
 }
 
@@ -6855,7 +6795,7 @@ define dso_local void @st_cst_align64_uint16_t_double(i16 zeroext %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i16 %str to double
-  store double %conv, double* inttoptr (i64 1000000000000 to double*), align 4096
+  store double %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -6867,27 +6807,26 @@ define dso_local void @st_0_int16_t_uint32_t(i64 %ptr, i16 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i16 %str to i32
-  %0 = inttoptr i64 %ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int16_t_uint32_t(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align16_int16_t_uint32_t(ptr nocapture %ptr, i16 signext %str) {
 ; CHECK-LABEL: st_align16_int16_t_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stw r4, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i16 %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int16_t_uint32_t(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align32_int16_t_uint32_t(ptr nocapture %ptr, i16 signext %str) {
 ; CHECK-P10-LABEL: st_align32_int16_t_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pstw r4, 99999000(r3), 0
@@ -6901,14 +6840,13 @@ define dso_local void @st_align32_int16_t_uint32_t(i8* nocapture %ptr, i16 signe
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = sext i16 %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int16_t_uint32_t(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align64_int16_t_uint32_t(ptr nocapture %ptr, i16 signext %str) {
 ; CHECK-P10-LABEL: st_align64_int16_t_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r5, 244140625
@@ -6925,23 +6863,21 @@ define dso_local void @st_align64_int16_t_uint32_t(i8* nocapture %ptr, i16 signe
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = sext i16 %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int16_t_uint32_t(i8* nocapture %ptr, i64 %off, i16 signext %str) {
+define dso_local void @st_reg_int16_t_uint32_t(ptr nocapture %ptr, i64 %off, i16 signext %str) {
 ; CHECK-LABEL: st_reg_int16_t_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stwx r5, r3, r4
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i16 %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
@@ -6956,8 +6892,8 @@ entry:
   %conv = sext i16 %str to i32
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -6971,8 +6907,8 @@ define dso_local void @st_not_disjoint16_int16_t_uint32_t(i64 %ptr, i16 signext
 entry:
   %conv = sext i16 %str to i32
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -6987,8 +6923,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = sext i16 %str to i32
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 8
   ret void
 }
 
@@ -7003,8 +6939,8 @@ define dso_local void @st_not_disjoint32_int16_t_uint32_t(i64 %ptr, i16 signext
 entry:
   %conv = sext i16 %str to i32
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -7038,8 +6974,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = sext i16 %str to i32
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 16
   ret void
 }
 
@@ -7066,8 +7002,8 @@ define dso_local void @st_not_disjoint64_int16_t_uint32_t(i64 %ptr, i16 signext
 entry:
   %conv = sext i16 %str to i32
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -7093,8 +7029,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = sext i16 %str to i32
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -7106,7 +7042,7 @@ define dso_local void @st_cst_align16_int16_t_uint32_t(i16 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i16 %str to i32
-  store i32 %conv, i32* inttoptr (i64 4080 to i32*), align 16
+  store i32 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -7119,7 +7055,7 @@ define dso_local void @st_cst_align32_int16_t_uint32_t(i16 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i16 %str to i32
-  store i32 %conv, i32* inttoptr (i64 9999900 to i32*), align 4
+  store i32 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -7141,7 +7077,7 @@ define dso_local void @st_cst_align64_int16_t_uint32_t(i16 signext %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = sext i16 %str to i32
-  store i32 %conv, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+  store i32 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -7153,27 +7089,26 @@ define dso_local void @st_0_int16_t_uint64_t(i64 %ptr, i16 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i16 %str to i64
-  %0 = inttoptr i64 %ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int16_t_uint64_t(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align16_int16_t_uint64_t(ptr nocapture %ptr, i16 signext %str) {
 ; CHECK-LABEL: st_align16_int16_t_uint64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    std r4, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i16 %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int16_t_uint64_t(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align32_int16_t_uint64_t(ptr nocapture %ptr, i16 signext %str) {
 ; CHECK-P10-LABEL: st_align32_int16_t_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pstd r4, 99999000(r3), 0
@@ -7187,14 +7122,13 @@ define dso_local void @st_align32_int16_t_uint64_t(i8* nocapture %ptr, i16 signe
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = sext i16 %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int16_t_uint64_t(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align64_int16_t_uint64_t(ptr nocapture %ptr, i16 signext %str) {
 ; CHECK-P10-LABEL: st_align64_int16_t_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r5, 244140625
@@ -7211,23 +7145,21 @@ define dso_local void @st_align64_int16_t_uint64_t(i8* nocapture %ptr, i16 signe
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = sext i16 %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int16_t_uint64_t(i8* nocapture %ptr, i64 %off, i16 signext %str) {
+define dso_local void @st_reg_int16_t_uint64_t(ptr nocapture %ptr, i64 %off, i16 signext %str) {
 ; CHECK-LABEL: st_reg_int16_t_uint64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stdx r5, r3, r4
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i16 %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
@@ -7242,8 +7174,8 @@ entry:
   %conv = sext i16 %str to i64
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -7257,8 +7189,8 @@ define dso_local void @st_not_disjoint16_int16_t_uint64_t(i64 %ptr, i16 signext
 entry:
   %conv = sext i16 %str to i64
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -7273,8 +7205,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = sext i16 %str to i64
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -7289,8 +7221,8 @@ define dso_local void @st_not_disjoint32_int16_t_uint64_t(i64 %ptr, i16 signext
 entry:
   %conv = sext i16 %str to i64
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -7324,8 +7256,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = sext i16 %str to i64
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 16
   ret void
 }
 
@@ -7352,8 +7284,8 @@ define dso_local void @st_not_disjoint64_int16_t_uint64_t(i64 %ptr, i16 signext
 entry:
   %conv = sext i16 %str to i64
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -7379,8 +7311,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = sext i16 %str to i64
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -7392,7 +7324,7 @@ define dso_local void @st_cst_align16_int16_t_uint64_t(i16 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i16 %str to i64
-  store i64 %conv, i64* inttoptr (i64 4080 to i64*), align 16
+  store i64 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -7405,7 +7337,7 @@ define dso_local void @st_cst_align32_int16_t_uint64_t(i16 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i16 %str to i64
-  store i64 %conv, i64* inttoptr (i64 9999900 to i64*), align 8
+  store i64 %conv, ptr inttoptr (i64 9999900 to ptr), align 8
   ret void
 }
 
@@ -7427,7 +7359,7 @@ define dso_local void @st_cst_align64_int16_t_uint64_t(i16 signext %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = sext i16 %str to i64
-  store i64 %conv, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+  store i64 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -7441,13 +7373,13 @@ define dso_local void @st_0_int16_t_float(i64 %ptr, i16 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i16 %str to float
-  %0 = inttoptr i64 %ptr to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int16_t_float(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align16_int16_t_float(ptr nocapture %ptr, i16 signext %str) {
 ; CHECK-LABEL: st_align16_int16_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprwa f0, r4
@@ -7456,14 +7388,13 @@ define dso_local void @st_align16_int16_t_float(i8* nocapture %ptr, i16 signext
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i16 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int16_t_float(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align32_int16_t_float(ptr nocapture %ptr, i16 signext %str) {
 ; CHECK-P10-LABEL: st_align32_int16_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprwa f0, r4
@@ -7490,14 +7421,13 @@ define dso_local void @st_align32_int16_t_float(i8* nocapture %ptr, i16 signext
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i16 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int16_t_float(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align64_int16_t_float(ptr nocapture %ptr, i16 signext %str) {
 ; CHECK-P10-LABEL: st_align64_int16_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprwa f0, r4
@@ -7528,14 +7458,13 @@ define dso_local void @st_align64_int16_t_float(i8* nocapture %ptr, i16 signext
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i16 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int16_t_float(i8* nocapture %ptr, i64 %off, i16 signext %str) {
+define dso_local void @st_reg_int16_t_float(ptr nocapture %ptr, i64 %off, i16 signext %str) {
 ; CHECK-LABEL: st_reg_int16_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprwa f0, r5
@@ -7544,9 +7473,8 @@ define dso_local void @st_reg_int16_t_float(i8* nocapture %ptr, i64 %off, i16 si
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i16 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
@@ -7563,8 +7491,8 @@ entry:
   %conv = sitofp i16 %str to float
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -7580,8 +7508,8 @@ define dso_local void @st_not_disjoint16_int16_t_float(i64 %ptr, i16 signext %st
 entry:
   %conv = sitofp i16 %str to float
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -7598,8 +7526,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = sitofp i16 %str to float
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 8
   ret void
 }
 
@@ -7634,8 +7562,8 @@ define dso_local void @st_not_disjoint32_int16_t_float(i64 %ptr, i16 signext %st
 entry:
   %conv = sitofp i16 %str to float
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -7675,8 +7603,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = sitofp i16 %str to float
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 16
   ret void
 }
 
@@ -7707,8 +7635,8 @@ define dso_local void @st_not_disjoint64_int16_t_float(i64 %ptr, i16 signext %st
 entry:
   %conv = sitofp i16 %str to float
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -7750,8 +7678,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = sitofp i16 %str to float
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4096
   ret void
 }
 
@@ -7765,7 +7693,7 @@ define dso_local void @st_cst_align16_int16_t_float(i16 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i16 %str to float
-  store float %conv, float* inttoptr (i64 4080 to float*), align 16
+  store float %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -7780,7 +7708,7 @@ define dso_local void @st_cst_align32_int16_t_float(i16 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i16 %str to float
-  store float %conv, float* inttoptr (i64 9999900 to float*), align 4
+  store float %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -7816,7 +7744,7 @@ define dso_local void @st_cst_align64_int16_t_float(i16 signext %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i16 %str to float
-  store float %conv, float* inttoptr (i64 1000000000000 to float*), align 4096
+  store float %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -7830,13 +7758,13 @@ define dso_local void @st_0_int16_t_double(i64 %ptr, i16 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i16 %str to double
-  %0 = inttoptr i64 %ptr to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int16_t_double(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align16_int16_t_double(ptr nocapture %ptr, i16 signext %str) {
 ; CHECK-LABEL: st_align16_int16_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprwa f0, r4
@@ -7845,14 +7773,13 @@ define dso_local void @st_align16_int16_t_double(i8* nocapture %ptr, i16 signext
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i16 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int16_t_double(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align32_int16_t_double(ptr nocapture %ptr, i16 signext %str) {
 ; CHECK-P10-LABEL: st_align32_int16_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprwa f0, r4
@@ -7879,14 +7806,13 @@ define dso_local void @st_align32_int16_t_double(i8* nocapture %ptr, i16 signext
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i16 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int16_t_double(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align64_int16_t_double(ptr nocapture %ptr, i16 signext %str) {
 ; CHECK-P10-LABEL: st_align64_int16_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprwa f0, r4
@@ -7917,14 +7843,13 @@ define dso_local void @st_align64_int16_t_double(i8* nocapture %ptr, i16 signext
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i16 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int16_t_double(i8* nocapture %ptr, i64 %off, i16 signext %str) {
+define dso_local void @st_reg_int16_t_double(ptr nocapture %ptr, i64 %off, i16 signext %str) {
 ; CHECK-LABEL: st_reg_int16_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprwa f0, r5
@@ -7933,9 +7858,8 @@ define dso_local void @st_reg_int16_t_double(i8* nocapture %ptr, i64 %off, i16 s
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i16 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
@@ -7952,8 +7876,8 @@ entry:
   %conv = sitofp i16 %str to double
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -7969,8 +7893,8 @@ define dso_local void @st_not_disjoint16_int16_t_double(i64 %ptr, i16 signext %s
 entry:
   %conv = sitofp i16 %str to double
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -7987,8 +7911,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = sitofp i16 %str to double
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -8023,8 +7947,8 @@ define dso_local void @st_not_disjoint32_int16_t_double(i64 %ptr, i16 signext %s
 entry:
   %conv = sitofp i16 %str to double
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -8064,8 +7988,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = sitofp i16 %str to double
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 16
   ret void
 }
 
@@ -8096,8 +8020,8 @@ define dso_local void @st_not_disjoint64_int16_t_double(i64 %ptr, i16 signext %s
 entry:
   %conv = sitofp i16 %str to double
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -8138,8 +8062,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = sitofp i16 %str to double
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 4096
   ret void
 }
 
@@ -8153,7 +8077,7 @@ define dso_local void @st_cst_align16_int16_t_double(i16 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i16 %str to double
-  store double %conv, double* inttoptr (i64 4080 to double*), align 16
+  store double %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -8168,7 +8092,7 @@ define dso_local void @st_cst_align32_int16_t_double(i16 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i16 %str to double
-  store double %conv, double* inttoptr (i64 9999900 to double*), align 8
+  store double %conv, ptr inttoptr (i64 9999900 to ptr), align 8
   ret void
 }
 
@@ -8204,6 +8128,6 @@ define dso_local void @st_cst_align64_int16_t_double(i16 signext %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i16 %str to double
-  store double %conv, double* inttoptr (i64 1000000000000 to double*), align 4096
+  store double %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/scalar-i32-ldst.ll b/llvm/test/CodeGen/PowerPC/scalar-i32-ldst.ll
index 82d5b6f7278d..73578e57cdc6 100644
--- a/llvm/test/CodeGen/PowerPC/scalar-i32-ldst.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar-i32-ldst.ll
@@ -26,28 +26,28 @@ define dso_local signext i32 @ld_0_int32_t_int8_t(i64 %ptr) {
 ; CHECK-NEXT:    extsb r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sext i8 %1 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align16_int32_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align16_int32_t_int8_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_int32_t_int8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lbz r3, 8(r3)
 ; CHECK-NEXT:    extsb r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sext i8 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align32_int32_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align32_int32_t_int8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_int32_t_int8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plbz r3, 99999000(r3), 0
@@ -62,14 +62,14 @@ define dso_local signext i32 @ld_align32_int32_t_int8_t(i8* nocapture readonly %
 ; CHECK-PREP10-NEXT:    extsb r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sext i8 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align64_int32_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align64_int32_t_int8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_int32_t_int8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -87,22 +87,22 @@ define dso_local signext i32 @ld_align64_int32_t_int8_t(i8* nocapture readonly %
 ; CHECK-PREP10-NEXT:    extsb r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sext i8 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_reg_int32_t_int8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i32 @ld_reg_int32_t_int8_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_int32_t_int8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lbzx r3, r3, r4
 ; CHECK-NEXT:    extsb r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sext i8 %0 to i32
   ret i32 %conv
 }
@@ -118,8 +118,8 @@ define dso_local signext i32 @ld_or_int32_t_int8_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv1 = sext i8 %1 to i32
   ret i32 %conv1
 }
@@ -136,8 +136,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = zext i8 %off to i64
   %or = or i64 %and, %conv
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv1 = sext i8 %1 to i32
   ret i32 %conv1
 }
@@ -152,8 +152,8 @@ define dso_local signext i32 @ld_not_disjoint16_int32_t_int8_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sext i8 %1 to i32
   ret i32 %conv
 }
@@ -169,8 +169,8 @@ define dso_local signext i32 @ld_disjoint_align16_int32_t_int8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 8
   %conv = sext i8 %1 to i32
   ret i32 %conv
 }
@@ -186,8 +186,8 @@ define dso_local signext i32 @ld_not_disjoint32_int32_t_int8_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sext i8 %1 to i32
   ret i32 %conv
 }
@@ -224,8 +224,8 @@ define dso_local signext i32 @ld_disjoint_align32_int32_t_int8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 16
   %conv = sext i8 %1 to i32
   ret i32 %conv
 }
@@ -254,8 +254,8 @@ define dso_local signext i32 @ld_not_disjoint64_int32_t_int8_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sext i8 %1 to i32
   ret i32 %conv
 }
@@ -283,8 +283,8 @@ define dso_local signext i32 @ld_disjoint_align64_int32_t_int8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 4096
   %conv = sext i8 %1 to i32
   ret i32 %conv
 }
@@ -297,7 +297,7 @@ define dso_local signext i32 @ld_cst_align16_int32_t_int8_t() {
 ; CHECK-NEXT:    extsb r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+  %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = sext i8 %0 to i32
   ret i32 %conv
 }
@@ -311,7 +311,7 @@ define dso_local signext i32 @ld_cst_align32_int32_t_int8_t() {
 ; CHECK-NEXT:    extsb r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+  %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = sext i8 %0 to i32
   ret i32 %conv
 }
@@ -335,7 +335,7 @@ define dso_local signext i32 @ld_cst_align64_int32_t_int8_t() {
 ; CHECK-PREP10-NEXT:    extsb r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+  %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = sext i8 %0 to i32
   ret i32 %conv
 }
@@ -347,28 +347,27 @@ define dso_local signext i32 @ld_0_int32_t_int16_t(i64 %ptr) {
 ; CHECK-NEXT:    lha r3, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = sext i16 %1 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align16_int32_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align16_int32_t_int16_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_int32_t_int16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lha r3, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = sext i16 %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = sext i16 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align32_int32_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align32_int32_t_int16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_int32_t_int16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plha r3, 99999000(r3), 0
@@ -381,15 +380,14 @@ define dso_local signext i32 @ld_align32_int32_t_int16_t(i8* nocapture readonly
 ; CHECK-PREP10-NEXT:    lhax r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = sext i16 %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = sext i16 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align64_int32_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align64_int32_t_int16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_int32_t_int16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -405,24 +403,22 @@ define dso_local signext i32 @ld_align64_int32_t_int16_t(i8* nocapture readonly
 ; CHECK-PREP10-NEXT:    lhax r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = sext i16 %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = sext i16 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_reg_int32_t_int16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i32 @ld_reg_int32_t_int16_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_int32_t_int16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lhax r3, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = sext i16 %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = sext i16 %0 to i32
   ret i32 %conv
 }
 
@@ -436,8 +432,8 @@ define dso_local signext i32 @ld_or_int32_t_int16_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv1 = sext i16 %1 to i32
   ret i32 %conv1
 }
@@ -453,8 +449,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = zext i8 %off to i64
   %or = or i64 %and, %conv
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv1 = sext i16 %1 to i32
   ret i32 %conv1
 }
@@ -468,8 +464,8 @@ define dso_local signext i32 @ld_not_disjoint16_int32_t_int16_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = sext i16 %1 to i32
   ret i32 %conv
 }
@@ -484,8 +480,8 @@ define dso_local signext i32 @ld_disjoint_align16_int32_t_int16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 8
   %conv = sext i16 %1 to i32
   ret i32 %conv
 }
@@ -500,8 +496,8 @@ define dso_local signext i32 @ld_not_disjoint32_int32_t_int16_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = sext i16 %1 to i32
   ret i32 %conv
 }
@@ -535,8 +531,8 @@ define dso_local signext i32 @ld_disjoint_align32_int32_t_int16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 16
   %conv = sext i16 %1 to i32
   ret i32 %conv
 }
@@ -563,8 +559,8 @@ define dso_local signext i32 @ld_not_disjoint64_int32_t_int16_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = sext i16 %1 to i32
   ret i32 %conv
 }
@@ -590,8 +586,8 @@ define dso_local signext i32 @ld_disjoint_align64_int32_t_int16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 4096
   %conv = sext i16 %1 to i32
   ret i32 %conv
 }
@@ -603,7 +599,7 @@ define dso_local signext i32 @ld_cst_align16_int32_t_int16_t() {
 ; CHECK-NEXT:    lha r3, 4080(0)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+  %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = sext i16 %0 to i32
   ret i32 %conv
 }
@@ -616,7 +612,7 @@ define dso_local signext i32 @ld_cst_align32_int32_t_int16_t() {
 ; CHECK-NEXT:    lha r3, -27108(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+  %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = sext i16 %0 to i32
   ret i32 %conv
 }
@@ -638,7 +634,7 @@ define dso_local signext i32 @ld_cst_align64_int32_t_int16_t() {
 ; CHECK-PREP10-NEXT:    lha r3, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+  %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = sext i16 %0 to i32
   ret i32 %conv
 }
@@ -650,26 +646,25 @@ define dso_local signext i32 @ld_0_int32_t_uint32_t(i64 %ptr) {
 ; CHECK-NEXT:    lwa r3, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i32, ptr %0, align 4
   ret i32 %1
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align16_int32_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align16_int32_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_int32_t_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lwa r3, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  ret i32 %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i32, ptr %add.ptr, align 4
+  ret i32 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align32_int32_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align32_int32_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_int32_t_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plwa r3, 99999000(r3), 0
@@ -682,14 +677,13 @@ define dso_local signext i32 @ld_align32_int32_t_uint32_t(i8* nocapture readonly
 ; CHECK-PREP10-NEXT:    lwax r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  ret i32 %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i32, ptr %add.ptr, align 4
+  ret i32 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align64_int32_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align64_int32_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_int32_t_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -705,23 +699,21 @@ define dso_local signext i32 @ld_align64_int32_t_uint32_t(i8* nocapture readonly
 ; CHECK-PREP10-NEXT:    lwax r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  ret i32 %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i32, ptr %add.ptr, align 4
+  ret i32 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_reg_int32_t_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i32 @ld_reg_int32_t_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_int32_t_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lwax r3, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  ret i32 %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i32, ptr %add.ptr, align 4
+  ret i32 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
@@ -734,8 +726,8 @@ define dso_local signext i32 @ld_or_int32_t_uint32_t(i64 %ptr, i8 zeroext %off)
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   ret i32 %1
 }
 
@@ -750,8 +742,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = zext i8 %off to i64
   %or = or i64 %and, %conv
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   ret i32 %1
 }
 
@@ -764,8 +756,8 @@ define dso_local signext i32 @ld_not_disjoint16_int32_t_uint32_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   ret i32 %1
 }
 
@@ -793,8 +785,8 @@ define dso_local signext i32 @ld_disjoint_unalign16_int32_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 6
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   ret i32 %1
 }
 
@@ -808,8 +800,8 @@ define dso_local signext i32 @ld_disjoint_align16_int32_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 8
   ret i32 %1
 }
 
@@ -823,8 +815,8 @@ define dso_local signext i32 @ld_not_disjoint32_int32_t_uint32_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   ret i32 %1
 }
 
@@ -857,8 +849,8 @@ define dso_local signext i32 @ld_disjoint_align32_int32_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 16
   ret i32 %1
 }
 
@@ -884,8 +876,8 @@ define dso_local signext i32 @ld_not_disjoint64_int32_t_uint32_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   ret i32 %1
 }
 
@@ -910,8 +902,8 @@ define dso_local signext i32 @ld_disjoint_align64_int32_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4096
   ret i32 %1
 }
 
@@ -922,7 +914,7 @@ define dso_local signext i32 @ld_cst_align16_int32_t_uint32_t() {
 ; CHECK-NEXT:    lwa r3, 4080(0)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 4080 to i32*), align 16
+  %0 = load i32, ptr inttoptr (i64 4080 to ptr), align 16
   ret i32 %0
 }
 
@@ -934,7 +926,7 @@ define dso_local signext i32 @ld_cst_align32_int32_t_uint32_t() {
 ; CHECK-NEXT:    lwa r3, -27108(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 9999900 to i32*), align 4
+  %0 = load i32, ptr inttoptr (i64 9999900 to ptr), align 4
   ret i32 %0
 }
 
@@ -955,7 +947,7 @@ define dso_local signext i32 @ld_cst_align64_int32_t_uint32_t() {
 ; CHECK-PREP10-NEXT:    lwa r3, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+  %0 = load i32, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret i32 %0
 }
 
@@ -971,14 +963,14 @@ define dso_local signext i32 @ld_0_int32_t_uint64_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    lwa r3, 4(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_unalign16_int32_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_unalign16_int32_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_unalign16_int32_t_uint64_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    plwa r3, 1(r3), 0
@@ -1013,15 +1005,14 @@ define dso_local signext i32 @ld_unalign16_int32_t_uint64_t(i8* nocapture readon
 ; CHECK-P8-BE-NEXT:    lwax r3, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align16_int32_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align16_int32_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-LE-LABEL: ld_align16_int32_t_uint64_t:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    lwa r3, 8(r3)
@@ -1032,15 +1023,14 @@ define dso_local signext i32 @ld_align16_int32_t_uint64_t(i8* nocapture readonly
 ; CHECK-BE-NEXT:    lwa r3, 12(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align32_int32_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align32_int32_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_align32_int32_t_uint64_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    plwa r3, 99999000(r3), 0
@@ -1079,15 +1069,14 @@ define dso_local signext i32 @ld_align32_int32_t_uint64_t(i8* nocapture readonly
 ; CHECK-P8-BE-NEXT:    lwax r3, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align64_int32_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align64_int32_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_align64_int32_t_uint64_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    pli r4, 244140625
@@ -1137,15 +1126,14 @@ define dso_local signext i32 @ld_align64_int32_t_uint64_t(i8* nocapture readonly
 ; CHECK-P8-BE-NEXT:    lwax r3, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_reg_int32_t_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i32 @ld_reg_int32_t_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LE-LABEL: ld_reg_int32_t_uint64_t:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    lwax r3, r3, r4
@@ -1157,10 +1145,9 @@ define dso_local signext i32 @ld_reg_int32_t_uint64_t(i8* nocapture readonly %pt
 ; CHECK-BE-NEXT:    lwa r3, 4(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i32
   ret i32 %conv
 }
 
@@ -1180,8 +1167,8 @@ define dso_local signext i32 @ld_or_int32_t_uint64_t(i64 %ptr, i8 zeroext %off)
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv1 = trunc i64 %1 to i32
   ret i32 %conv1
 }
@@ -1204,8 +1191,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = zext i8 %off to i64
   %or = or i64 %and, %conv
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv1 = trunc i64 %1 to i32
   ret i32 %conv1
 }
@@ -1225,8 +1212,8 @@ define dso_local signext i32 @ld_not_disjoint16_int32_t_uint64_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i32
   ret i32 %conv
 }
@@ -1275,8 +1262,8 @@ define dso_local signext i32 @ld_disjoint_unalign16_int32_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 6
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i32
   ret i32 %conv
 }
@@ -1297,8 +1284,8 @@ define dso_local signext i32 @ld_disjoint_align16_int32_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i32
   ret i32 %conv
 }
@@ -1320,8 +1307,8 @@ define dso_local signext i32 @ld_not_disjoint32_int32_t_uint64_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i32
   ret i32 %conv
 }
@@ -1380,8 +1367,8 @@ define dso_local signext i32 @ld_disjoint_align32_int32_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 16
   %conv = trunc i64 %1 to i32
   ret i32 %conv
 }
@@ -1447,8 +1434,8 @@ define dso_local signext i32 @ld_not_disjoint64_int32_t_uint64_t(i64 %ptr) {
 ; CHECK-P8-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i32
   ret i32 %conv
 }
@@ -1512,8 +1499,8 @@ define dso_local signext i32 @ld_disjoint_align64_int32_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 4096
   %conv = trunc i64 %1 to i32
   ret i32 %conv
 }
@@ -1530,7 +1517,7 @@ define dso_local signext i32 @ld_cst_align16_int32_t_uint64_t() {
 ; CHECK-BE-NEXT:    lwa r3, 4084(0)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 4080 to i64*), align 16
+  %0 = load i64, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = trunc i64 %0 to i32
   ret i32 %conv
 }
@@ -1549,7 +1536,7 @@ define dso_local signext i32 @ld_cst_align32_int32_t_uint64_t() {
 ; CHECK-BE-NEXT:    lwa r3, -27104(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 9999900 to i64*), align 8
+  %0 = load i64, ptr inttoptr (i64 9999900 to ptr), align 8
   %conv = trunc i64 %0 to i32
   ret i32 %conv
 }
@@ -1605,7 +1592,7 @@ define dso_local signext i32 @ld_cst_align64_int32_t_uint64_t() {
 ; CHECK-P8-BE-NEXT:    lwa r3, 0(r3)
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+  %0 = load i64, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = trunc i64 %0 to i32
   ret i32 %conv
 }
@@ -1620,14 +1607,14 @@ define dso_local signext i32 @ld_0_int32_t_float(i64 %ptr) {
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptosi float %1 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align16_int32_t_float(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align16_int32_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_int32_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs f0, 8(r3)
@@ -1636,15 +1623,14 @@ define dso_local signext i32 @ld_align16_int32_t_float(i8* nocapture readonly %p
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptosi float %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptosi float %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align32_int32_t_float(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align32_int32_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_int32_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfs f0, 99999000(r3), 0
@@ -1663,15 +1649,14 @@ define dso_local signext i32 @ld_align32_int32_t_float(i8* nocapture readonly %p
 ; CHECK-PREP10-NEXT:    extsw r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptosi float %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptosi float %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align64_int32_t_float(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align64_int32_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_int32_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -1693,15 +1678,14 @@ define dso_local signext i32 @ld_align64_int32_t_float(i8* nocapture readonly %p
 ; CHECK-PREP10-NEXT:    extsw r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptosi float %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptosi float %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_reg_int32_t_float(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i32 @ld_reg_int32_t_float(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_int32_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfsx f0, r3, r4
@@ -1710,10 +1694,9 @@ define dso_local signext i32 @ld_reg_int32_t_float(i8* nocapture readonly %ptr,
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptosi float %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptosi float %0 to i32
   ret i32 %conv
 }
 
@@ -1730,8 +1713,8 @@ define dso_local signext i32 @ld_or_int32_t_float(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv1 = fptosi float %1 to i32
   ret i32 %conv1
 }
@@ -1750,8 +1733,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = zext i8 %off to i64
   %or = or i64 %and, %conv
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv1 = fptosi float %1 to i32
   ret i32 %conv1
 }
@@ -1768,8 +1751,8 @@ define dso_local signext i32 @ld_not_disjoint16_int32_t_float(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptosi float %1 to i32
   ret i32 %conv
 }
@@ -1787,8 +1770,8 @@ define dso_local signext i32 @ld_disjoint_align16_int32_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 8
   %conv = fptosi float %1 to i32
   ret i32 %conv
 }
@@ -1806,8 +1789,8 @@ define dso_local signext i32 @ld_not_disjoint32_int32_t_float(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptosi float %1 to i32
   ret i32 %conv
 }
@@ -1850,8 +1833,8 @@ define dso_local signext i32 @ld_disjoint_align32_int32_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 16
   %conv = fptosi float %1 to i32
   ret i32 %conv
 }
@@ -1884,8 +1867,8 @@ define dso_local signext i32 @ld_not_disjoint64_int32_t_float(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptosi float %1 to i32
   ret i32 %conv
 }
@@ -1917,8 +1900,8 @@ define dso_local signext i32 @ld_disjoint_align64_int32_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4096
   %conv = fptosi float %1 to i32
   ret i32 %conv
 }
@@ -1933,7 +1916,7 @@ define dso_local signext i32 @ld_cst_align16_int32_t_float() {
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 4080 to float*), align 16
+  %0 = load float, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = fptosi float %0 to i32
   ret i32 %conv
 }
@@ -1949,7 +1932,7 @@ define dso_local signext i32 @ld_cst_align32_int32_t_float() {
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 9999900 to float*), align 4
+  %0 = load float, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = fptosi float %0 to i32
   ret i32 %conv
 }
@@ -1977,7 +1960,7 @@ define dso_local signext i32 @ld_cst_align64_int32_t_float() {
 ; CHECK-PREP10-NEXT:    extsw r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 1000000000000 to float*), align 4096
+  %0 = load float, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = fptosi float %0 to i32
   ret i32 %conv
 }
@@ -1992,14 +1975,14 @@ define dso_local signext i32 @ld_0_int32_t_double(i64 %ptr) {
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptosi double %1 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align16_int32_t_double(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align16_int32_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_int32_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd f0, 8(r3)
@@ -2008,15 +1991,14 @@ define dso_local signext i32 @ld_align16_int32_t_double(i8* nocapture readonly %
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptosi double %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptosi double %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align32_int32_t_double(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align32_int32_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_int32_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfd f0, 99999000(r3), 0
@@ -2035,15 +2017,14 @@ define dso_local signext i32 @ld_align32_int32_t_double(i8* nocapture readonly %
 ; CHECK-PREP10-NEXT:    extsw r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptosi double %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptosi double %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align64_int32_t_double(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align64_int32_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_int32_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -2065,15 +2046,14 @@ define dso_local signext i32 @ld_align64_int32_t_double(i8* nocapture readonly %
 ; CHECK-PREP10-NEXT:    extsw r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptosi double %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptosi double %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_reg_int32_t_double(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i32 @ld_reg_int32_t_double(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_int32_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfdx f0, r3, r4
@@ -2082,10 +2062,9 @@ define dso_local signext i32 @ld_reg_int32_t_double(i8* nocapture readonly %ptr,
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptosi double %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptosi double %0 to i32
   ret i32 %conv
 }
 
@@ -2102,8 +2081,8 @@ define dso_local signext i32 @ld_or_int32_t_double(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv1 = fptosi double %1 to i32
   ret i32 %conv1
 }
@@ -2122,8 +2101,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = zext i8 %off to i64
   %or = or i64 %and, %conv
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv1 = fptosi double %1 to i32
   ret i32 %conv1
 }
@@ -2140,8 +2119,8 @@ define dso_local signext i32 @ld_not_disjoint16_int32_t_double(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptosi double %1 to i32
   ret i32 %conv
 }
@@ -2159,8 +2138,8 @@ define dso_local signext i32 @ld_disjoint_align16_int32_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptosi double %1 to i32
   ret i32 %conv
 }
@@ -2178,8 +2157,8 @@ define dso_local signext i32 @ld_not_disjoint32_int32_t_double(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptosi double %1 to i32
   ret i32 %conv
 }
@@ -2222,8 +2201,8 @@ define dso_local signext i32 @ld_disjoint_align32_int32_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 16
   %conv = fptosi double %1 to i32
   ret i32 %conv
 }
@@ -2256,8 +2235,8 @@ define dso_local signext i32 @ld_not_disjoint64_int32_t_double(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptosi double %1 to i32
   ret i32 %conv
 }
@@ -2289,8 +2268,8 @@ define dso_local signext i32 @ld_disjoint_align64_int32_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 4096
   %conv = fptosi double %1 to i32
   ret i32 %conv
 }
@@ -2305,7 +2284,7 @@ define dso_local signext i32 @ld_cst_align16_int32_t_double() {
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 4080 to double*), align 16
+  %0 = load double, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = fptosi double %0 to i32
   ret i32 %conv
 }
@@ -2321,7 +2300,7 @@ define dso_local signext i32 @ld_cst_align32_int32_t_double() {
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 9999900 to double*), align 8
+  %0 = load double, ptr inttoptr (i64 9999900 to ptr), align 8
   %conv = fptosi double %0 to i32
   ret i32 %conv
 }
@@ -2349,7 +2328,7 @@ define dso_local signext i32 @ld_cst_align64_int32_t_double() {
 ; CHECK-PREP10-NEXT:    extsw r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 1000000000000 to double*), align 4096
+  %0 = load double, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = fptosi double %0 to i32
   ret i32 %conv
 }
@@ -2361,27 +2340,27 @@ define dso_local zeroext i32 @ld_0_uint32_t_uint8_t(i64 %ptr) {
 ; CHECK-NEXT:    lbz r3, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = zext i8 %1 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align16_uint32_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align16_uint32_t_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_uint32_t_uint8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lbz r3, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = zext i8 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align32_uint32_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align32_uint32_t_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_uint32_t_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plbz r3, 99999000(r3), 0
@@ -2394,14 +2373,14 @@ define dso_local zeroext i32 @ld_align32_uint32_t_uint8_t(i8* nocapture readonly
 ; CHECK-PREP10-NEXT:    lbzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = zext i8 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align64_uint32_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align64_uint32_t_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_uint32_t_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -2417,21 +2396,21 @@ define dso_local zeroext i32 @ld_align64_uint32_t_uint8_t(i8* nocapture readonly
 ; CHECK-PREP10-NEXT:    lbzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = zext i8 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_reg_uint32_t_uint8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i32 @ld_reg_uint32_t_uint8_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_uint32_t_uint8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lbzx r3, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = zext i8 %0 to i32
   ret i32 %conv
 }
@@ -2446,8 +2425,8 @@ define dso_local zeroext i32 @ld_or_uint32_t_uint8_t(i64 %ptr, i8 zeroext %off)
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv1 = zext i8 %1 to i32
   ret i32 %conv1
 }
@@ -2461,8 +2440,8 @@ define dso_local zeroext i32 @ld_not_disjoint16_uint32_t_uint8_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = zext i8 %1 to i32
   ret i32 %conv
 }
@@ -2477,8 +2456,8 @@ define dso_local zeroext i32 @ld_disjoint_align16_uint32_t_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 8
   %conv = zext i8 %1 to i32
   ret i32 %conv
 }
@@ -2493,8 +2472,8 @@ define dso_local zeroext i32 @ld_not_disjoint32_uint32_t_uint8_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = zext i8 %1 to i32
   ret i32 %conv
 }
@@ -2528,8 +2507,8 @@ define dso_local zeroext i32 @ld_disjoint_align32_uint32_t_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 16
   %conv = zext i8 %1 to i32
   ret i32 %conv
 }
@@ -2556,8 +2535,8 @@ define dso_local zeroext i32 @ld_not_disjoint64_uint32_t_uint8_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = zext i8 %1 to i32
   ret i32 %conv
 }
@@ -2583,8 +2562,8 @@ define dso_local zeroext i32 @ld_disjoint_align64_uint32_t_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 4096
   %conv = zext i8 %1 to i32
   ret i32 %conv
 }
@@ -2596,7 +2575,7 @@ define dso_local zeroext i32 @ld_cst_align16_uint32_t_uint8_t() {
 ; CHECK-NEXT:    lbz r3, 4080(0)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+  %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = zext i8 %0 to i32
   ret i32 %conv
 }
@@ -2609,7 +2588,7 @@ define dso_local zeroext i32 @ld_cst_align32_uint32_t_uint8_t() {
 ; CHECK-NEXT:    lbz r3, -27108(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+  %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = zext i8 %0 to i32
   ret i32 %conv
 }
@@ -2631,7 +2610,7 @@ define dso_local zeroext i32 @ld_cst_align64_uint32_t_uint8_t() {
 ; CHECK-PREP10-NEXT:    lbz r3, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+  %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = zext i8 %0 to i32
   ret i32 %conv
 }
@@ -2645,14 +2624,14 @@ define dso_local zeroext i32 @ld_0_uint32_t_int8_t(i64 %ptr) {
 ; CHECK-NEXT:    clrldi r3, r3, 32
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sext i8 %1 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align16_uint32_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align16_uint32_t_int8_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_uint32_t_int8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lbz r3, 8(r3)
@@ -2660,14 +2639,14 @@ define dso_local zeroext i32 @ld_align16_uint32_t_int8_t(i8* nocapture readonly
 ; CHECK-NEXT:    clrldi r3, r3, 32
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sext i8 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align32_uint32_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align32_uint32_t_int8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_uint32_t_int8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plbz r3, 99999000(r3), 0
@@ -2684,14 +2663,14 @@ define dso_local zeroext i32 @ld_align32_uint32_t_int8_t(i8* nocapture readonly
 ; CHECK-PREP10-NEXT:    clrldi r3, r3, 32
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sext i8 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align64_uint32_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align64_uint32_t_int8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_uint32_t_int8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -2711,14 +2690,14 @@ define dso_local zeroext i32 @ld_align64_uint32_t_int8_t(i8* nocapture readonly
 ; CHECK-PREP10-NEXT:    clrldi r3, r3, 32
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sext i8 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_reg_uint32_t_int8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i32 @ld_reg_uint32_t_int8_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_uint32_t_int8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lbzx r3, r3, r4
@@ -2726,8 +2705,8 @@ define dso_local zeroext i32 @ld_reg_uint32_t_int8_t(i8* nocapture readonly %ptr
 ; CHECK-NEXT:    clrldi r3, r3, 32
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sext i8 %0 to i32
   ret i32 %conv
 }
@@ -2744,8 +2723,8 @@ define dso_local zeroext i32 @ld_or_uint32_t_int8_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv1 = sext i8 %1 to i32
   ret i32 %conv1
 }
@@ -2761,8 +2740,8 @@ define dso_local zeroext i32 @ld_not_disjoint16_uint32_t_int8_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sext i8 %1 to i32
   ret i32 %conv
 }
@@ -2779,8 +2758,8 @@ define dso_local zeroext i32 @ld_disjoint_align16_uint32_t_int8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 8
   %conv = sext i8 %1 to i32
   ret i32 %conv
 }
@@ -2797,8 +2776,8 @@ define dso_local zeroext i32 @ld_not_disjoint32_uint32_t_int8_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sext i8 %1 to i32
   ret i32 %conv
 }
@@ -2838,8 +2817,8 @@ define dso_local zeroext i32 @ld_disjoint_align32_uint32_t_int8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 16
   %conv = sext i8 %1 to i32
   ret i32 %conv
 }
@@ -2870,8 +2849,8 @@ define dso_local zeroext i32 @ld_not_disjoint64_uint32_t_int8_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sext i8 %1 to i32
   ret i32 %conv
 }
@@ -2901,8 +2880,8 @@ define dso_local zeroext i32 @ld_disjoint_align64_uint32_t_int8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 4096
   %conv = sext i8 %1 to i32
   ret i32 %conv
 }
@@ -2916,7 +2895,7 @@ define dso_local zeroext i32 @ld_cst_align16_uint32_t_int8_t() {
 ; CHECK-NEXT:    clrldi r3, r3, 32
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+  %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = sext i8 %0 to i32
   ret i32 %conv
 }
@@ -2931,7 +2910,7 @@ define dso_local zeroext i32 @ld_cst_align32_uint32_t_int8_t() {
 ; CHECK-NEXT:    clrldi r3, r3, 32
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+  %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = sext i8 %0 to i32
   ret i32 %conv
 }
@@ -2957,7 +2936,7 @@ define dso_local zeroext i32 @ld_cst_align64_uint32_t_int8_t() {
 ; CHECK-PREP10-NEXT:    clrldi r3, r3, 32
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+  %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = sext i8 %0 to i32
   ret i32 %conv
 }
@@ -2969,28 +2948,27 @@ define dso_local zeroext i32 @ld_0_uint32_t_uint16_t(i64 %ptr) {
 ; CHECK-NEXT:    lhz r3, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = zext i16 %1 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align16_uint32_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align16_uint32_t_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_uint32_t_uint16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lhz r3, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = zext i16 %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = zext i16 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align32_uint32_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align32_uint32_t_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_uint32_t_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plhz r3, 99999000(r3), 0
@@ -3003,15 +2981,14 @@ define dso_local zeroext i32 @ld_align32_uint32_t_uint16_t(i8* nocapture readonl
 ; CHECK-PREP10-NEXT:    lhzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = zext i16 %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = zext i16 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align64_uint32_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align64_uint32_t_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_uint32_t_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -3027,24 +3004,22 @@ define dso_local zeroext i32 @ld_align64_uint32_t_uint16_t(i8* nocapture readonl
 ; CHECK-PREP10-NEXT:    lhzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = zext i16 %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = zext i16 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_reg_uint32_t_uint16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i32 @ld_reg_uint32_t_uint16_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_uint32_t_uint16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lhzx r3, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = zext i16 %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = zext i16 %0 to i32
   ret i32 %conv
 }
 
@@ -3058,8 +3033,8 @@ define dso_local zeroext i32 @ld_or_uint32_t_uint16_t(i64 %ptr, i8 zeroext %off)
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv1 = zext i16 %1 to i32
   ret i32 %conv1
 }
@@ -3073,8 +3048,8 @@ define dso_local zeroext i32 @ld_not_disjoint16_uint32_t_uint16_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = zext i16 %1 to i32
   ret i32 %conv
 }
@@ -3089,8 +3064,8 @@ define dso_local zeroext i32 @ld_disjoint_align16_uint32_t_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 8
   %conv = zext i16 %1 to i32
   ret i32 %conv
 }
@@ -3105,8 +3080,8 @@ define dso_local zeroext i32 @ld_not_disjoint32_uint32_t_uint16_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = zext i16 %1 to i32
   ret i32 %conv
 }
@@ -3140,8 +3115,8 @@ define dso_local zeroext i32 @ld_disjoint_align32_uint32_t_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 16
   %conv = zext i16 %1 to i32
   ret i32 %conv
 }
@@ -3168,8 +3143,8 @@ define dso_local zeroext i32 @ld_not_disjoint64_uint32_t_uint16_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = zext i16 %1 to i32
   ret i32 %conv
 }
@@ -3195,8 +3170,8 @@ define dso_local zeroext i32 @ld_disjoint_align64_uint32_t_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 4096
   %conv = zext i16 %1 to i32
   ret i32 %conv
 }
@@ -3208,7 +3183,7 @@ define dso_local zeroext i32 @ld_cst_align16_uint32_t_uint16_t() {
 ; CHECK-NEXT:    lhz r3, 4080(0)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+  %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = zext i16 %0 to i32
   ret i32 %conv
 }
@@ -3221,7 +3196,7 @@ define dso_local zeroext i32 @ld_cst_align32_uint32_t_uint16_t() {
 ; CHECK-NEXT:    lhz r3, -27108(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+  %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = zext i16 %0 to i32
   ret i32 %conv
 }
@@ -3243,7 +3218,7 @@ define dso_local zeroext i32 @ld_cst_align64_uint32_t_uint16_t() {
 ; CHECK-PREP10-NEXT:    lhz r3, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+  %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = zext i16 %0 to i32
   ret i32 %conv
 }
@@ -3256,29 +3231,28 @@ define dso_local zeroext i32 @ld_0_uint32_t_int16_t(i64 %ptr) {
 ; CHECK-NEXT:    clrldi r3, r3, 32
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = sext i16 %1 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align16_uint32_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align16_uint32_t_int16_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_uint32_t_int16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lha r3, 8(r3)
 ; CHECK-NEXT:    clrldi r3, r3, 32
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = sext i16 %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = sext i16 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align32_uint32_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align32_uint32_t_int16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_uint32_t_int16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plha r3, 99999000(r3), 0
@@ -3293,15 +3267,14 @@ define dso_local zeroext i32 @ld_align32_uint32_t_int16_t(i8* nocapture readonly
 ; CHECK-PREP10-NEXT:    clrldi r3, r3, 32
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = sext i16 %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = sext i16 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align64_uint32_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align64_uint32_t_int16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_uint32_t_int16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -3319,25 +3292,23 @@ define dso_local zeroext i32 @ld_align64_uint32_t_int16_t(i8* nocapture readonly
 ; CHECK-PREP10-NEXT:    clrldi r3, r3, 32
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = sext i16 %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = sext i16 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_reg_uint32_t_int16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i32 @ld_reg_uint32_t_int16_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_uint32_t_int16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lhax r3, r3, r4
 ; CHECK-NEXT:    clrldi r3, r3, 32
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = sext i16 %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = sext i16 %0 to i32
   ret i32 %conv
 }
 
@@ -3352,8 +3323,8 @@ define dso_local zeroext i32 @ld_or_uint32_t_int16_t(i64 %ptr, i8 zeroext %off)
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv1 = sext i16 %1 to i32
   ret i32 %conv1
 }
@@ -3368,8 +3339,8 @@ define dso_local zeroext i32 @ld_not_disjoint16_uint32_t_int16_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = sext i16 %1 to i32
   ret i32 %conv
 }
@@ -3385,8 +3356,8 @@ define dso_local zeroext i32 @ld_disjoint_align16_uint32_t_int16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 8
   %conv = sext i16 %1 to i32
   ret i32 %conv
 }
@@ -3402,8 +3373,8 @@ define dso_local zeroext i32 @ld_not_disjoint32_uint32_t_int16_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = sext i16 %1 to i32
   ret i32 %conv
 }
@@ -3440,8 +3411,8 @@ define dso_local zeroext i32 @ld_disjoint_align32_uint32_t_int16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 16
   %conv = sext i16 %1 to i32
   ret i32 %conv
 }
@@ -3470,8 +3441,8 @@ define dso_local zeroext i32 @ld_not_disjoint64_uint32_t_int16_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = sext i16 %1 to i32
   ret i32 %conv
 }
@@ -3499,8 +3470,8 @@ define dso_local zeroext i32 @ld_disjoint_align64_uint32_t_int16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 4096
   %conv = sext i16 %1 to i32
   ret i32 %conv
 }
@@ -3513,7 +3484,7 @@ define dso_local zeroext i32 @ld_cst_align16_uint32_t_int16_t() {
 ; CHECK-NEXT:    clrldi r3, r3, 32
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+  %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = sext i16 %0 to i32
   ret i32 %conv
 }
@@ -3527,7 +3498,7 @@ define dso_local zeroext i32 @ld_cst_align32_uint32_t_int16_t() {
 ; CHECK-NEXT:    clrldi r3, r3, 32
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+  %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = sext i16 %0 to i32
   ret i32 %conv
 }
@@ -3551,7 +3522,7 @@ define dso_local zeroext i32 @ld_cst_align64_uint32_t_int16_t() {
 ; CHECK-PREP10-NEXT:    clrldi r3, r3, 32
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+  %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = sext i16 %0 to i32
   ret i32 %conv
 }
@@ -3563,26 +3534,25 @@ define dso_local zeroext i32 @ld_0_uint32_t_uint32_t(i64 %ptr) {
 ; CHECK-NEXT:    lwz r3, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i32, ptr %0, align 4
   ret i32 %1
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align16_uint32_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align16_uint32_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_uint32_t_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lwz r3, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  ret i32 %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i32, ptr %add.ptr, align 4
+  ret i32 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align32_uint32_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align32_uint32_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_uint32_t_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plwz r3, 99999000(r3), 0
@@ -3595,14 +3565,13 @@ define dso_local zeroext i32 @ld_align32_uint32_t_uint32_t(i8* nocapture readonl
 ; CHECK-PREP10-NEXT:    lwzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  ret i32 %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i32, ptr %add.ptr, align 4
+  ret i32 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align64_uint32_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align64_uint32_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_uint32_t_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -3618,23 +3587,21 @@ define dso_local zeroext i32 @ld_align64_uint32_t_uint32_t(i8* nocapture readonl
 ; CHECK-PREP10-NEXT:    lwzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  ret i32 %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i32, ptr %add.ptr, align 4
+  ret i32 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_reg_uint32_t_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i32 @ld_reg_uint32_t_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_uint32_t_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lwzx r3, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  ret i32 %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i32, ptr %add.ptr, align 4
+  ret i32 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
@@ -3647,8 +3614,8 @@ define dso_local zeroext i32 @ld_or_uint32_t_uint32_t(i64 %ptr, i8 zeroext %off)
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   ret i32 %1
 }
 
@@ -3661,8 +3628,8 @@ define dso_local zeroext i32 @ld_not_disjoint16_uint32_t_uint32_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   ret i32 %1
 }
 
@@ -3676,8 +3643,8 @@ define dso_local zeroext i32 @ld_disjoint_align16_uint32_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 8
   ret i32 %1
 }
 
@@ -3691,8 +3658,8 @@ define dso_local zeroext i32 @ld_not_disjoint32_uint32_t_uint32_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   ret i32 %1
 }
 
@@ -3725,8 +3692,8 @@ define dso_local zeroext i32 @ld_disjoint_align32_uint32_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 16
   ret i32 %1
 }
 
@@ -3752,8 +3719,8 @@ define dso_local zeroext i32 @ld_not_disjoint64_uint32_t_uint32_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   ret i32 %1
 }
 
@@ -3778,8 +3745,8 @@ define dso_local zeroext i32 @ld_disjoint_align64_uint32_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4096
   ret i32 %1
 }
 
@@ -3790,7 +3757,7 @@ define dso_local zeroext i32 @ld_cst_align16_uint32_t_uint32_t() {
 ; CHECK-NEXT:    lwz r3, 4080(0)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 4080 to i32*), align 16
+  %0 = load i32, ptr inttoptr (i64 4080 to ptr), align 16
   ret i32 %0
 }
 
@@ -3802,7 +3769,7 @@ define dso_local zeroext i32 @ld_cst_align32_uint32_t_uint32_t() {
 ; CHECK-NEXT:    lwz r3, -27108(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 9999900 to i32*), align 4
+  %0 = load i32, ptr inttoptr (i64 9999900 to ptr), align 4
   ret i32 %0
 }
 
@@ -3823,7 +3790,7 @@ define dso_local zeroext i32 @ld_cst_align64_uint32_t_uint32_t() {
 ; CHECK-PREP10-NEXT:    lwz r3, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+  %0 = load i32, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret i32 %0
 }
 
@@ -3839,14 +3806,14 @@ define dso_local zeroext i32 @ld_0_uint32_t_uint64_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    lwz r3, 4(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align16_uint32_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align16_uint32_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-LE-LABEL: ld_align16_uint32_t_uint64_t:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    lwz r3, 8(r3)
@@ -3857,15 +3824,14 @@ define dso_local zeroext i32 @ld_align16_uint32_t_uint64_t(i8* nocapture readonl
 ; CHECK-BE-NEXT:    lwz r3, 12(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align32_uint32_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align32_uint32_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_align32_uint32_t_uint64_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    plwz r3, 99999000(r3), 0
@@ -3904,15 +3870,14 @@ define dso_local zeroext i32 @ld_align32_uint32_t_uint64_t(i8* nocapture readonl
 ; CHECK-P8-BE-NEXT:    lwzx r3, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align64_uint32_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align64_uint32_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_align64_uint32_t_uint64_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    pli r4, 244140625
@@ -3962,15 +3927,14 @@ define dso_local zeroext i32 @ld_align64_uint32_t_uint64_t(i8* nocapture readonl
 ; CHECK-P8-BE-NEXT:    lwzx r3, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_reg_uint32_t_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i32 @ld_reg_uint32_t_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LE-LABEL: ld_reg_uint32_t_uint64_t:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    lwzx r3, r3, r4
@@ -3982,10 +3946,9 @@ define dso_local zeroext i32 @ld_reg_uint32_t_uint64_t(i8* nocapture readonly %p
 ; CHECK-BE-NEXT:    lwz r3, 4(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i32
   ret i32 %conv
 }
 
@@ -4005,8 +3968,8 @@ define dso_local zeroext i32 @ld_or_uint32_t_uint64_t(i64 %ptr, i8 zeroext %off)
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv1 = trunc i64 %1 to i32
   ret i32 %conv1
 }
@@ -4026,8 +3989,8 @@ define dso_local zeroext i32 @ld_not_disjoint16_uint32_t_uint64_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i32
   ret i32 %conv
 }
@@ -4048,8 +4011,8 @@ define dso_local zeroext i32 @ld_disjoint_align16_uint32_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i32
   ret i32 %conv
 }
@@ -4071,8 +4034,8 @@ define dso_local zeroext i32 @ld_not_disjoint32_uint32_t_uint64_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i32
   ret i32 %conv
 }
@@ -4131,8 +4094,8 @@ define dso_local zeroext i32 @ld_disjoint_align32_uint32_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 16
   %conv = trunc i64 %1 to i32
   ret i32 %conv
 }
@@ -4198,8 +4161,8 @@ define dso_local zeroext i32 @ld_not_disjoint64_uint32_t_uint64_t(i64 %ptr) {
 ; CHECK-P8-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i32
   ret i32 %conv
 }
@@ -4263,8 +4226,8 @@ define dso_local zeroext i32 @ld_disjoint_align64_uint32_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 4096
   %conv = trunc i64 %1 to i32
   ret i32 %conv
 }
@@ -4281,7 +4244,7 @@ define dso_local zeroext i32 @ld_cst_align16_uint32_t_uint64_t() {
 ; CHECK-BE-NEXT:    lwz r3, 4084(0)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 4080 to i64*), align 16
+  %0 = load i64, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = trunc i64 %0 to i32
   ret i32 %conv
 }
@@ -4300,7 +4263,7 @@ define dso_local zeroext i32 @ld_cst_align32_uint32_t_uint64_t() {
 ; CHECK-BE-NEXT:    lwz r3, -27104(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 9999900 to i64*), align 8
+  %0 = load i64, ptr inttoptr (i64 9999900 to ptr), align 8
   %conv = trunc i64 %0 to i32
   ret i32 %conv
 }
@@ -4356,7 +4319,7 @@ define dso_local zeroext i32 @ld_cst_align64_uint32_t_uint64_t() {
 ; CHECK-P8-BE-NEXT:    lwz r3, 0(r3)
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+  %0 = load i64, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = trunc i64 %0 to i32
   ret i32 %conv
 }
@@ -4370,14 +4333,14 @@ define dso_local zeroext i32 @ld_0_uint32_t_float(i64 %ptr) {
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptoui float %1 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align16_uint32_t_float(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align16_uint32_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_uint32_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs f0, 8(r3)
@@ -4385,15 +4348,14 @@ define dso_local zeroext i32 @ld_align16_uint32_t_float(i8* nocapture readonly %
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptoui float %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptoui float %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align32_uint32_t_float(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align32_uint32_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_uint32_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfs f0, 99999000(r3), 0
@@ -4410,15 +4372,14 @@ define dso_local zeroext i32 @ld_align32_uint32_t_float(i8* nocapture readonly %
 ; CHECK-PREP10-NEXT:    mffprwz r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptoui float %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptoui float %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align64_uint32_t_float(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align64_uint32_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_uint32_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -4438,15 +4399,14 @@ define dso_local zeroext i32 @ld_align64_uint32_t_float(i8* nocapture readonly %
 ; CHECK-PREP10-NEXT:    mffprwz r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptoui float %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptoui float %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_reg_uint32_t_float(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i32 @ld_reg_uint32_t_float(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_uint32_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfsx f0, r3, r4
@@ -4454,10 +4414,9 @@ define dso_local zeroext i32 @ld_reg_uint32_t_float(i8* nocapture readonly %ptr,
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptoui float %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptoui float %0 to i32
   ret i32 %conv
 }
 
@@ -4473,8 +4432,8 @@ define dso_local zeroext i32 @ld_or_uint32_t_float(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv1 = fptoui float %1 to i32
   ret i32 %conv1
 }
@@ -4490,8 +4449,8 @@ define dso_local zeroext i32 @ld_not_disjoint16_uint32_t_float(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptoui float %1 to i32
   ret i32 %conv
 }
@@ -4508,8 +4467,8 @@ define dso_local zeroext i32 @ld_disjoint_align16_uint32_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 8
   %conv = fptoui float %1 to i32
   ret i32 %conv
 }
@@ -4526,8 +4485,8 @@ define dso_local zeroext i32 @ld_not_disjoint32_uint32_t_float(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptoui float %1 to i32
   ret i32 %conv
 }
@@ -4567,8 +4526,8 @@ define dso_local zeroext i32 @ld_disjoint_align32_uint32_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 16
   %conv = fptoui float %1 to i32
   ret i32 %conv
 }
@@ -4599,8 +4558,8 @@ define dso_local zeroext i32 @ld_not_disjoint64_uint32_t_float(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptoui float %1 to i32
   ret i32 %conv
 }
@@ -4630,8 +4589,8 @@ define dso_local zeroext i32 @ld_disjoint_align64_uint32_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4096
   %conv = fptoui float %1 to i32
   ret i32 %conv
 }
@@ -4645,7 +4604,7 @@ define dso_local zeroext i32 @ld_cst_align16_uint32_t_float() {
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 4080 to float*), align 16
+  %0 = load float, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = fptoui float %0 to i32
   ret i32 %conv
 }
@@ -4660,7 +4619,7 @@ define dso_local zeroext i32 @ld_cst_align32_uint32_t_float() {
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 9999900 to float*), align 4
+  %0 = load float, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = fptoui float %0 to i32
   ret i32 %conv
 }
@@ -4686,7 +4645,7 @@ define dso_local zeroext i32 @ld_cst_align64_uint32_t_float() {
 ; CHECK-PREP10-NEXT:    mffprwz r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 1000000000000 to float*), align 4096
+  %0 = load float, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = fptoui float %0 to i32
   ret i32 %conv
 }
@@ -4700,14 +4659,14 @@ define dso_local zeroext i32 @ld_0_uint32_t_double(i64 %ptr) {
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptoui double %1 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align16_uint32_t_double(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align16_uint32_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_uint32_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd f0, 8(r3)
@@ -4715,15 +4674,14 @@ define dso_local zeroext i32 @ld_align16_uint32_t_double(i8* nocapture readonly
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptoui double %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptoui double %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align32_uint32_t_double(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align32_uint32_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_uint32_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfd f0, 99999000(r3), 0
@@ -4740,15 +4698,14 @@ define dso_local zeroext i32 @ld_align32_uint32_t_double(i8* nocapture readonly
 ; CHECK-PREP10-NEXT:    mffprwz r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptoui double %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptoui double %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align64_uint32_t_double(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align64_uint32_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_uint32_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -4768,15 +4725,14 @@ define dso_local zeroext i32 @ld_align64_uint32_t_double(i8* nocapture readonly
 ; CHECK-PREP10-NEXT:    mffprwz r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptoui double %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptoui double %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_reg_uint32_t_double(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i32 @ld_reg_uint32_t_double(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_uint32_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfdx f0, r3, r4
@@ -4784,10 +4740,9 @@ define dso_local zeroext i32 @ld_reg_uint32_t_double(i8* nocapture readonly %ptr
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptoui double %1 to i32
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptoui double %0 to i32
   ret i32 %conv
 }
 
@@ -4803,8 +4758,8 @@ define dso_local zeroext i32 @ld_or_uint32_t_double(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv1 = fptoui double %1 to i32
   ret i32 %conv1
 }
@@ -4820,8 +4775,8 @@ define dso_local zeroext i32 @ld_not_disjoint16_uint32_t_double(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptoui double %1 to i32
   ret i32 %conv
 }
@@ -4838,8 +4793,8 @@ define dso_local zeroext i32 @ld_disjoint_align16_uint32_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptoui double %1 to i32
   ret i32 %conv
 }
@@ -4856,8 +4811,8 @@ define dso_local zeroext i32 @ld_not_disjoint32_uint32_t_double(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptoui double %1 to i32
   ret i32 %conv
 }
@@ -4897,8 +4852,8 @@ define dso_local zeroext i32 @ld_disjoint_align32_uint32_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 16
   %conv = fptoui double %1 to i32
   ret i32 %conv
 }
@@ -4929,8 +4884,8 @@ define dso_local zeroext i32 @ld_not_disjoint64_uint32_t_double(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptoui double %1 to i32
   ret i32 %conv
 }
@@ -4960,8 +4915,8 @@ define dso_local zeroext i32 @ld_disjoint_align64_uint32_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 4096
   %conv = fptoui double %1 to i32
   ret i32 %conv
 }
@@ -4975,7 +4930,7 @@ define dso_local zeroext i32 @ld_cst_align16_uint32_t_double() {
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 4080 to double*), align 16
+  %0 = load double, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = fptoui double %0 to i32
   ret i32 %conv
 }
@@ -4990,7 +4945,7 @@ define dso_local zeroext i32 @ld_cst_align32_uint32_t_double() {
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 9999900 to double*), align 8
+  %0 = load double, ptr inttoptr (i64 9999900 to ptr), align 8
   %conv = fptoui double %0 to i32
   ret i32 %conv
 }
@@ -5016,7 +4971,7 @@ define dso_local zeroext i32 @ld_cst_align64_uint32_t_double() {
 ; CHECK-PREP10-NEXT:    mffprwz r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 1000000000000 to double*), align 4096
+  %0 = load double, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = fptoui double %0 to i32
   ret i32 %conv
 }
@@ -5029,26 +4984,26 @@ define dso_local void @st_0_uint32_t_uint8_t(i64 %ptr, i32 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i32 %str to i8
-  %0 = inttoptr i64 %ptr to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %ptr to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint32_t_uint8_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align16_uint32_t_uint8_t(ptr nocapture %ptr, i32 zeroext %str) {
 ; CHECK-LABEL: st_align16_uint32_t_uint8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stb r4, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i32 %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint32_t_uint8_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align32_uint32_t_uint8_t(ptr nocapture %ptr, i32 zeroext %str) {
 ; CHECK-P10-LABEL: st_align32_uint32_t_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pstb r4, 99999000(r3), 0
@@ -5062,13 +5017,13 @@ define dso_local void @st_align32_uint32_t_uint8_t(i8* nocapture %ptr, i32 zeroe
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = trunc i32 %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint32_t_uint8_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align64_uint32_t_uint8_t(ptr nocapture %ptr, i32 zeroext %str) {
 ; CHECK-P10-LABEL: st_align64_uint32_t_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r5, 244140625
@@ -5085,21 +5040,21 @@ define dso_local void @st_align64_uint32_t_uint8_t(i8* nocapture %ptr, i32 zeroe
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = trunc i32 %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint32_t_uint8_t(i8* nocapture %ptr, i64 %off, i32 zeroext %str) {
+define dso_local void @st_reg_uint32_t_uint8_t(ptr nocapture %ptr, i64 %off, i32 zeroext %str) {
 ; CHECK-LABEL: st_reg_uint32_t_uint8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stbx r5, r3, r4
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i32 %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
@@ -5114,8 +5069,8 @@ entry:
   %conv = trunc i32 %str to i8
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -5129,8 +5084,8 @@ define dso_local void @st_not_disjoint16_uint32_t_uint8_t(i64 %ptr, i32 zeroext
 entry:
   %conv = trunc i32 %str to i8
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -5145,8 +5100,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = trunc i32 %str to i8
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 8
   ret void
 }
 
@@ -5161,8 +5116,8 @@ define dso_local void @st_not_disjoint32_uint32_t_uint8_t(i64 %ptr, i32 zeroext
 entry:
   %conv = trunc i32 %str to i8
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -5196,8 +5151,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = trunc i32 %str to i8
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 16
   ret void
 }
 
@@ -5224,8 +5179,8 @@ define dso_local void @st_not_disjoint64_uint32_t_uint8_t(i64 %ptr, i32 zeroext
 entry:
   %conv = trunc i32 %str to i8
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -5251,8 +5206,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = trunc i32 %str to i8
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -5264,7 +5219,7 @@ define dso_local void @st_cst_align16_uint32_t_uint8_t(i32 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i32 %str to i8
-  store i8 %conv, i8* inttoptr (i64 4080 to i8*), align 16
+  store i8 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -5277,7 +5232,7 @@ define dso_local void @st_cst_align32_uint32_t_uint8_t(i32 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i32 %str to i8
-  store i8 %conv, i8* inttoptr (i64 9999900 to i8*), align 4
+  store i8 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -5299,7 +5254,7 @@ define dso_local void @st_cst_align64_uint32_t_uint8_t(i32 zeroext %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = trunc i32 %str to i8
-  store i8 %conv, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+  store i8 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -5311,27 +5266,26 @@ define dso_local void @st_0_uint32_t_uint16_t(i64 %ptr, i32 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i32 %str to i16
-  %0 = inttoptr i64 %ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %ptr to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint32_t_uint16_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align16_uint32_t_uint16_t(ptr nocapture %ptr, i32 zeroext %str) {
 ; CHECK-LABEL: st_align16_uint32_t_uint16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sth r4, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i32 %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint32_t_uint16_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align32_uint32_t_uint16_t(ptr nocapture %ptr, i32 zeroext %str) {
 ; CHECK-P10-LABEL: st_align32_uint32_t_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    psth r4, 99999000(r3), 0
@@ -5345,14 +5299,13 @@ define dso_local void @st_align32_uint32_t_uint16_t(i8* nocapture %ptr, i32 zero
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = trunc i32 %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint32_t_uint16_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align64_uint32_t_uint16_t(ptr nocapture %ptr, i32 zeroext %str) {
 ; CHECK-P10-LABEL: st_align64_uint32_t_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r5, 244140625
@@ -5369,23 +5322,21 @@ define dso_local void @st_align64_uint32_t_uint16_t(i8* nocapture %ptr, i32 zero
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = trunc i32 %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint32_t_uint16_t(i8* nocapture %ptr, i64 %off, i32 zeroext %str) {
+define dso_local void @st_reg_uint32_t_uint16_t(ptr nocapture %ptr, i64 %off, i32 zeroext %str) {
 ; CHECK-LABEL: st_reg_uint32_t_uint16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sthx r5, r3, r4
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i32 %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
@@ -5400,8 +5351,8 @@ entry:
   %conv = trunc i32 %str to i16
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -5415,8 +5366,8 @@ define dso_local void @st_not_disjoint16_uint32_t_uint16_t(i64 %ptr, i32 zeroext
 entry:
   %conv = trunc i32 %str to i16
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -5431,8 +5382,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = trunc i32 %str to i16
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 8
   ret void
 }
 
@@ -5447,8 +5398,8 @@ define dso_local void @st_not_disjoint32_uint32_t_uint16_t(i64 %ptr, i32 zeroext
 entry:
   %conv = trunc i32 %str to i16
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -5482,8 +5433,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = trunc i32 %str to i16
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 16
   ret void
 }
 
@@ -5510,8 +5461,8 @@ define dso_local void @st_not_disjoint64_uint32_t_uint16_t(i64 %ptr, i32 zeroext
 entry:
   %conv = trunc i32 %str to i16
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -5537,8 +5488,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = trunc i32 %str to i16
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -5550,7 +5501,7 @@ define dso_local void @st_cst_align16_uint32_t_uint16_t(i32 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i32 %str to i16
-  store i16 %conv, i16* inttoptr (i64 4080 to i16*), align 16
+  store i16 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -5563,7 +5514,7 @@ define dso_local void @st_cst_align32_uint32_t_uint16_t(i32 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i32 %str to i16
-  store i16 %conv, i16* inttoptr (i64 9999900 to i16*), align 4
+  store i16 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -5585,7 +5536,7 @@ define dso_local void @st_cst_align64_uint32_t_uint16_t(i32 zeroext %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = trunc i32 %str to i16
-  store i16 %conv, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+  store i16 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -5596,26 +5547,25 @@ define dso_local void @st_0_uint32_t_uint32_t(i64 %ptr, i32 zeroext %str) {
 ; CHECK-NEXT:    stw r4, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i32*
-  store i32 %str, i32* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  store i32 %str, ptr %0, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint32_t_uint32_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align16_uint32_t_uint32_t(ptr nocapture %ptr, i32 zeroext %str) {
 ; CHECK-LABEL: st_align16_uint32_t_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stw r4, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %str, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i32 %str, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint32_t_uint32_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align32_uint32_t_uint32_t(ptr nocapture %ptr, i32 zeroext %str) {
 ; CHECK-P10-LABEL: st_align32_uint32_t_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pstw r4, 99999000(r3), 0
@@ -5628,14 +5578,13 @@ define dso_local void @st_align32_uint32_t_uint32_t(i8* nocapture %ptr, i32 zero
 ; CHECK-PREP10-NEXT:    stwx r4, r3, r5
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %str, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i32 %str, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint32_t_uint32_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align64_uint32_t_uint32_t(ptr nocapture %ptr, i32 zeroext %str) {
 ; CHECK-P10-LABEL: st_align64_uint32_t_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r5, 244140625
@@ -5651,22 +5600,20 @@ define dso_local void @st_align64_uint32_t_uint32_t(i8* nocapture %ptr, i32 zero
 ; CHECK-PREP10-NEXT:    stwx r4, r3, r5
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %str, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i32 %str, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint32_t_uint32_t(i8* nocapture %ptr, i64 %off, i32 zeroext %str) {
+define dso_local void @st_reg_uint32_t_uint32_t(ptr nocapture %ptr, i64 %off, i32 zeroext %str) {
 ; CHECK-LABEL: st_reg_uint32_t_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stwx r5, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %str, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i32 %str, ptr %add.ptr, align 4
   ret void
 }
 
@@ -5680,8 +5627,8 @@ define dso_local void @st_or1_uint32_t_uint32_t(i64 %ptr, i8 zeroext %off, i32 z
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i32*
-  store i32 %str, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %str, ptr %0, align 4
   ret void
 }
 
@@ -5694,8 +5641,8 @@ define dso_local void @st_not_disjoint16_uint32_t_uint32_t(i64 %ptr, i32 zeroext
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i32*
-  store i32 %str, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %str, ptr %0, align 4
   ret void
 }
 
@@ -5709,8 +5656,8 @@ define dso_local void @st_disjoint_align16_uint32_t_uint32_t(i64 %ptr, i32 zeroe
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i32*
-  store i32 %str, i32* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i32 %str, ptr %0, align 8
   ret void
 }
 
@@ -5724,8 +5671,8 @@ define dso_local void @st_not_disjoint32_uint32_t_uint32_t(i64 %ptr, i32 zeroext
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i32*
-  store i32 %str, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %str, ptr %0, align 4
   ret void
 }
 
@@ -5758,8 +5705,8 @@ define dso_local void @st_disjoint_align32_uint32_t_uint32_t(i64 %ptr, i32 zeroe
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i32*
-  store i32 %str, i32* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i32 %str, ptr %0, align 16
   ret void
 }
 
@@ -5785,8 +5732,8 @@ define dso_local void @st_not_disjoint64_uint32_t_uint32_t(i64 %ptr, i32 zeroext
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  store i32 %str, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %str, ptr %0, align 4
   ret void
 }
 
@@ -5811,8 +5758,8 @@ define dso_local void @st_disjoint_align64_uint32_t_uint32_t(i64 %ptr, i32 zeroe
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i32*
-  store i32 %str, i32* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i32 %str, ptr %0, align 4096
   ret void
 }
 
@@ -5823,7 +5770,7 @@ define dso_local void @st_cst_align16_uint32_t_uint32_t(i32 zeroext %str) {
 ; CHECK-NEXT:    stw r3, 4080(0)
 ; CHECK-NEXT:    blr
 entry:
-  store i32 %str, i32* inttoptr (i64 4080 to i32*), align 16
+  store i32 %str, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -5835,7 +5782,7 @@ define dso_local void @st_cst_align32_uint32_t_uint32_t(i32 zeroext %str) {
 ; CHECK-NEXT:    stw r3, -27108(r4)
 ; CHECK-NEXT:    blr
 entry:
-  store i32 %str, i32* inttoptr (i64 9999900 to i32*), align 4
+  store i32 %str, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -5856,7 +5803,7 @@ define dso_local void @st_cst_align64_uint32_t_uint32_t(i32 zeroext %str) {
 ; CHECK-PREP10-NEXT:    stw r3, 0(r4)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  store i32 %str, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+  store i32 %str, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -5868,27 +5815,26 @@ define dso_local void @st_0_uint32_t_uint64_t(i64 %ptr, i32 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i32 %str to i64
-  %0 = inttoptr i64 %ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint32_t_uint64_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align16_uint32_t_uint64_t(ptr nocapture %ptr, i32 zeroext %str) {
 ; CHECK-LABEL: st_align16_uint32_t_uint64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    std r4, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i32 %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint32_t_uint64_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align32_uint32_t_uint64_t(ptr nocapture %ptr, i32 zeroext %str) {
 ; CHECK-P10-LABEL: st_align32_uint32_t_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pstd r4, 99999000(r3), 0
@@ -5902,14 +5848,13 @@ define dso_local void @st_align32_uint32_t_uint64_t(i8* nocapture %ptr, i32 zero
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = zext i32 %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint32_t_uint64_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align64_uint32_t_uint64_t(ptr nocapture %ptr, i32 zeroext %str) {
 ; CHECK-P10-LABEL: st_align64_uint32_t_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r5, 244140625
@@ -5926,23 +5871,21 @@ define dso_local void @st_align64_uint32_t_uint64_t(i8* nocapture %ptr, i32 zero
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = zext i32 %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint32_t_uint64_t(i8* nocapture %ptr, i64 %off, i32 zeroext %str) {
+define dso_local void @st_reg_uint32_t_uint64_t(ptr nocapture %ptr, i64 %off, i32 zeroext %str) {
 ; CHECK-LABEL: st_reg_uint32_t_uint64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stdx r5, r3, r4
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i32 %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
@@ -5957,8 +5900,8 @@ entry:
   %conv = zext i32 %str to i64
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -5972,8 +5915,8 @@ define dso_local void @st_not_disjoint16_uint32_t_uint64_t(i64 %ptr, i32 zeroext
 entry:
   %conv = zext i32 %str to i64
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -5988,8 +5931,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = zext i32 %str to i64
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6004,8 +5947,8 @@ define dso_local void @st_not_disjoint32_uint32_t_uint64_t(i64 %ptr, i32 zeroext
 entry:
   %conv = zext i32 %str to i64
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6039,8 +5982,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = zext i32 %str to i64
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 16
   ret void
 }
 
@@ -6067,8 +6010,8 @@ define dso_local void @st_not_disjoint64_uint32_t_uint64_t(i64 %ptr, i32 zeroext
 entry:
   %conv = zext i32 %str to i64
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6094,8 +6037,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = zext i32 %str to i64
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -6107,7 +6050,7 @@ define dso_local void @st_cst_align16_uint32_t_uint64_t(i32 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i32 %str to i64
-  store i64 %conv, i64* inttoptr (i64 4080 to i64*), align 16
+  store i64 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -6120,7 +6063,7 @@ define dso_local void @st_cst_align32_uint32_t_uint64_t(i32 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i32 %str to i64
-  store i64 %conv, i64* inttoptr (i64 9999900 to i64*), align 8
+  store i64 %conv, ptr inttoptr (i64 9999900 to ptr), align 8
   ret void
 }
 
@@ -6142,7 +6085,7 @@ define dso_local void @st_cst_align64_uint32_t_uint64_t(i32 zeroext %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = zext i32 %str to i64
-  store i64 %conv, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+  store i64 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -6156,13 +6099,13 @@ define dso_local void @st_0_uint32_t_float(i64 %ptr, i32 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i32 %str to float
-  %0 = inttoptr i64 %ptr to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint32_t_float(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align16_uint32_t_float(ptr nocapture %ptr, i32 zeroext %str) {
 ; CHECK-LABEL: st_align16_uint32_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprwz f0, r4
@@ -6171,14 +6114,13 @@ define dso_local void @st_align16_uint32_t_float(i8* nocapture %ptr, i32 zeroext
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i32 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint32_t_float(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align32_uint32_t_float(ptr nocapture %ptr, i32 zeroext %str) {
 ; CHECK-P10-LABEL: st_align32_uint32_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprwz f0, r4
@@ -6205,14 +6147,13 @@ define dso_local void @st_align32_uint32_t_float(i8* nocapture %ptr, i32 zeroext
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i32 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint32_t_float(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align64_uint32_t_float(ptr nocapture %ptr, i32 zeroext %str) {
 ; CHECK-P10-LABEL: st_align64_uint32_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprwz f0, r4
@@ -6243,14 +6184,13 @@ define dso_local void @st_align64_uint32_t_float(i8* nocapture %ptr, i32 zeroext
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i32 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint32_t_float(i8* nocapture %ptr, i64 %off, i32 zeroext %str) {
+define dso_local void @st_reg_uint32_t_float(ptr nocapture %ptr, i64 %off, i32 zeroext %str) {
 ; CHECK-LABEL: st_reg_uint32_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprwz f0, r5
@@ -6259,9 +6199,8 @@ define dso_local void @st_reg_uint32_t_float(i8* nocapture %ptr, i64 %off, i32 z
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i32 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
@@ -6278,8 +6217,8 @@ entry:
   %conv = uitofp i32 %str to float
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -6295,8 +6234,8 @@ define dso_local void @st_not_disjoint16_uint32_t_float(i64 %ptr, i32 zeroext %s
 entry:
   %conv = uitofp i32 %str to float
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -6313,8 +6252,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = uitofp i32 %str to float
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 8
   ret void
 }
 
@@ -6349,8 +6288,8 @@ define dso_local void @st_not_disjoint32_uint32_t_float(i64 %ptr, i32 zeroext %s
 entry:
   %conv = uitofp i32 %str to float
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -6390,8 +6329,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = uitofp i32 %str to float
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 16
   ret void
 }
 
@@ -6422,8 +6361,8 @@ define dso_local void @st_not_disjoint64_uint32_t_float(i64 %ptr, i32 zeroext %s
 entry:
   %conv = uitofp i32 %str to float
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -6464,8 +6403,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = uitofp i32 %str to float
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4096
   ret void
 }
 
@@ -6479,7 +6418,7 @@ define dso_local void @st_cst_align16_uint32_t_float(i32 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i32 %str to float
-  store float %conv, float* inttoptr (i64 4080 to float*), align 16
+  store float %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -6494,7 +6433,7 @@ define dso_local void @st_cst_align32_uint32_t_float(i32 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i32 %str to float
-  store float %conv, float* inttoptr (i64 9999900 to float*), align 4
+  store float %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -6530,7 +6469,7 @@ define dso_local void @st_cst_align64_uint32_t_float(i32 zeroext %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i32 %str to float
-  store float %conv, float* inttoptr (i64 1000000000000 to float*), align 4096
+  store float %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -6544,13 +6483,13 @@ define dso_local void @st_0_uint32_t_double(i64 %ptr, i32 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i32 %str to double
-  %0 = inttoptr i64 %ptr to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint32_t_double(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align16_uint32_t_double(ptr nocapture %ptr, i32 zeroext %str) {
 ; CHECK-LABEL: st_align16_uint32_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprwz f0, r4
@@ -6559,14 +6498,13 @@ define dso_local void @st_align16_uint32_t_double(i8* nocapture %ptr, i32 zeroex
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i32 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint32_t_double(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align32_uint32_t_double(ptr nocapture %ptr, i32 zeroext %str) {
 ; CHECK-P10-LABEL: st_align32_uint32_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprwz f0, r4
@@ -6593,14 +6531,13 @@ define dso_local void @st_align32_uint32_t_double(i8* nocapture %ptr, i32 zeroex
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i32 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint32_t_double(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align64_uint32_t_double(ptr nocapture %ptr, i32 zeroext %str) {
 ; CHECK-P10-LABEL: st_align64_uint32_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprwz f0, r4
@@ -6631,14 +6568,13 @@ define dso_local void @st_align64_uint32_t_double(i8* nocapture %ptr, i32 zeroex
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i32 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint32_t_double(i8* nocapture %ptr, i64 %off, i32 zeroext %str) {
+define dso_local void @st_reg_uint32_t_double(ptr nocapture %ptr, i64 %off, i32 zeroext %str) {
 ; CHECK-LABEL: st_reg_uint32_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprwz f0, r5
@@ -6647,9 +6583,8 @@ define dso_local void @st_reg_uint32_t_double(i8* nocapture %ptr, i64 %off, i32
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i32 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
@@ -6666,8 +6601,8 @@ entry:
   %conv = uitofp i32 %str to double
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -6683,8 +6618,8 @@ define dso_local void @st_not_disjoint16_uint32_t_double(i64 %ptr, i32 zeroext %
 entry:
   %conv = uitofp i32 %str to double
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -6701,8 +6636,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = uitofp i32 %str to double
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -6737,8 +6672,8 @@ define dso_local void @st_not_disjoint32_uint32_t_double(i64 %ptr, i32 zeroext %
 entry:
   %conv = uitofp i32 %str to double
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -6778,8 +6713,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = uitofp i32 %str to double
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 16
   ret void
 }
 
@@ -6810,8 +6745,8 @@ define dso_local void @st_not_disjoint64_uint32_t_double(i64 %ptr, i32 zeroext %
 entry:
   %conv = uitofp i32 %str to double
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -6852,8 +6787,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = uitofp i32 %str to double
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 4096
   ret void
 }
 
@@ -6867,7 +6802,7 @@ define dso_local void @st_cst_align16_uint32_t_double(i32 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i32 %str to double
-  store double %conv, double* inttoptr (i64 4080 to double*), align 16
+  store double %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -6882,7 +6817,7 @@ define dso_local void @st_cst_align32_uint32_t_double(i32 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i32 %str to double
-  store double %conv, double* inttoptr (i64 9999900 to double*), align 8
+  store double %conv, ptr inttoptr (i64 9999900 to ptr), align 8
   ret void
 }
 
@@ -6918,7 +6853,7 @@ define dso_local void @st_cst_align64_uint32_t_double(i32 zeroext %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i32 %str to double
-  store double %conv, double* inttoptr (i64 1000000000000 to double*), align 4096
+  store double %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -6930,27 +6865,26 @@ define dso_local void @st_0_int32_t_uint64_t(i64 %ptr, i32 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i32 %str to i64
-  %0 = inttoptr i64 %ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int32_t_uint64_t(i8* nocapture %ptr, i32 signext %str) {
+define dso_local void @st_align16_int32_t_uint64_t(ptr nocapture %ptr, i32 signext %str) {
 ; CHECK-LABEL: st_align16_int32_t_uint64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    std r4, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i32 %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int32_t_uint64_t(i8* nocapture %ptr, i32 signext %str) {
+define dso_local void @st_align32_int32_t_uint64_t(ptr nocapture %ptr, i32 signext %str) {
 ; CHECK-P10-LABEL: st_align32_int32_t_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pstd r4, 99999000(r3), 0
@@ -6964,14 +6898,13 @@ define dso_local void @st_align32_int32_t_uint64_t(i8* nocapture %ptr, i32 signe
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = sext i32 %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int32_t_uint64_t(i8* nocapture %ptr, i32 signext %str) {
+define dso_local void @st_align64_int32_t_uint64_t(ptr nocapture %ptr, i32 signext %str) {
 ; CHECK-P10-LABEL: st_align64_int32_t_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r5, 244140625
@@ -6988,23 +6921,21 @@ define dso_local void @st_align64_int32_t_uint64_t(i8* nocapture %ptr, i32 signe
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = sext i32 %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int32_t_uint64_t(i8* nocapture %ptr, i64 %off, i32 signext %str) {
+define dso_local void @st_reg_int32_t_uint64_t(ptr nocapture %ptr, i64 %off, i32 signext %str) {
 ; CHECK-LABEL: st_reg_int32_t_uint64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stdx r5, r3, r4
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i32 %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
@@ -7019,8 +6950,8 @@ entry:
   %conv = sext i32 %str to i64
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -7034,8 +6965,8 @@ define dso_local void @st_not_disjoint16_int32_t_uint64_t(i64 %ptr, i32 signext
 entry:
   %conv = sext i32 %str to i64
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -7050,8 +6981,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = sext i32 %str to i64
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -7066,8 +6997,8 @@ define dso_local void @st_not_disjoint32_int32_t_uint64_t(i64 %ptr, i32 signext
 entry:
   %conv = sext i32 %str to i64
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -7101,8 +7032,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = sext i32 %str to i64
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 16
   ret void
 }
 
@@ -7129,8 +7060,8 @@ define dso_local void @st_not_disjoint64_int32_t_uint64_t(i64 %ptr, i32 signext
 entry:
   %conv = sext i32 %str to i64
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -7156,8 +7087,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = sext i32 %str to i64
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -7169,7 +7100,7 @@ define dso_local void @st_cst_align16_int32_t_uint64_t(i32 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i32 %str to i64
-  store i64 %conv, i64* inttoptr (i64 4080 to i64*), align 16
+  store i64 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -7182,7 +7113,7 @@ define dso_local void @st_cst_align32_int32_t_uint64_t(i32 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i32 %str to i64
-  store i64 %conv, i64* inttoptr (i64 9999900 to i64*), align 8
+  store i64 %conv, ptr inttoptr (i64 9999900 to ptr), align 8
   ret void
 }
 
@@ -7204,7 +7135,7 @@ define dso_local void @st_cst_align64_int32_t_uint64_t(i32 signext %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = sext i32 %str to i64
-  store i64 %conv, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+  store i64 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -7218,13 +7149,13 @@ define dso_local void @st_0_int32_t_float(i64 %ptr, i32 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i32 %str to float
-  %0 = inttoptr i64 %ptr to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int32_t_float(i8* nocapture %ptr, i32 signext %str) {
+define dso_local void @st_align16_int32_t_float(ptr nocapture %ptr, i32 signext %str) {
 ; CHECK-LABEL: st_align16_int32_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprwa f0, r4
@@ -7233,14 +7164,13 @@ define dso_local void @st_align16_int32_t_float(i8* nocapture %ptr, i32 signext
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i32 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int32_t_float(i8* nocapture %ptr, i32 signext %str) {
+define dso_local void @st_align32_int32_t_float(ptr nocapture %ptr, i32 signext %str) {
 ; CHECK-P10-LABEL: st_align32_int32_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprwa f0, r4
@@ -7267,14 +7197,13 @@ define dso_local void @st_align32_int32_t_float(i8* nocapture %ptr, i32 signext
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i32 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int32_t_float(i8* nocapture %ptr, i32 signext %str) {
+define dso_local void @st_align64_int32_t_float(ptr nocapture %ptr, i32 signext %str) {
 ; CHECK-P10-LABEL: st_align64_int32_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprwa f0, r4
@@ -7305,14 +7234,13 @@ define dso_local void @st_align64_int32_t_float(i8* nocapture %ptr, i32 signext
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i32 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int32_t_float(i8* nocapture %ptr, i64 %off, i32 signext %str) {
+define dso_local void @st_reg_int32_t_float(ptr nocapture %ptr, i64 %off, i32 signext %str) {
 ; CHECK-LABEL: st_reg_int32_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprwa f0, r5
@@ -7321,9 +7249,8 @@ define dso_local void @st_reg_int32_t_float(i8* nocapture %ptr, i64 %off, i32 si
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i32 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
@@ -7340,8 +7267,8 @@ entry:
   %conv = sitofp i32 %str to float
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -7357,8 +7284,8 @@ define dso_local void @st_not_disjoint16_int32_t_float(i64 %ptr, i32 signext %st
 entry:
   %conv = sitofp i32 %str to float
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -7375,8 +7302,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = sitofp i32 %str to float
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 8
   ret void
 }
 
@@ -7411,8 +7338,8 @@ define dso_local void @st_not_disjoint32_int32_t_float(i64 %ptr, i32 signext %st
 entry:
   %conv = sitofp i32 %str to float
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -7452,8 +7379,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = sitofp i32 %str to float
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 16
   ret void
 }
 
@@ -7484,8 +7411,8 @@ define dso_local void @st_not_disjoint64_int32_t_float(i64 %ptr, i32 signext %st
 entry:
   %conv = sitofp i32 %str to float
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -7526,8 +7453,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = sitofp i32 %str to float
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4096
   ret void
 }
 
@@ -7541,7 +7468,7 @@ define dso_local void @st_cst_align16_int32_t_float(i32 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i32 %str to float
-  store float %conv, float* inttoptr (i64 4080 to float*), align 16
+  store float %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -7556,7 +7483,7 @@ define dso_local void @st_cst_align32_int32_t_float(i32 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i32 %str to float
-  store float %conv, float* inttoptr (i64 9999900 to float*), align 4
+  store float %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -7592,7 +7519,7 @@ define dso_local void @st_cst_align64_int32_t_float(i32 signext %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i32 %str to float
-  store float %conv, float* inttoptr (i64 1000000000000 to float*), align 4096
+  store float %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -7606,13 +7533,13 @@ define dso_local void @st_0_int32_t_double(i64 %ptr, i32 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i32 %str to double
-  %0 = inttoptr i64 %ptr to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int32_t_double(i8* nocapture %ptr, i32 signext %str) {
+define dso_local void @st_align16_int32_t_double(ptr nocapture %ptr, i32 signext %str) {
 ; CHECK-LABEL: st_align16_int32_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprwa f0, r4
@@ -7621,14 +7548,13 @@ define dso_local void @st_align16_int32_t_double(i8* nocapture %ptr, i32 signext
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i32 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int32_t_double(i8* nocapture %ptr, i32 signext %str) {
+define dso_local void @st_align32_int32_t_double(ptr nocapture %ptr, i32 signext %str) {
 ; CHECK-P10-LABEL: st_align32_int32_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprwa f0, r4
@@ -7655,14 +7581,13 @@ define dso_local void @st_align32_int32_t_double(i8* nocapture %ptr, i32 signext
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i32 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int32_t_double(i8* nocapture %ptr, i32 signext %str) {
+define dso_local void @st_align64_int32_t_double(ptr nocapture %ptr, i32 signext %str) {
 ; CHECK-P10-LABEL: st_align64_int32_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprwa f0, r4
@@ -7693,14 +7618,13 @@ define dso_local void @st_align64_int32_t_double(i8* nocapture %ptr, i32 signext
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i32 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int32_t_double(i8* nocapture %ptr, i64 %off, i32 signext %str) {
+define dso_local void @st_reg_int32_t_double(ptr nocapture %ptr, i64 %off, i32 signext %str) {
 ; CHECK-LABEL: st_reg_int32_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprwa f0, r5
@@ -7709,9 +7633,8 @@ define dso_local void @st_reg_int32_t_double(i8* nocapture %ptr, i64 %off, i32 s
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i32 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
@@ -7728,8 +7651,8 @@ entry:
   %conv = sitofp i32 %str to double
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -7745,8 +7668,8 @@ define dso_local void @st_not_disjoint16_int32_t_double(i64 %ptr, i32 signext %s
 entry:
   %conv = sitofp i32 %str to double
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -7763,8 +7686,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = sitofp i32 %str to double
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -7799,8 +7722,8 @@ define dso_local void @st_not_disjoint32_int32_t_double(i64 %ptr, i32 signext %s
 entry:
   %conv = sitofp i32 %str to double
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -7840,8 +7763,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = sitofp i32 %str to double
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 16
   ret void
 }
 
@@ -7872,8 +7795,8 @@ define dso_local void @st_not_disjoint64_int32_t_double(i64 %ptr, i32 signext %s
 entry:
   %conv = sitofp i32 %str to double
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -7914,8 +7837,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = sitofp i32 %str to double
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 4096
   ret void
 }
 
@@ -7929,7 +7852,7 @@ define dso_local void @st_cst_align16_int32_t_double(i32 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i32 %str to double
-  store double %conv, double* inttoptr (i64 4080 to double*), align 16
+  store double %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -7944,7 +7867,7 @@ define dso_local void @st_cst_align32_int32_t_double(i32 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i32 %str to double
-  store double %conv, double* inttoptr (i64 9999900 to double*), align 8
+  store double %conv, ptr inttoptr (i64 9999900 to ptr), align 8
   ret void
 }
 
@@ -7980,6 +7903,6 @@ define dso_local void @st_cst_align64_int32_t_double(i32 signext %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i32 %str to double
-  store double %conv, double* inttoptr (i64 1000000000000 to double*), align 4096
+  store double %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/scalar-i64-ldst.ll b/llvm/test/CodeGen/PowerPC/scalar-i64-ldst.ll
index a9f0bc31ab18..ab0711577f35 100644
--- a/llvm/test/CodeGen/PowerPC/scalar-i64-ldst.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar-i64-ldst.ll
@@ -27,14 +27,14 @@ define dso_local i64 @ld_0_int64_t_float(i64 %ptr) {
 ; CHECK-NEXT:    mffprd r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptosi float %1 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align16_int64_t_float(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_int64_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_int64_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs f0, 8(r3)
@@ -42,15 +42,14 @@ define dso_local i64 @ld_align16_int64_t_float(i8* nocapture readonly %ptr) {
 ; CHECK-NEXT:    mffprd r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptosi float %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptosi float %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align32_int64_t_float(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_int64_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_int64_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfs f0, 99999000(r3), 0
@@ -67,15 +66,14 @@ define dso_local i64 @ld_align32_int64_t_float(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    mffprd r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptosi float %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptosi float %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align64_int64_t_float(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_int64_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_int64_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -95,15 +93,14 @@ define dso_local i64 @ld_align64_int64_t_float(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    mffprd r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptosi float %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptosi float %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_reg_int64_t_float(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_int64_t_float(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_int64_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfsx f0, r3, r4
@@ -111,10 +108,9 @@ define dso_local i64 @ld_reg_int64_t_float(i8* nocapture readonly %ptr, i64 %off
 ; CHECK-NEXT:    mffprd r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptosi float %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptosi float %0 to i64
   ret i64 %conv
 }
 
@@ -130,8 +126,8 @@ define dso_local i64 @ld_or_int64_t_float(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv1 = fptosi float %1 to i64
   ret i64 %conv1
 }
@@ -147,8 +143,8 @@ define dso_local i64 @ld_not_disjoint16_int64_t_float(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptosi float %1 to i64
   ret i64 %conv
 }
@@ -165,8 +161,8 @@ define dso_local i64 @ld_disjoint_align16_int64_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 8
   %conv = fptosi float %1 to i64
   ret i64 %conv
 }
@@ -183,8 +179,8 @@ define dso_local i64 @ld_not_disjoint32_int64_t_float(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptosi float %1 to i64
   ret i64 %conv
 }
@@ -224,8 +220,8 @@ define dso_local i64 @ld_disjoint_align32_int64_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 16
   %conv = fptosi float %1 to i64
   ret i64 %conv
 }
@@ -256,8 +252,8 @@ define dso_local i64 @ld_not_disjoint64_int64_t_float(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptosi float %1 to i64
   ret i64 %conv
 }
@@ -287,8 +283,8 @@ define dso_local i64 @ld_disjoint_align64_int64_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4096
   %conv = fptosi float %1 to i64
   ret i64 %conv
 }
@@ -302,7 +298,7 @@ define dso_local i64 @ld_cst_align16_int64_t_float() {
 ; CHECK-NEXT:    mffprd r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 4080 to float*), align 16
+  %0 = load float, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = fptosi float %0 to i64
   ret i64 %conv
 }
@@ -317,7 +313,7 @@ define dso_local i64 @ld_cst_align32_int64_t_float() {
 ; CHECK-NEXT:    mffprd r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 9999900 to float*), align 4
+  %0 = load float, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = fptosi float %0 to i64
   ret i64 %conv
 }
@@ -343,7 +339,7 @@ define dso_local i64 @ld_cst_align64_int64_t_float() {
 ; CHECK-PREP10-NEXT:    mffprd r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 1000000000000 to float*), align 4096
+  %0 = load float, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = fptosi float %0 to i64
   ret i64 %conv
 }
@@ -357,14 +353,14 @@ define dso_local i64 @ld_0_int64_t_double(i64 %ptr) {
 ; CHECK-NEXT:    mffprd r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptosi double %1 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align16_int64_t_double(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_int64_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_int64_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd f0, 8(r3)
@@ -372,15 +368,14 @@ define dso_local i64 @ld_align16_int64_t_double(i8* nocapture readonly %ptr) {
 ; CHECK-NEXT:    mffprd r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptosi double %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptosi double %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align32_int64_t_double(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_int64_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_int64_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfd f0, 99999000(r3), 0
@@ -397,15 +392,14 @@ define dso_local i64 @ld_align32_int64_t_double(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    mffprd r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptosi double %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptosi double %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align64_int64_t_double(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_int64_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_int64_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -425,15 +419,14 @@ define dso_local i64 @ld_align64_int64_t_double(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    mffprd r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptosi double %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptosi double %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_reg_int64_t_double(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_int64_t_double(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_int64_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfdx f0, r3, r4
@@ -441,10 +434,9 @@ define dso_local i64 @ld_reg_int64_t_double(i8* nocapture readonly %ptr, i64 %of
 ; CHECK-NEXT:    mffprd r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptosi double %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptosi double %0 to i64
   ret i64 %conv
 }
 
@@ -460,8 +452,8 @@ define dso_local i64 @ld_or_int64_t_double(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv1 = fptosi double %1 to i64
   ret i64 %conv1
 }
@@ -477,8 +469,8 @@ define dso_local i64 @ld_not_disjoint16_int64_t_double(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptosi double %1 to i64
   ret i64 %conv
 }
@@ -495,8 +487,8 @@ define dso_local i64 @ld_disjoint_align16_int64_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptosi double %1 to i64
   ret i64 %conv
 }
@@ -513,8 +505,8 @@ define dso_local i64 @ld_not_disjoint32_int64_t_double(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptosi double %1 to i64
   ret i64 %conv
 }
@@ -554,8 +546,8 @@ define dso_local i64 @ld_disjoint_align32_int64_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 16
   %conv = fptosi double %1 to i64
   ret i64 %conv
 }
@@ -586,8 +578,8 @@ define dso_local i64 @ld_not_disjoint64_int64_t_double(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptosi double %1 to i64
   ret i64 %conv
 }
@@ -617,8 +609,8 @@ define dso_local i64 @ld_disjoint_align64_int64_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 4096
   %conv = fptosi double %1 to i64
   ret i64 %conv
 }
@@ -632,7 +624,7 @@ define dso_local i64 @ld_cst_align16_int64_t_double() {
 ; CHECK-NEXT:    mffprd r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 4080 to double*), align 16
+  %0 = load double, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = fptosi double %0 to i64
   ret i64 %conv
 }
@@ -647,7 +639,7 @@ define dso_local i64 @ld_cst_align32_int64_t_double() {
 ; CHECK-NEXT:    mffprd r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 9999900 to double*), align 8
+  %0 = load double, ptr inttoptr (i64 9999900 to ptr), align 8
   %conv = fptosi double %0 to i64
   ret i64 %conv
 }
@@ -673,7 +665,7 @@ define dso_local i64 @ld_cst_align64_int64_t_double() {
 ; CHECK-PREP10-NEXT:    mffprd r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 1000000000000 to double*), align 4096
+  %0 = load double, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = fptosi double %0 to i64
   ret i64 %conv
 }
@@ -685,40 +677,40 @@ define dso_local i64 @ld_0_uint64_t_uint8_t(i64 %ptr) {
 ; CHECK-NEXT:    lbz r3, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = zext i8 %1 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign16_uint64_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign16_uint64_t_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_unalign16_uint64_t_uint8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lbz r3, 1(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = zext i8 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align16_uint64_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_uint64_t_uint8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lbz r3, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = zext i8 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign32_uint64_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign32_uint64_t_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign32_uint64_t_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plbz r3, 99999(r3), 0
@@ -731,14 +723,14 @@ define dso_local i64 @ld_unalign32_uint64_t_uint8_t(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    lbzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = zext i8 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align32_uint64_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_uint64_t_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plbz r3, 99999000(r3), 0
@@ -751,14 +743,14 @@ define dso_local i64 @ld_align32_uint64_t_uint8_t(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    lbzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = zext i8 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign64_uint64_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign64_uint64_t_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign64_uint64_t_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 232
@@ -776,14 +768,14 @@ define dso_local i64 @ld_unalign64_uint64_t_uint8_t(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    lbzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = zext i8 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align64_uint64_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_uint64_t_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -799,21 +791,21 @@ define dso_local i64 @ld_align64_uint64_t_uint8_t(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    lbzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = zext i8 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_reg_uint64_t_uint8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_uint8_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_uint64_t_uint8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lbzx r3, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = zext i8 %0 to i64
   ret i64 %conv
 }
@@ -828,8 +820,8 @@ define dso_local i64 @ld_or_uint64_t_uint8_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv1 = zext i8 %1 to i64
   ret i64 %conv1
 }
@@ -845,8 +837,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = zext i8 %off to i64
   %or = or i64 %and, %conv
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv1 = zext i8 %1 to i64
   ret i64 %conv1
 }
@@ -860,8 +852,8 @@ define dso_local i64 @ld_not_disjoint16_uint64_t_uint8_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = zext i8 %1 to i64
   ret i64 %conv
 }
@@ -876,8 +868,8 @@ define dso_local i64 @ld_disjoint_unalign16_uint64_t_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 6
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 2
   %conv = zext i8 %1 to i64
   ret i64 %conv
 }
@@ -892,8 +884,8 @@ define dso_local i64 @ld_disjoint_align16_uint64_t_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 8
   %conv = zext i8 %1 to i64
   ret i64 %conv
 }
@@ -908,8 +900,8 @@ define dso_local i64 @ld_not_disjoint32_uint64_t_uint8_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = zext i8 %1 to i64
   ret i64 %conv
 }
@@ -932,8 +924,8 @@ define dso_local i64 @ld_disjoint_unalign32_uint64_t_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1048576
   %or = or i64 %and, 99999
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = zext i8 %1 to i64
   ret i64 %conv
 }
@@ -967,8 +959,8 @@ define dso_local i64 @ld_disjoint_align32_uint64_t_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 16
   %conv = zext i8 %1 to i64
   ret i64 %conv
 }
@@ -995,8 +987,8 @@ define dso_local i64 @ld_not_disjoint64_uint64_t_uint8_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = zext i8 %1 to i64
   ret i64 %conv
 }
@@ -1024,8 +1016,8 @@ define dso_local i64 @ld_disjoint_unalign64_uint64_t_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = zext i8 %1 to i64
   ret i64 %conv
 }
@@ -1051,8 +1043,8 @@ define dso_local i64 @ld_disjoint_align64_uint64_t_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 4096
   %conv = zext i8 %1 to i64
   ret i64 %conv
 }
@@ -1064,7 +1056,7 @@ define dso_local i64 @ld_cst_unalign16_uint64_t_uint8_t() {
 ; CHECK-NEXT:    lbz r3, 255(0)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 255 to i8*), align 1
+  %0 = load i8, ptr inttoptr (i64 255 to ptr), align 1
   %conv = zext i8 %0 to i64
   ret i64 %conv
 }
@@ -1076,7 +1068,7 @@ define dso_local i64 @ld_cst_align16_uint64_t_uint8_t() {
 ; CHECK-NEXT:    lbz r3, 4080(0)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+  %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = zext i8 %0 to i64
   ret i64 %conv
 }
@@ -1089,7 +1081,7 @@ define dso_local i64 @ld_cst_unalign32_uint64_t_uint8_t() {
 ; CHECK-NEXT:    lbz r3, -31073(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 99999 to i8*), align 1
+  %0 = load i8, ptr inttoptr (i64 99999 to ptr), align 1
   %conv = zext i8 %0 to i64
   ret i64 %conv
 }
@@ -1102,7 +1094,7 @@ define dso_local i64 @ld_cst_align32_uint64_t_uint8_t() {
 ; CHECK-NEXT:    lbz r3, -27108(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+  %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = zext i8 %0 to i64
   ret i64 %conv
 }
@@ -1126,7 +1118,7 @@ define dso_local i64 @ld_cst_unalign64_uint64_t_uint8_t() {
 ; CHECK-PREP10-NEXT:    lbz r3, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 1000000000001 to i8*), align 1
+  %0 = load i8, ptr inttoptr (i64 1000000000001 to ptr), align 1
   %conv = zext i8 %0 to i64
   ret i64 %conv
 }
@@ -1148,7 +1140,7 @@ define dso_local i64 @ld_cst_align64_uint64_t_uint8_t() {
 ; CHECK-PREP10-NEXT:    lbz r3, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+  %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = zext i8 %0 to i64
   ret i64 %conv
 }
@@ -1161,42 +1153,42 @@ define dso_local i64 @ld_0_uint64_t_int8_t(i64 %ptr) {
 ; CHECK-NEXT:    extsb r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sext i8 %1 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign16_uint64_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign16_uint64_t_int8_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_unalign16_uint64_t_int8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lbz r3, 1(r3)
 ; CHECK-NEXT:    extsb r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sext i8 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align16_uint64_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_int8_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_uint64_t_int8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lbz r3, 8(r3)
 ; CHECK-NEXT:    extsb r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sext i8 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign32_uint64_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign32_uint64_t_int8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign32_uint64_t_int8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plbz r3, 99999(r3), 0
@@ -1211,14 +1203,14 @@ define dso_local i64 @ld_unalign32_uint64_t_int8_t(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    extsb r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sext i8 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align32_uint64_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_int8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_uint64_t_int8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plbz r3, 99999000(r3), 0
@@ -1233,14 +1225,14 @@ define dso_local i64 @ld_align32_uint64_t_int8_t(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    extsb r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sext i8 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign64_uint64_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign64_uint64_t_int8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign64_uint64_t_int8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 232
@@ -1260,14 +1252,14 @@ define dso_local i64 @ld_unalign64_uint64_t_int8_t(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    extsb r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sext i8 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align64_uint64_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_int8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_uint64_t_int8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -1285,22 +1277,22 @@ define dso_local i64 @ld_align64_uint64_t_int8_t(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    extsb r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sext i8 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_reg_uint64_t_int8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_int8_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_uint64_t_int8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lbzx r3, r3, r4
 ; CHECK-NEXT:    extsb r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = sext i8 %0 to i64
   ret i64 %conv
 }
@@ -1316,8 +1308,8 @@ define dso_local i64 @ld_or_uint64_t_int8_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv1 = sext i8 %1 to i64
   ret i64 %conv1
 }
@@ -1334,8 +1326,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = zext i8 %off to i64
   %or = or i64 %and, %conv
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv1 = sext i8 %1 to i64
   ret i64 %conv1
 }
@@ -1350,8 +1342,8 @@ define dso_local i64 @ld_not_disjoint16_uint64_t_int8_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sext i8 %1 to i64
   ret i64 %conv
 }
@@ -1367,8 +1359,8 @@ define dso_local i64 @ld_disjoint_unalign16_uint64_t_int8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 6
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 2
   %conv = sext i8 %1 to i64
   ret i64 %conv
 }
@@ -1384,8 +1376,8 @@ define dso_local i64 @ld_disjoint_align16_uint64_t_int8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 8
   %conv = sext i8 %1 to i64
   ret i64 %conv
 }
@@ -1401,8 +1393,8 @@ define dso_local i64 @ld_not_disjoint32_uint64_t_int8_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sext i8 %1 to i64
   ret i64 %conv
 }
@@ -1427,8 +1419,8 @@ define dso_local i64 @ld_disjoint_unalign32_uint64_t_int8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1048576
   %or = or i64 %and, 99999
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sext i8 %1 to i64
   ret i64 %conv
 }
@@ -1465,8 +1457,8 @@ define dso_local i64 @ld_disjoint_align32_uint64_t_int8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 16
   %conv = sext i8 %1 to i64
   ret i64 %conv
 }
@@ -1495,8 +1487,8 @@ define dso_local i64 @ld_not_disjoint64_uint64_t_int8_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sext i8 %1 to i64
   ret i64 %conv
 }
@@ -1526,8 +1518,8 @@ define dso_local i64 @ld_disjoint_unalign64_uint64_t_int8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   %conv = sext i8 %1 to i64
   ret i64 %conv
 }
@@ -1555,8 +1547,8 @@ define dso_local i64 @ld_disjoint_align64_uint64_t_int8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 4096
   %conv = sext i8 %1 to i64
   ret i64 %conv
 }
@@ -1569,7 +1561,7 @@ define dso_local i64 @ld_cst_unalign16_uint64_t_int8_t() {
 ; CHECK-NEXT:    extsb r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 255 to i8*), align 1
+  %0 = load i8, ptr inttoptr (i64 255 to ptr), align 1
   %conv = sext i8 %0 to i64
   ret i64 %conv
 }
@@ -1582,7 +1574,7 @@ define dso_local i64 @ld_cst_align16_uint64_t_int8_t() {
 ; CHECK-NEXT:    extsb r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+  %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = sext i8 %0 to i64
   ret i64 %conv
 }
@@ -1596,7 +1588,7 @@ define dso_local i64 @ld_cst_unalign32_uint64_t_int8_t() {
 ; CHECK-NEXT:    extsb r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 99999 to i8*), align 1
+  %0 = load i8, ptr inttoptr (i64 99999 to ptr), align 1
   %conv = sext i8 %0 to i64
   ret i64 %conv
 }
@@ -1610,7 +1602,7 @@ define dso_local i64 @ld_cst_align32_uint64_t_int8_t() {
 ; CHECK-NEXT:    extsb r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+  %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = sext i8 %0 to i64
   ret i64 %conv
 }
@@ -1636,7 +1628,7 @@ define dso_local i64 @ld_cst_unalign64_uint64_t_int8_t() {
 ; CHECK-PREP10-NEXT:    extsb r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 1000000000001 to i8*), align 1
+  %0 = load i8, ptr inttoptr (i64 1000000000001 to ptr), align 1
   %conv = sext i8 %0 to i64
   ret i64 %conv
 }
@@ -1660,7 +1652,7 @@ define dso_local i64 @ld_cst_align64_uint64_t_int8_t() {
 ; CHECK-PREP10-NEXT:    extsb r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+  %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = sext i8 %0 to i64
   ret i64 %conv
 }
@@ -1672,42 +1664,40 @@ define dso_local i64 @ld_0_uint64_t_uint16_t(i64 %ptr) {
 ; CHECK-NEXT:    lhz r3, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = zext i16 %1 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign16_uint64_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign16_uint64_t_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_unalign16_uint64_t_uint16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lhz r3, 1(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = zext i16 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = zext i16 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align16_uint64_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_uint64_t_uint16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lhz r3, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = zext i16 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = zext i16 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign32_uint64_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign32_uint64_t_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign32_uint64_t_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plhz r3, 99999(r3), 0
@@ -1720,15 +1710,14 @@ define dso_local i64 @ld_unalign32_uint64_t_uint16_t(i8* nocapture readonly %ptr
 ; CHECK-PREP10-NEXT:    lhzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = zext i16 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = zext i16 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align32_uint64_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_uint64_t_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plhz r3, 99999000(r3), 0
@@ -1741,15 +1730,14 @@ define dso_local i64 @ld_align32_uint64_t_uint16_t(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    lhzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = zext i16 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = zext i16 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign64_uint64_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign64_uint64_t_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign64_uint64_t_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 232
@@ -1767,15 +1755,14 @@ define dso_local i64 @ld_unalign64_uint64_t_uint16_t(i8* nocapture readonly %ptr
 ; CHECK-PREP10-NEXT:    lhzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = zext i16 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = zext i16 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align64_uint64_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_uint64_t_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -1791,24 +1778,22 @@ define dso_local i64 @ld_align64_uint64_t_uint16_t(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    lhzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = zext i16 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = zext i16 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_reg_uint64_t_uint16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_uint16_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_uint64_t_uint16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lhzx r3, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = zext i16 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = zext i16 %0 to i64
   ret i64 %conv
 }
 
@@ -1822,8 +1807,8 @@ define dso_local i64 @ld_or_uint64_t_uint16_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv1 = zext i16 %1 to i64
   ret i64 %conv1
 }
@@ -1839,8 +1824,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = zext i8 %off to i64
   %or = or i64 %and, %conv
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv1 = zext i16 %1 to i64
   ret i64 %conv1
 }
@@ -1854,8 +1839,8 @@ define dso_local i64 @ld_not_disjoint16_uint64_t_uint16_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = zext i16 %1 to i64
   ret i64 %conv
 }
@@ -1870,8 +1855,8 @@ define dso_local i64 @ld_disjoint_unalign16_uint64_t_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 6
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = zext i16 %1 to i64
   ret i64 %conv
 }
@@ -1886,8 +1871,8 @@ define dso_local i64 @ld_disjoint_align16_uint64_t_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 8
   %conv = zext i16 %1 to i64
   ret i64 %conv
 }
@@ -1902,8 +1887,8 @@ define dso_local i64 @ld_not_disjoint32_uint64_t_uint16_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = zext i16 %1 to i64
   ret i64 %conv
 }
@@ -1926,8 +1911,8 @@ define dso_local i64 @ld_disjoint_unalign32_uint64_t_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1048576
   %or = or i64 %and, 99999
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = zext i16 %1 to i64
   ret i64 %conv
 }
@@ -1961,8 +1946,8 @@ define dso_local i64 @ld_disjoint_align32_uint64_t_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 16
   %conv = zext i16 %1 to i64
   ret i64 %conv
 }
@@ -1989,8 +1974,8 @@ define dso_local i64 @ld_not_disjoint64_uint64_t_uint16_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = zext i16 %1 to i64
   ret i64 %conv
 }
@@ -2018,8 +2003,8 @@ define dso_local i64 @ld_disjoint_unalign64_uint64_t_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = zext i16 %1 to i64
   ret i64 %conv
 }
@@ -2045,8 +2030,8 @@ define dso_local i64 @ld_disjoint_align64_uint64_t_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 4096
   %conv = zext i16 %1 to i64
   ret i64 %conv
 }
@@ -2058,7 +2043,7 @@ define dso_local i64 @ld_cst_unalign16_uint64_t_uint16_t() {
 ; CHECK-NEXT:    lhz r3, 255(0)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 255 to i16*), align 2
+  %0 = load i16, ptr inttoptr (i64 255 to ptr), align 2
   %conv = zext i16 %0 to i64
   ret i64 %conv
 }
@@ -2070,7 +2055,7 @@ define dso_local i64 @ld_cst_align16_uint64_t_uint16_t() {
 ; CHECK-NEXT:    lhz r3, 4080(0)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+  %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = zext i16 %0 to i64
   ret i64 %conv
 }
@@ -2083,7 +2068,7 @@ define dso_local i64 @ld_cst_unalign32_uint64_t_uint16_t() {
 ; CHECK-NEXT:    lhz r3, -31073(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 99999 to i16*), align 2
+  %0 = load i16, ptr inttoptr (i64 99999 to ptr), align 2
   %conv = zext i16 %0 to i64
   ret i64 %conv
 }
@@ -2096,7 +2081,7 @@ define dso_local i64 @ld_cst_align32_uint64_t_uint16_t() {
 ; CHECK-NEXT:    lhz r3, -27108(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+  %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = zext i16 %0 to i64
   ret i64 %conv
 }
@@ -2120,7 +2105,7 @@ define dso_local i64 @ld_cst_unalign64_uint64_t_uint16_t() {
 ; CHECK-PREP10-NEXT:    lhz r3, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 1000000000001 to i16*), align 2
+  %0 = load i16, ptr inttoptr (i64 1000000000001 to ptr), align 2
   %conv = zext i16 %0 to i64
   ret i64 %conv
 }
@@ -2142,7 +2127,7 @@ define dso_local i64 @ld_cst_align64_uint64_t_uint16_t() {
 ; CHECK-PREP10-NEXT:    lhz r3, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+  %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = zext i16 %0 to i64
   ret i64 %conv
 }
@@ -2154,42 +2139,40 @@ define dso_local i64 @ld_0_uint64_t_int16_t(i64 %ptr) {
 ; CHECK-NEXT:    lha r3, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = sext i16 %1 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign16_uint64_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign16_uint64_t_int16_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_unalign16_uint64_t_int16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lha r3, 1(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = sext i16 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = sext i16 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align16_uint64_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_int16_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_uint64_t_int16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lha r3, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = sext i16 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = sext i16 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign32_uint64_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign32_uint64_t_int16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign32_uint64_t_int16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plha r3, 99999(r3), 0
@@ -2202,15 +2185,14 @@ define dso_local i64 @ld_unalign32_uint64_t_int16_t(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    lhax r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = sext i16 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = sext i16 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align32_uint64_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_int16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_uint64_t_int16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plha r3, 99999000(r3), 0
@@ -2223,15 +2205,14 @@ define dso_local i64 @ld_align32_uint64_t_int16_t(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    lhax r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = sext i16 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = sext i16 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign64_uint64_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign64_uint64_t_int16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign64_uint64_t_int16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 232
@@ -2249,15 +2230,14 @@ define dso_local i64 @ld_unalign64_uint64_t_int16_t(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    lhax r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = sext i16 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = sext i16 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align64_uint64_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_int16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_uint64_t_int16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -2273,24 +2253,22 @@ define dso_local i64 @ld_align64_uint64_t_int16_t(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    lhax r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = sext i16 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = sext i16 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_reg_uint64_t_int16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_int16_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_uint64_t_int16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lhax r3, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = sext i16 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = sext i16 %0 to i64
   ret i64 %conv
 }
 
@@ -2304,8 +2282,8 @@ define dso_local i64 @ld_or_uint64_t_int16_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv1 = sext i16 %1 to i64
   ret i64 %conv1
 }
@@ -2321,8 +2299,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = zext i8 %off to i64
   %or = or i64 %and, %conv
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv1 = sext i16 %1 to i64
   ret i64 %conv1
 }
@@ -2336,8 +2314,8 @@ define dso_local i64 @ld_not_disjoint16_uint64_t_int16_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = sext i16 %1 to i64
   ret i64 %conv
 }
@@ -2352,8 +2330,8 @@ define dso_local i64 @ld_disjoint_unalign16_uint64_t_int16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 6
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = sext i16 %1 to i64
   ret i64 %conv
 }
@@ -2368,8 +2346,8 @@ define dso_local i64 @ld_disjoint_align16_uint64_t_int16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 8
   %conv = sext i16 %1 to i64
   ret i64 %conv
 }
@@ -2384,8 +2362,8 @@ define dso_local i64 @ld_not_disjoint32_uint64_t_int16_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = sext i16 %1 to i64
   ret i64 %conv
 }
@@ -2408,8 +2386,8 @@ define dso_local i64 @ld_disjoint_unalign32_uint64_t_int16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1048576
   %or = or i64 %and, 99999
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = sext i16 %1 to i64
   ret i64 %conv
 }
@@ -2443,8 +2421,8 @@ define dso_local i64 @ld_disjoint_align32_uint64_t_int16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 16
   %conv = sext i16 %1 to i64
   ret i64 %conv
 }
@@ -2471,8 +2449,8 @@ define dso_local i64 @ld_not_disjoint64_uint64_t_int16_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = sext i16 %1 to i64
   ret i64 %conv
 }
@@ -2500,8 +2478,8 @@ define dso_local i64 @ld_disjoint_unalign64_uint64_t_int16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = sext i16 %1 to i64
   ret i64 %conv
 }
@@ -2527,8 +2505,8 @@ define dso_local i64 @ld_disjoint_align64_uint64_t_int16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 4096
   %conv = sext i16 %1 to i64
   ret i64 %conv
 }
@@ -2540,7 +2518,7 @@ define dso_local i64 @ld_cst_unalign16_uint64_t_int16_t() {
 ; CHECK-NEXT:    lha r3, 255(0)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 255 to i16*), align 2
+  %0 = load i16, ptr inttoptr (i64 255 to ptr), align 2
   %conv = sext i16 %0 to i64
   ret i64 %conv
 }
@@ -2552,7 +2530,7 @@ define dso_local i64 @ld_cst_align16_uint64_t_int16_t() {
 ; CHECK-NEXT:    lha r3, 4080(0)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+  %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = sext i16 %0 to i64
   ret i64 %conv
 }
@@ -2565,7 +2543,7 @@ define dso_local i64 @ld_cst_unalign32_uint64_t_int16_t() {
 ; CHECK-NEXT:    lha r3, -31073(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 99999 to i16*), align 2
+  %0 = load i16, ptr inttoptr (i64 99999 to ptr), align 2
   %conv = sext i16 %0 to i64
   ret i64 %conv
 }
@@ -2578,7 +2556,7 @@ define dso_local i64 @ld_cst_align32_uint64_t_int16_t() {
 ; CHECK-NEXT:    lha r3, -27108(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+  %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = sext i16 %0 to i64
   ret i64 %conv
 }
@@ -2602,7 +2580,7 @@ define dso_local i64 @ld_cst_unalign64_uint64_t_int16_t() {
 ; CHECK-PREP10-NEXT:    lha r3, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 1000000000001 to i16*), align 2
+  %0 = load i16, ptr inttoptr (i64 1000000000001 to ptr), align 2
   %conv = sext i16 %0 to i64
   ret i64 %conv
 }
@@ -2624,7 +2602,7 @@ define dso_local i64 @ld_cst_align64_uint64_t_int16_t() {
 ; CHECK-PREP10-NEXT:    lha r3, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+  %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = sext i16 %0 to i64
   ret i64 %conv
 }
@@ -2636,42 +2614,40 @@ define dso_local i64 @ld_0_uint64_t_uint32_t(i64 %ptr) {
 ; CHECK-NEXT:    lwz r3, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = zext i32 %1 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign16_uint64_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign16_uint64_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_unalign16_uint64_t_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lwz r3, 1(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = zext i32 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = zext i32 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align16_uint64_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_uint64_t_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lwz r3, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = zext i32 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = zext i32 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign32_uint64_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign32_uint64_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign32_uint64_t_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plwz r3, 99999(r3), 0
@@ -2684,15 +2660,14 @@ define dso_local i64 @ld_unalign32_uint64_t_uint32_t(i8* nocapture readonly %ptr
 ; CHECK-PREP10-NEXT:    lwzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = zext i32 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = zext i32 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align32_uint64_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_uint64_t_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plwz r3, 99999000(r3), 0
@@ -2705,15 +2680,14 @@ define dso_local i64 @ld_align32_uint64_t_uint32_t(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    lwzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = zext i32 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = zext i32 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign64_uint64_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign64_uint64_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign64_uint64_t_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 232
@@ -2731,15 +2705,14 @@ define dso_local i64 @ld_unalign64_uint64_t_uint32_t(i8* nocapture readonly %ptr
 ; CHECK-PREP10-NEXT:    lwzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = zext i32 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = zext i32 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align64_uint64_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_uint64_t_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -2755,24 +2728,22 @@ define dso_local i64 @ld_align64_uint64_t_uint32_t(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    lwzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = zext i32 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = zext i32 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_reg_uint64_t_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_uint64_t_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lwzx r3, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = zext i32 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = zext i32 %0 to i64
   ret i64 %conv
 }
 
@@ -2786,8 +2757,8 @@ define dso_local i64 @ld_or_uint64_t_uint32_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv1 = zext i32 %1 to i64
   ret i64 %conv1
 }
@@ -2803,8 +2774,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = zext i8 %off to i64
   %or = or i64 %and, %conv
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv1 = zext i32 %1 to i64
   ret i64 %conv1
 }
@@ -2818,8 +2789,8 @@ define dso_local i64 @ld_not_disjoint16_uint64_t_uint32_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = zext i32 %1 to i64
   ret i64 %conv
 }
@@ -2834,8 +2805,8 @@ define dso_local i64 @ld_disjoint_unalign16_uint64_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 6
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = zext i32 %1 to i64
   ret i64 %conv
 }
@@ -2850,8 +2821,8 @@ define dso_local i64 @ld_disjoint_align16_uint64_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 8
   %conv = zext i32 %1 to i64
   ret i64 %conv
 }
@@ -2866,8 +2837,8 @@ define dso_local i64 @ld_not_disjoint32_uint64_t_uint32_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = zext i32 %1 to i64
   ret i64 %conv
 }
@@ -2890,8 +2861,8 @@ define dso_local i64 @ld_disjoint_unalign32_uint64_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1048576
   %or = or i64 %and, 99999
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = zext i32 %1 to i64
   ret i64 %conv
 }
@@ -2925,8 +2896,8 @@ define dso_local i64 @ld_disjoint_align32_uint64_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 16
   %conv = zext i32 %1 to i64
   ret i64 %conv
 }
@@ -2953,8 +2924,8 @@ define dso_local i64 @ld_not_disjoint64_uint64_t_uint32_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = zext i32 %1 to i64
   ret i64 %conv
 }
@@ -2982,8 +2953,8 @@ define dso_local i64 @ld_disjoint_unalign64_uint64_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = zext i32 %1 to i64
   ret i64 %conv
 }
@@ -3009,8 +2980,8 @@ define dso_local i64 @ld_disjoint_align64_uint64_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4096
   %conv = zext i32 %1 to i64
   ret i64 %conv
 }
@@ -3022,7 +2993,7 @@ define dso_local i64 @ld_cst_unalign16_uint64_t_uint32_t() {
 ; CHECK-NEXT:    lwz r3, 255(0)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 255 to i32*), align 4
+  %0 = load i32, ptr inttoptr (i64 255 to ptr), align 4
   %conv = zext i32 %0 to i64
   ret i64 %conv
 }
@@ -3034,7 +3005,7 @@ define dso_local i64 @ld_cst_align16_uint64_t_uint32_t() {
 ; CHECK-NEXT:    lwz r3, 4080(0)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 4080 to i32*), align 16
+  %0 = load i32, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = zext i32 %0 to i64
   ret i64 %conv
 }
@@ -3047,7 +3018,7 @@ define dso_local i64 @ld_cst_unalign32_uint64_t_uint32_t() {
 ; CHECK-NEXT:    lwz r3, -31073(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 99999 to i32*), align 4
+  %0 = load i32, ptr inttoptr (i64 99999 to ptr), align 4
   %conv = zext i32 %0 to i64
   ret i64 %conv
 }
@@ -3060,7 +3031,7 @@ define dso_local i64 @ld_cst_align32_uint64_t_uint32_t() {
 ; CHECK-NEXT:    lwz r3, -27108(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 9999900 to i32*), align 4
+  %0 = load i32, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = zext i32 %0 to i64
   ret i64 %conv
 }
@@ -3084,7 +3055,7 @@ define dso_local i64 @ld_cst_unalign64_uint64_t_uint32_t() {
 ; CHECK-PREP10-NEXT:    lwz r3, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 1000000000001 to i32*), align 4
+  %0 = load i32, ptr inttoptr (i64 1000000000001 to ptr), align 4
   %conv = zext i32 %0 to i64
   ret i64 %conv
 }
@@ -3106,7 +3077,7 @@ define dso_local i64 @ld_cst_align64_uint64_t_uint32_t() {
 ; CHECK-PREP10-NEXT:    lwz r3, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+  %0 = load i32, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = zext i32 %0 to i64
   ret i64 %conv
 }
@@ -3118,14 +3089,14 @@ define dso_local i64 @ld_0_uint64_t_int32_t(i64 %ptr) {
 ; CHECK-NEXT:    lwa r3, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = sext i32 %1 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign16_uint64_t_int32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign16_uint64_t_int32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign16_uint64_t_int32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plwa r3, 1(r3), 0
@@ -3137,29 +3108,27 @@ define dso_local i64 @ld_unalign16_uint64_t_int32_t(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    lwax r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = sext i32 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = sext i32 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align16_uint64_t_int32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_int32_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_uint64_t_int32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lwa r3, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = sext i32 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = sext i32 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign32_uint64_t_int32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign32_uint64_t_int32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign32_uint64_t_int32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plwa r3, 99999(r3), 0
@@ -3172,15 +3141,14 @@ define dso_local i64 @ld_unalign32_uint64_t_int32_t(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    lwax r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = sext i32 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = sext i32 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align32_uint64_t_int32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_int32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_uint64_t_int32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plwa r3, 99999000(r3), 0
@@ -3193,15 +3161,14 @@ define dso_local i64 @ld_align32_uint64_t_int32_t(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    lwax r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = sext i32 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = sext i32 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign64_uint64_t_int32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign64_uint64_t_int32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign64_uint64_t_int32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 232
@@ -3219,15 +3186,14 @@ define dso_local i64 @ld_unalign64_uint64_t_int32_t(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    lwax r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = sext i32 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = sext i32 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align64_uint64_t_int32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_int32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_uint64_t_int32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -3243,24 +3209,22 @@ define dso_local i64 @ld_align64_uint64_t_int32_t(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    lwax r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = sext i32 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = sext i32 %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_reg_uint64_t_int32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_int32_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_uint64_t_int32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lwax r3, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = sext i32 %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = sext i32 %0 to i64
   ret i64 %conv
 }
 
@@ -3274,8 +3238,8 @@ define dso_local i64 @ld_or_uint64_t_int32_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv1 = sext i32 %1 to i64
   ret i64 %conv1
 }
@@ -3291,8 +3255,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = zext i8 %off to i64
   %or = or i64 %and, %conv
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv1 = sext i32 %1 to i64
   ret i64 %conv1
 }
@@ -3306,8 +3270,8 @@ define dso_local i64 @ld_not_disjoint16_uint64_t_int32_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = sext i32 %1 to i64
   ret i64 %conv
 }
@@ -3336,8 +3300,8 @@ define dso_local i64 @ld_disjoint_unalign16_uint64_t_int32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 6
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = sext i32 %1 to i64
   ret i64 %conv
 }
@@ -3352,8 +3316,8 @@ define dso_local i64 @ld_disjoint_align16_uint64_t_int32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 8
   %conv = sext i32 %1 to i64
   ret i64 %conv
 }
@@ -3368,8 +3332,8 @@ define dso_local i64 @ld_not_disjoint32_uint64_t_int32_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = sext i32 %1 to i64
   ret i64 %conv
 }
@@ -3392,8 +3356,8 @@ define dso_local i64 @ld_disjoint_unalign32_uint64_t_int32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1048576
   %or = or i64 %and, 99999
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = sext i32 %1 to i64
   ret i64 %conv
 }
@@ -3427,8 +3391,8 @@ define dso_local i64 @ld_disjoint_align32_uint64_t_int32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 16
   %conv = sext i32 %1 to i64
   ret i64 %conv
 }
@@ -3455,8 +3419,8 @@ define dso_local i64 @ld_not_disjoint64_uint64_t_int32_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = sext i32 %1 to i64
   ret i64 %conv
 }
@@ -3484,8 +3448,8 @@ define dso_local i64 @ld_disjoint_unalign64_uint64_t_int32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = sext i32 %1 to i64
   ret i64 %conv
 }
@@ -3511,8 +3475,8 @@ define dso_local i64 @ld_disjoint_align64_uint64_t_int32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4096
   %conv = sext i32 %1 to i64
   ret i64 %conv
 }
@@ -3525,7 +3489,7 @@ define dso_local i64 @ld_cst_unalign16_uint64_t_int32_t() {
 ; CHECK-NEXT:    lwa r3, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 255 to i32*), align 4
+  %0 = load i32, ptr inttoptr (i64 255 to ptr), align 4
   %conv = sext i32 %0 to i64
   ret i64 %conv
 }
@@ -3537,7 +3501,7 @@ define dso_local i64 @ld_cst_align16_uint64_t_int32_t() {
 ; CHECK-NEXT:    lwa r3, 4080(0)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 4080 to i32*), align 16
+  %0 = load i32, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = sext i32 %0 to i64
   ret i64 %conv
 }
@@ -3557,7 +3521,7 @@ define dso_local i64 @ld_cst_unalign32_uint64_t_int32_t() {
 ; CHECK-PREP10-NEXT:    lwa r3, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 99999 to i32*), align 4
+  %0 = load i32, ptr inttoptr (i64 99999 to ptr), align 4
   %conv = sext i32 %0 to i64
   ret i64 %conv
 }
@@ -3570,7 +3534,7 @@ define dso_local i64 @ld_cst_align32_uint64_t_int32_t() {
 ; CHECK-NEXT:    lwa r3, -27108(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 9999900 to i32*), align 4
+  %0 = load i32, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = sext i32 %0 to i64
   ret i64 %conv
 }
@@ -3594,7 +3558,7 @@ define dso_local i64 @ld_cst_unalign64_uint64_t_int32_t() {
 ; CHECK-PREP10-NEXT:    lwa r3, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 1000000000001 to i32*), align 4
+  %0 = load i32, ptr inttoptr (i64 1000000000001 to ptr), align 4
   %conv = sext i32 %0 to i64
   ret i64 %conv
 }
@@ -3616,7 +3580,7 @@ define dso_local i64 @ld_cst_align64_uint64_t_int32_t() {
 ; CHECK-PREP10-NEXT:    lwa r3, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+  %0 = load i32, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = sext i32 %0 to i64
   ret i64 %conv
 }
@@ -3628,13 +3592,13 @@ define dso_local i64 @ld_0_uint64_t_uint64_t(i64 %ptr) {
 ; CHECK-NEXT:    ld r3, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i64, ptr %0, align 8
   ret i64 %1
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign16_uint64_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign16_uint64_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign16_uint64_t_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pld r3, 1(r3), 0
@@ -3646,27 +3610,25 @@ define dso_local i64 @ld_unalign16_uint64_t_uint64_t(i8* nocapture readonly %ptr
 ; CHECK-PREP10-NEXT:    ldx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  ret i64 %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+  %0 = load i64, ptr %add.ptr, align 8
+  ret i64 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align16_uint64_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_uint64_t_uint64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    ld r3, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  ret i64 %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i64, ptr %add.ptr, align 8
+  ret i64 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign32_uint64_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign32_uint64_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign32_uint64_t_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pld r3, 99999(r3), 0
@@ -3679,14 +3641,13 @@ define dso_local i64 @ld_unalign32_uint64_t_uint64_t(i8* nocapture readonly %ptr
 ; CHECK-PREP10-NEXT:    ldx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  ret i64 %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+  %0 = load i64, ptr %add.ptr, align 8
+  ret i64 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align32_uint64_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_uint64_t_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pld r3, 99999000(r3), 0
@@ -3699,14 +3660,13 @@ define dso_local i64 @ld_align32_uint64_t_uint64_t(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    ldx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  ret i64 %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i64, ptr %add.ptr, align 8
+  ret i64 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign64_uint64_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign64_uint64_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign64_uint64_t_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 232
@@ -3724,14 +3684,13 @@ define dso_local i64 @ld_unalign64_uint64_t_uint64_t(i8* nocapture readonly %ptr
 ; CHECK-PREP10-NEXT:    ldx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  ret i64 %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+  %0 = load i64, ptr %add.ptr, align 8
+  ret i64 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align64_uint64_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_uint64_t_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -3747,23 +3706,21 @@ define dso_local i64 @ld_align64_uint64_t_uint64_t(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    ldx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  ret i64 %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i64, ptr %add.ptr, align 8
+  ret i64 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_reg_uint64_t_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_uint64_t_uint64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    ldx r3, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  ret i64 %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i64, ptr %add.ptr, align 8
+  ret i64 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
@@ -3776,8 +3733,8 @@ define dso_local i64 @ld_or_uint64_t_uint64_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   ret i64 %1
 }
 
@@ -3792,8 +3749,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = zext i8 %off to i64
   %or = or i64 %and, %conv
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   ret i64 %1
 }
 
@@ -3806,8 +3763,8 @@ define dso_local i64 @ld_not_disjoint16_uint64_t_uint64_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   ret i64 %1
 }
 
@@ -3835,8 +3792,8 @@ define dso_local i64 @ld_disjoint_unalign16_uint64_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 6
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   ret i64 %1
 }
 
@@ -3850,8 +3807,8 @@ define dso_local i64 @ld_disjoint_align16_uint64_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   ret i64 %1
 }
 
@@ -3865,8 +3822,8 @@ define dso_local i64 @ld_not_disjoint32_uint64_t_uint64_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   ret i64 %1
 }
 
@@ -3888,8 +3845,8 @@ define dso_local i64 @ld_disjoint_unalign32_uint64_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1048576
   %or = or i64 %and, 99999
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   ret i64 %1
 }
 
@@ -3922,8 +3879,8 @@ define dso_local i64 @ld_disjoint_align32_uint64_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 16
   ret i64 %1
 }
 
@@ -3949,8 +3906,8 @@ define dso_local i64 @ld_not_disjoint64_uint64_t_uint64_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   ret i64 %1
 }
 
@@ -3977,8 +3934,8 @@ define dso_local i64 @ld_disjoint_unalign64_uint64_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000001
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   ret i64 %1
 }
 
@@ -4003,8 +3960,8 @@ define dso_local i64 @ld_disjoint_align64_uint64_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 4096
   ret i64 %1
 }
 
@@ -4016,7 +3973,7 @@ define dso_local i64 @ld_cst_unalign16_uint64_t_uint64_t() {
 ; CHECK-NEXT:    ld r3, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 255 to i64*), align 8
+  %0 = load i64, ptr inttoptr (i64 255 to ptr), align 8
   ret i64 %0
 }
 
@@ -4027,7 +3984,7 @@ define dso_local i64 @ld_cst_align16_uint64_t_uint64_t() {
 ; CHECK-NEXT:    ld r3, 4080(0)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 4080 to i64*), align 16
+  %0 = load i64, ptr inttoptr (i64 4080 to ptr), align 16
   ret i64 %0
 }
 
@@ -4046,7 +4003,7 @@ define dso_local i64 @ld_cst_unalign32_uint64_t_uint64_t() {
 ; CHECK-PREP10-NEXT:    ld r3, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 99999 to i64*), align 8
+  %0 = load i64, ptr inttoptr (i64 99999 to ptr), align 8
   ret i64 %0
 }
 
@@ -4058,7 +4015,7 @@ define dso_local i64 @ld_cst_align32_uint64_t_uint64_t() {
 ; CHECK-NEXT:    ld r3, -27108(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 9999900 to i64*), align 8
+  %0 = load i64, ptr inttoptr (i64 9999900 to ptr), align 8
   ret i64 %0
 }
 
@@ -4081,7 +4038,7 @@ define dso_local i64 @ld_cst_unalign64_uint64_t_uint64_t() {
 ; CHECK-PREP10-NEXT:    ld r3, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 1000000000001 to i64*), align 8
+  %0 = load i64, ptr inttoptr (i64 1000000000001 to ptr), align 8
   ret i64 %0
 }
 
@@ -4102,7 +4059,7 @@ define dso_local i64 @ld_cst_align64_uint64_t_uint64_t() {
 ; CHECK-PREP10-NEXT:    ld r3, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+  %0 = load i64, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret i64 %0
 }
 
@@ -4115,14 +4072,14 @@ define dso_local i64 @ld_0_uint64_t_float(i64 %ptr) {
 ; CHECK-NEXT:    mffprd r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptoui float %1 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign16_uint64_t_float(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign16_uint64_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign16_uint64_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfs f0, 1(r3), 0
@@ -4137,15 +4094,14 @@ define dso_local i64 @ld_unalign16_uint64_t_float(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    mffprd r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptoui float %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptoui float %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align16_uint64_t_float(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_uint64_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs f0, 8(r3)
@@ -4153,15 +4109,14 @@ define dso_local i64 @ld_align16_uint64_t_float(i8* nocapture readonly %ptr) {
 ; CHECK-NEXT:    mffprd r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptoui float %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptoui float %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign32_uint64_t_float(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign32_uint64_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign32_uint64_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfs f0, 99999(r3), 0
@@ -4178,15 +4133,14 @@ define dso_local i64 @ld_unalign32_uint64_t_float(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    mffprd r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptoui float %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptoui float %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align32_uint64_t_float(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_uint64_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfs f0, 99999000(r3), 0
@@ -4203,15 +4157,14 @@ define dso_local i64 @ld_align32_uint64_t_float(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    mffprd r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptoui float %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptoui float %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign64_uint64_t_float(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign64_uint64_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign64_uint64_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 232
@@ -4233,15 +4186,14 @@ define dso_local i64 @ld_unalign64_uint64_t_float(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    mffprd r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptoui float %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptoui float %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align64_uint64_t_float(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_uint64_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -4261,15 +4213,14 @@ define dso_local i64 @ld_align64_uint64_t_float(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    mffprd r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptoui float %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptoui float %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_reg_uint64_t_float(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_float(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_uint64_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfsx f0, r3, r4
@@ -4277,10 +4228,9 @@ define dso_local i64 @ld_reg_uint64_t_float(i8* nocapture readonly %ptr, i64 %of
 ; CHECK-NEXT:    mffprd r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptoui float %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptoui float %0 to i64
   ret i64 %conv
 }
 
@@ -4296,8 +4246,8 @@ define dso_local i64 @ld_or_uint64_t_float(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv1 = fptoui float %1 to i64
   ret i64 %conv1
 }
@@ -4315,8 +4265,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = zext i8 %off to i64
   %or = or i64 %and, %conv
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv1 = fptoui float %1 to i64
   ret i64 %conv1
 }
@@ -4332,8 +4282,8 @@ define dso_local i64 @ld_not_disjoint16_uint64_t_float(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptoui float %1 to i64
   ret i64 %conv
 }
@@ -4358,8 +4308,8 @@ define dso_local i64 @ld_disjoint_unalign16_uint64_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 6
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptoui float %1 to i64
   ret i64 %conv
 }
@@ -4376,8 +4326,8 @@ define dso_local i64 @ld_disjoint_align16_uint64_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 8
   %conv = fptoui float %1 to i64
   ret i64 %conv
 }
@@ -4394,8 +4344,8 @@ define dso_local i64 @ld_not_disjoint32_uint64_t_float(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptoui float %1 to i64
   ret i64 %conv
 }
@@ -4422,8 +4372,8 @@ define dso_local i64 @ld_disjoint_unalign32_uint64_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1048576
   %or = or i64 %and, 99999
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptoui float %1 to i64
   ret i64 %conv
 }
@@ -4463,8 +4413,8 @@ define dso_local i64 @ld_disjoint_align32_uint64_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 16
   %conv = fptoui float %1 to i64
   ret i64 %conv
 }
@@ -4495,8 +4445,8 @@ define dso_local i64 @ld_not_disjoint64_uint64_t_float(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptoui float %1 to i64
   ret i64 %conv
 }
@@ -4528,8 +4478,8 @@ define dso_local i64 @ld_disjoint_unalign64_uint64_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000001
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptoui float %1 to i64
   ret i64 %conv
 }
@@ -4559,8 +4509,8 @@ define dso_local i64 @ld_disjoint_align64_uint64_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4096
   %conv = fptoui float %1 to i64
   ret i64 %conv
 }
@@ -4590,7 +4540,7 @@ define dso_local i64 @ld_cst_unalign16_uint64_t_float() {
 ; CHECK-P8-NEXT:    mffprd r3, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 255 to float*), align 4
+  %0 = load float, ptr inttoptr (i64 255 to ptr), align 4
   %conv = fptoui float %0 to i64
   ret i64 %conv
 }
@@ -4604,7 +4554,7 @@ define dso_local i64 @ld_cst_align16_uint64_t_float() {
 ; CHECK-NEXT:    mffprd r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 4080 to float*), align 16
+  %0 = load float, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = fptoui float %0 to i64
   ret i64 %conv
 }
@@ -4636,7 +4586,7 @@ define dso_local i64 @ld_cst_unalign32_uint64_t_float() {
 ; CHECK-P8-NEXT:    mffprd r3, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 99999 to float*), align 4
+  %0 = load float, ptr inttoptr (i64 99999 to ptr), align 4
   %conv = fptoui float %0 to i64
   ret i64 %conv
 }
@@ -4651,7 +4601,7 @@ define dso_local i64 @ld_cst_align32_uint64_t_float() {
 ; CHECK-NEXT:    mffprd r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 9999900 to float*), align 4
+  %0 = load float, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = fptoui float %0 to i64
   ret i64 %conv
 }
@@ -4679,7 +4629,7 @@ define dso_local i64 @ld_cst_unalign64_uint64_t_float() {
 ; CHECK-PREP10-NEXT:    mffprd r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 1000000000001 to float*), align 4
+  %0 = load float, ptr inttoptr (i64 1000000000001 to ptr), align 4
   %conv = fptoui float %0 to i64
   ret i64 %conv
 }
@@ -4705,7 +4655,7 @@ define dso_local i64 @ld_cst_align64_uint64_t_float() {
 ; CHECK-PREP10-NEXT:    mffprd r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 1000000000000 to float*), align 4096
+  %0 = load float, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = fptoui float %0 to i64
   ret i64 %conv
 }
@@ -4719,14 +4669,14 @@ define dso_local i64 @ld_0_uint64_t_double(i64 %ptr) {
 ; CHECK-NEXT:    mffprd r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptoui double %1 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign16_uint64_t_double(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign16_uint64_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign16_uint64_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfd f0, 1(r3), 0
@@ -4741,15 +4691,14 @@ define dso_local i64 @ld_unalign16_uint64_t_double(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    mffprd r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptoui double %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptoui double %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align16_uint64_t_double(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_uint64_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd f0, 8(r3)
@@ -4757,15 +4706,14 @@ define dso_local i64 @ld_align16_uint64_t_double(i8* nocapture readonly %ptr) {
 ; CHECK-NEXT:    mffprd r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptoui double %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptoui double %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign32_uint64_t_double(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign32_uint64_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign32_uint64_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfd f0, 99999(r3), 0
@@ -4782,15 +4730,14 @@ define dso_local i64 @ld_unalign32_uint64_t_double(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    mffprd r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptoui double %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptoui double %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align32_uint64_t_double(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_uint64_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfd f0, 99999000(r3), 0
@@ -4807,15 +4754,14 @@ define dso_local i64 @ld_align32_uint64_t_double(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    mffprd r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptoui double %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptoui double %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign64_uint64_t_double(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign64_uint64_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign64_uint64_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 232
@@ -4837,15 +4783,14 @@ define dso_local i64 @ld_unalign64_uint64_t_double(i8* nocapture readonly %ptr)
 ; CHECK-PREP10-NEXT:    mffprd r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptoui double %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptoui double %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align64_uint64_t_double(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_uint64_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -4865,15 +4810,14 @@ define dso_local i64 @ld_align64_uint64_t_double(i8* nocapture readonly %ptr) {
 ; CHECK-PREP10-NEXT:    mffprd r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptoui double %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptoui double %0 to i64
   ret i64 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_reg_uint64_t_double(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_double(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_uint64_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfdx f0, r3, r4
@@ -4881,10 +4825,9 @@ define dso_local i64 @ld_reg_uint64_t_double(i8* nocapture readonly %ptr, i64 %o
 ; CHECK-NEXT:    mffprd r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptoui double %1 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptoui double %0 to i64
   ret i64 %conv
 }
 
@@ -4900,8 +4843,8 @@ define dso_local i64 @ld_or_uint64_t_double(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv1 = fptoui double %1 to i64
   ret i64 %conv1
 }
@@ -4919,8 +4862,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = zext i8 %off to i64
   %or = or i64 %and, %conv
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv1 = fptoui double %1 to i64
   ret i64 %conv1
 }
@@ -4936,8 +4879,8 @@ define dso_local i64 @ld_not_disjoint16_uint64_t_double(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptoui double %1 to i64
   ret i64 %conv
 }
@@ -4962,8 +4905,8 @@ define dso_local i64 @ld_disjoint_unalign16_uint64_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 6
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptoui double %1 to i64
   ret i64 %conv
 }
@@ -4980,8 +4923,8 @@ define dso_local i64 @ld_disjoint_align16_uint64_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptoui double %1 to i64
   ret i64 %conv
 }
@@ -4998,8 +4941,8 @@ define dso_local i64 @ld_not_disjoint32_uint64_t_double(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptoui double %1 to i64
   ret i64 %conv
 }
@@ -5026,8 +4969,8 @@ define dso_local i64 @ld_disjoint_unalign32_uint64_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1048576
   %or = or i64 %and, 99999
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptoui double %1 to i64
   ret i64 %conv
 }
@@ -5067,8 +5010,8 @@ define dso_local i64 @ld_disjoint_align32_uint64_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 16
   %conv = fptoui double %1 to i64
   ret i64 %conv
 }
@@ -5099,8 +5042,8 @@ define dso_local i64 @ld_not_disjoint64_uint64_t_double(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptoui double %1 to i64
   ret i64 %conv
 }
@@ -5132,8 +5075,8 @@ define dso_local i64 @ld_disjoint_unalign64_uint64_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000001
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptoui double %1 to i64
   ret i64 %conv
 }
@@ -5163,8 +5106,8 @@ define dso_local i64 @ld_disjoint_align64_uint64_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 4096
   %conv = fptoui double %1 to i64
   ret i64 %conv
 }
@@ -5194,7 +5137,7 @@ define dso_local i64 @ld_cst_unalign16_uint64_t_double() {
 ; CHECK-P8-NEXT:    mffprd r3, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 255 to double*), align 8
+  %0 = load double, ptr inttoptr (i64 255 to ptr), align 8
   %conv = fptoui double %0 to i64
   ret i64 %conv
 }
@@ -5208,7 +5151,7 @@ define dso_local i64 @ld_cst_align16_uint64_t_double() {
 ; CHECK-NEXT:    mffprd r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 4080 to double*), align 16
+  %0 = load double, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = fptoui double %0 to i64
   ret i64 %conv
 }
@@ -5240,7 +5183,7 @@ define dso_local i64 @ld_cst_unalign32_uint64_t_double() {
 ; CHECK-P8-NEXT:    mffprd r3, f0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 99999 to double*), align 8
+  %0 = load double, ptr inttoptr (i64 99999 to ptr), align 8
   %conv = fptoui double %0 to i64
   ret i64 %conv
 }
@@ -5255,7 +5198,7 @@ define dso_local i64 @ld_cst_align32_uint64_t_double() {
 ; CHECK-NEXT:    mffprd r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 9999900 to double*), align 8
+  %0 = load double, ptr inttoptr (i64 9999900 to ptr), align 8
   %conv = fptoui double %0 to i64
   ret i64 %conv
 }
@@ -5283,7 +5226,7 @@ define dso_local i64 @ld_cst_unalign64_uint64_t_double() {
 ; CHECK-PREP10-NEXT:    mffprd r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 1000000000001 to double*), align 8
+  %0 = load double, ptr inttoptr (i64 1000000000001 to ptr), align 8
   %conv = fptoui double %0 to i64
   ret i64 %conv
 }
@@ -5309,7 +5252,7 @@ define dso_local i64 @ld_cst_align64_uint64_t_double() {
 ; CHECK-PREP10-NEXT:    mffprd r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 1000000000000 to double*), align 4096
+  %0 = load double, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = fptoui double %0 to i64
   ret i64 %conv
 }
@@ -5322,26 +5265,26 @@ define dso_local void @st_0_uint64_t_uint8_t(i64 %ptr, i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i8
-  %0 = inttoptr i64 %ptr to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %ptr to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint64_t_uint8_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align16_uint64_t_uint8_t(ptr nocapture %ptr, i64 %str) {
 ; CHECK-LABEL: st_align16_uint64_t_uint8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stb r4, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint64_t_uint8_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align32_uint64_t_uint8_t(ptr nocapture %ptr, i64 %str) {
 ; CHECK-P10-LABEL: st_align32_uint64_t_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pstb r4, 99999000(r3), 0
@@ -5355,13 +5298,13 @@ define dso_local void @st_align32_uint64_t_uint8_t(i8* nocapture %ptr, i64 %str)
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint64_t_uint8_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align64_uint64_t_uint8_t(ptr nocapture %ptr, i64 %str) {
 ; CHECK-P10-LABEL: st_align64_uint64_t_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r5, 244140625
@@ -5378,21 +5321,21 @@ define dso_local void @st_align64_uint64_t_uint8_t(i8* nocapture %ptr, i64 %str)
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint64_t_uint8_t(i8* nocapture %ptr, i64 %off, i64 %str) {
+define dso_local void @st_reg_uint64_t_uint8_t(ptr nocapture %ptr, i64 %off, i64 %str) {
 ; CHECK-LABEL: st_reg_uint64_t_uint8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stbx r5, r3, r4
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i8
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  store i8 %conv, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i8 %conv, ptr %add.ptr, align 1
   ret void
 }
 
@@ -5407,8 +5350,8 @@ entry:
   %conv = trunc i64 %str to i8
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -5424,8 +5367,8 @@ entry:
   %conv = trunc i64 %str to i8
   %conv1 = zext i8 %off to i64
   %or = or i64 %and, %conv1
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -5439,8 +5382,8 @@ define dso_local void @st_not_disjoint16_uint64_t_uint8_t(i64 %ptr, i64 %str) {
 entry:
   %conv = trunc i64 %str to i8
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -5455,8 +5398,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = trunc i64 %str to i8
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 8
   ret void
 }
 
@@ -5471,8 +5414,8 @@ define dso_local void @st_not_disjoint32_uint64_t_uint8_t(i64 %ptr, i64 %str) {
 entry:
   %conv = trunc i64 %str to i8
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -5506,8 +5449,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = trunc i64 %str to i8
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 16
   ret void
 }
 
@@ -5534,8 +5477,8 @@ define dso_local void @st_not_disjoint64_uint64_t_uint8_t(i64 %ptr, i64 %str) {
 entry:
   %conv = trunc i64 %str to i8
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 1
   ret void
 }
 
@@ -5561,8 +5504,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = trunc i64 %str to i8
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i8*
-  store i8 %conv, i8* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i8 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -5574,7 +5517,7 @@ define dso_local void @st_cst_align16_uint64_t_uint8_t(i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i8
-  store i8 %conv, i8* inttoptr (i64 4080 to i8*), align 16
+  store i8 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -5587,7 +5530,7 @@ define dso_local void @st_cst_align32_uint64_t_uint8_t(i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i8
-  store i8 %conv, i8* inttoptr (i64 9999900 to i8*), align 4
+  store i8 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -5609,7 +5552,7 @@ define dso_local void @st_cst_align64_uint64_t_uint8_t(i64 %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i8
-  store i8 %conv, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+  store i8 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -5621,27 +5564,26 @@ define dso_local void @st_0_uint64_t_uint16_t(i64 %ptr, i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i16
-  %0 = inttoptr i64 %ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %ptr to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint64_t_uint16_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align16_uint64_t_uint16_t(ptr nocapture %ptr, i64 %str) {
 ; CHECK-LABEL: st_align16_uint64_t_uint16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sth r4, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint64_t_uint16_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align32_uint64_t_uint16_t(ptr nocapture %ptr, i64 %str) {
 ; CHECK-P10-LABEL: st_align32_uint64_t_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    psth r4, 99999000(r3), 0
@@ -5655,14 +5597,13 @@ define dso_local void @st_align32_uint64_t_uint16_t(i8* nocapture %ptr, i64 %str
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint64_t_uint16_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align64_uint64_t_uint16_t(ptr nocapture %ptr, i64 %str) {
 ; CHECK-P10-LABEL: st_align64_uint64_t_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r5, 244140625
@@ -5679,23 +5620,21 @@ define dso_local void @st_align64_uint64_t_uint16_t(i8* nocapture %ptr, i64 %str
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint64_t_uint16_t(i8* nocapture %ptr, i64 %off, i64 %str) {
+define dso_local void @st_reg_uint64_t_uint16_t(ptr nocapture %ptr, i64 %off, i64 %str) {
 ; CHECK-LABEL: st_reg_uint64_t_uint16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sthx r5, r3, r4
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
@@ -5710,8 +5649,8 @@ entry:
   %conv = trunc i64 %str to i16
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -5727,8 +5666,8 @@ entry:
   %conv = trunc i64 %str to i16
   %conv1 = zext i8 %off to i64
   %or = or i64 %and, %conv1
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -5742,8 +5681,8 @@ define dso_local void @st_not_disjoint16_uint64_t_uint16_t(i64 %ptr, i64 %str) {
 entry:
   %conv = trunc i64 %str to i16
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -5758,8 +5697,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = trunc i64 %str to i16
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 8
   ret void
 }
 
@@ -5774,8 +5713,8 @@ define dso_local void @st_not_disjoint32_uint64_t_uint16_t(i64 %ptr, i64 %str) {
 entry:
   %conv = trunc i64 %str to i16
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -5809,8 +5748,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = trunc i64 %str to i16
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 16
   ret void
 }
 
@@ -5837,8 +5776,8 @@ define dso_local void @st_not_disjoint64_uint64_t_uint16_t(i64 %ptr, i64 %str) {
 entry:
   %conv = trunc i64 %str to i16
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -5864,8 +5803,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = trunc i64 %str to i16
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -5877,7 +5816,7 @@ define dso_local void @st_cst_align16_uint64_t_uint16_t(i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i16
-  store i16 %conv, i16* inttoptr (i64 4080 to i16*), align 16
+  store i16 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -5890,7 +5829,7 @@ define dso_local void @st_cst_align32_uint64_t_uint16_t(i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i16
-  store i16 %conv, i16* inttoptr (i64 9999900 to i16*), align 4
+  store i16 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -5912,7 +5851,7 @@ define dso_local void @st_cst_align64_uint64_t_uint16_t(i64 %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i16
-  store i16 %conv, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+  store i16 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -5924,27 +5863,26 @@ define dso_local void @st_0_uint64_t_int16_t(i64 %ptr, i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i16
-  %0 = inttoptr i64 %ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %ptr to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint64_t_int16_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align16_uint64_t_int16_t(ptr nocapture %ptr, i64 %str) {
 ; CHECK-LABEL: st_align16_uint64_t_int16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sth r4, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint64_t_int16_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align32_uint64_t_int16_t(ptr nocapture %ptr, i64 %str) {
 ; CHECK-P10-LABEL: st_align32_uint64_t_int16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    psth r4, 99999000(r3), 0
@@ -5958,14 +5896,13 @@ define dso_local void @st_align32_uint64_t_int16_t(i8* nocapture %ptr, i64 %str)
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint64_t_int16_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align64_uint64_t_int16_t(ptr nocapture %ptr, i64 %str) {
 ; CHECK-P10-LABEL: st_align64_uint64_t_int16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r5, 244140625
@@ -5982,23 +5919,21 @@ define dso_local void @st_align64_uint64_t_int16_t(i8* nocapture %ptr, i64 %str)
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint64_t_int16_t(i8* nocapture %ptr, i64 %off, i64 %str) {
+define dso_local void @st_reg_uint64_t_int16_t(ptr nocapture %ptr, i64 %off, i64 %str) {
 ; CHECK-LABEL: st_reg_uint64_t_int16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sthx r5, r3, r4
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
@@ -6013,8 +5948,8 @@ entry:
   %conv = trunc i64 %str to i16
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -6030,8 +5965,8 @@ entry:
   %conv = trunc i64 %str to i16
   %conv1 = zext i8 %off to i64
   %or = or i64 %and, %conv1
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -6045,8 +5980,8 @@ define dso_local void @st_not_disjoint16_uint64_t_int16_t(i64 %ptr, i64 %str) {
 entry:
   %conv = trunc i64 %str to i16
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -6061,8 +5996,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = trunc i64 %str to i16
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6077,8 +6012,8 @@ define dso_local void @st_not_disjoint32_uint64_t_int16_t(i64 %ptr, i64 %str) {
 entry:
   %conv = trunc i64 %str to i16
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -6112,8 +6047,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = trunc i64 %str to i16
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 16
   ret void
 }
 
@@ -6140,8 +6075,8 @@ define dso_local void @st_not_disjoint64_uint64_t_int16_t(i64 %ptr, i64 %str) {
 entry:
   %conv = trunc i64 %str to i16
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -6167,8 +6102,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = trunc i64 %str to i16
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -6180,7 +6115,7 @@ define dso_local void @st_cst_align16_uint64_t_int16_t(i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i16
-  store i16 %conv, i16* inttoptr (i64 4080 to i16*), align 16
+  store i16 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -6193,7 +6128,7 @@ define dso_local void @st_cst_align32_uint64_t_int16_t(i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i16
-  store i16 %conv, i16* inttoptr (i64 9999900 to i16*), align 4
+  store i16 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -6215,7 +6150,7 @@ define dso_local void @st_cst_align64_uint64_t_int16_t(i64 %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i16
-  store i16 %conv, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+  store i16 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -6227,27 +6162,26 @@ define dso_local void @st_0_uint64_t_uint32_t(i64 %ptr, i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i32
-  %0 = inttoptr i64 %ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint64_t_uint32_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align16_uint64_t_uint32_t(ptr nocapture %ptr, i64 %str) {
 ; CHECK-LABEL: st_align16_uint64_t_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stw r4, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint64_t_uint32_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align32_uint64_t_uint32_t(ptr nocapture %ptr, i64 %str) {
 ; CHECK-P10-LABEL: st_align32_uint64_t_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pstw r4, 99999000(r3), 0
@@ -6261,14 +6195,13 @@ define dso_local void @st_align32_uint64_t_uint32_t(i8* nocapture %ptr, i64 %str
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint64_t_uint32_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align64_uint64_t_uint32_t(ptr nocapture %ptr, i64 %str) {
 ; CHECK-P10-LABEL: st_align64_uint64_t_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r5, 244140625
@@ -6285,23 +6218,21 @@ define dso_local void @st_align64_uint64_t_uint32_t(i8* nocapture %ptr, i64 %str
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint64_t_uint32_t(i8* nocapture %ptr, i64 %off, i64 %str) {
+define dso_local void @st_reg_uint64_t_uint32_t(ptr nocapture %ptr, i64 %off, i64 %str) {
 ; CHECK-LABEL: st_reg_uint64_t_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stwx r5, r3, r4
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
@@ -6316,8 +6247,8 @@ entry:
   %conv = trunc i64 %str to i32
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -6333,8 +6264,8 @@ entry:
   %conv = trunc i64 %str to i32
   %conv1 = zext i8 %off to i64
   %or = or i64 %and, %conv1
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -6348,8 +6279,8 @@ define dso_local void @st_not_disjoint16_uint64_t_uint32_t(i64 %ptr, i64 %str) {
 entry:
   %conv = trunc i64 %str to i32
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -6364,8 +6295,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = trunc i64 %str to i32
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6380,8 +6311,8 @@ define dso_local void @st_not_disjoint32_uint64_t_uint32_t(i64 %ptr, i64 %str) {
 entry:
   %conv = trunc i64 %str to i32
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -6415,8 +6346,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = trunc i64 %str to i32
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 16
   ret void
 }
 
@@ -6443,8 +6374,8 @@ define dso_local void @st_not_disjoint64_uint64_t_uint32_t(i64 %ptr, i64 %str) {
 entry:
   %conv = trunc i64 %str to i32
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -6470,8 +6401,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = trunc i64 %str to i32
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -6483,7 +6414,7 @@ define dso_local void @st_cst_align16_uint64_t_uint32_t(i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i32
-  store i32 %conv, i32* inttoptr (i64 4080 to i32*), align 16
+  store i32 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -6496,7 +6427,7 @@ define dso_local void @st_cst_align32_uint64_t_uint32_t(i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i32
-  store i32 %conv, i32* inttoptr (i64 9999900 to i32*), align 4
+  store i32 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -6518,7 +6449,7 @@ define dso_local void @st_cst_align64_uint64_t_uint32_t(i64 %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = trunc i64 %str to i32
-  store i32 %conv, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+  store i32 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -6529,26 +6460,25 @@ define dso_local void @st_0_uint64_t_uint64_t(i64 %ptr, i64 %str) {
 ; CHECK-NEXT:    std r4, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i64*
-  store i64 %str, i64* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  store i64 %str, ptr %0, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint64_t_uint64_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align16_uint64_t_uint64_t(ptr nocapture %ptr, i64 %str) {
 ; CHECK-LABEL: st_align16_uint64_t_uint64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    std r4, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %str, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i64 %str, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint64_t_uint64_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align32_uint64_t_uint64_t(ptr nocapture %ptr, i64 %str) {
 ; CHECK-P10-LABEL: st_align32_uint64_t_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pstd r4, 99999000(r3), 0
@@ -6561,14 +6491,13 @@ define dso_local void @st_align32_uint64_t_uint64_t(i8* nocapture %ptr, i64 %str
 ; CHECK-PREP10-NEXT:    stdx r4, r3, r5
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %str, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i64 %str, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint64_t_uint64_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align64_uint64_t_uint64_t(ptr nocapture %ptr, i64 %str) {
 ; CHECK-P10-LABEL: st_align64_uint64_t_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r5, 244140625
@@ -6584,22 +6513,20 @@ define dso_local void @st_align64_uint64_t_uint64_t(i8* nocapture %ptr, i64 %str
 ; CHECK-PREP10-NEXT:    stdx r4, r3, r5
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %str, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i64 %str, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint64_t_uint64_t(i8* nocapture %ptr, i64 %off, i64 %str) {
+define dso_local void @st_reg_uint64_t_uint64_t(ptr nocapture %ptr, i64 %off, i64 %str) {
 ; CHECK-LABEL: st_reg_uint64_t_uint64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stdx r5, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %str, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i64 %str, ptr %add.ptr, align 8
   ret void
 }
 
@@ -6613,8 +6540,8 @@ define dso_local void @st_or1_uint64_t_uint64_t(i64 %ptr, i8 zeroext %off, i64 %
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i64*
-  store i64 %str, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %str, ptr %0, align 8
   ret void
 }
 
@@ -6629,8 +6556,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = zext i8 %off to i64
   %or = or i64 %and, %conv
-  %0 = inttoptr i64 %or to i64*
-  store i64 %str, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %str, ptr %0, align 8
   ret void
 }
 
@@ -6643,8 +6570,8 @@ define dso_local void @st_not_disjoint16_uint64_t_uint64_t(i64 %ptr, i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i64*
-  store i64 %str, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %str, ptr %0, align 8
   ret void
 }
 
@@ -6658,8 +6585,8 @@ define dso_local void @st_disjoint_align16_uint64_t_uint64_t(i64 %ptr, i64 %str)
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i64*
-  store i64 %str, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %str, ptr %0, align 8
   ret void
 }
 
@@ -6673,8 +6600,8 @@ define dso_local void @st_not_disjoint32_uint64_t_uint64_t(i64 %ptr, i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i64*
-  store i64 %str, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %str, ptr %0, align 8
   ret void
 }
 
@@ -6707,8 +6634,8 @@ define dso_local void @st_disjoint_align32_uint64_t_uint64_t(i64 %ptr, i64 %str)
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i64*
-  store i64 %str, i64* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i64 %str, ptr %0, align 16
   ret void
 }
 
@@ -6734,8 +6661,8 @@ define dso_local void @st_not_disjoint64_uint64_t_uint64_t(i64 %ptr, i64 %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i64*
-  store i64 %str, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %str, ptr %0, align 8
   ret void
 }
 
@@ -6760,8 +6687,8 @@ define dso_local void @st_disjoint_align64_uint64_t_uint64_t(i64 %ptr, i64 %str)
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i64*
-  store i64 %str, i64* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i64 %str, ptr %0, align 4096
   ret void
 }
 
@@ -6772,7 +6699,7 @@ define dso_local void @st_cst_align16_uint64_t_uint64_t(i64 %str) {
 ; CHECK-NEXT:    std r3, 4080(0)
 ; CHECK-NEXT:    blr
 entry:
-  store i64 %str, i64* inttoptr (i64 4080 to i64*), align 16
+  store i64 %str, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -6784,7 +6711,7 @@ define dso_local void @st_cst_align32_uint64_t_uint64_t(i64 %str) {
 ; CHECK-NEXT:    std r3, -27108(r4)
 ; CHECK-NEXT:    blr
 entry:
-  store i64 %str, i64* inttoptr (i64 9999900 to i64*), align 8
+  store i64 %str, ptr inttoptr (i64 9999900 to ptr), align 8
   ret void
 }
 
@@ -6805,7 +6732,7 @@ define dso_local void @st_cst_align64_uint64_t_uint64_t(i64 %str) {
 ; CHECK-PREP10-NEXT:    std r3, 0(r4)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  store i64 %str, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+  store i64 %str, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -6819,13 +6746,13 @@ define dso_local void @st_0_uint64_t_float(i64 %ptr, i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i64 %str to float
-  %0 = inttoptr i64 %ptr to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint64_t_float(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align16_uint64_t_float(ptr nocapture %ptr, i64 %str) {
 ; CHECK-LABEL: st_align16_uint64_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprd f0, r4
@@ -6834,14 +6761,13 @@ define dso_local void @st_align16_uint64_t_float(i8* nocapture %ptr, i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i64 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint64_t_float(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align32_uint64_t_float(ptr nocapture %ptr, i64 %str) {
 ; CHECK-P10-LABEL: st_align32_uint64_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprd f0, r4
@@ -6868,14 +6794,13 @@ define dso_local void @st_align32_uint64_t_float(i8* nocapture %ptr, i64 %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i64 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint64_t_float(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align64_uint64_t_float(ptr nocapture %ptr, i64 %str) {
 ; CHECK-P10-LABEL: st_align64_uint64_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprd f0, r4
@@ -6906,14 +6831,13 @@ define dso_local void @st_align64_uint64_t_float(i8* nocapture %ptr, i64 %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i64 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint64_t_float(i8* nocapture %ptr, i64 %off, i64 %str) {
+define dso_local void @st_reg_uint64_t_float(ptr nocapture %ptr, i64 %off, i64 %str) {
 ; CHECK-LABEL: st_reg_uint64_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprd f0, r5
@@ -6922,9 +6846,8 @@ define dso_local void @st_reg_uint64_t_float(i8* nocapture %ptr, i64 %off, i64 %
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i64 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
@@ -6941,8 +6864,8 @@ entry:
   %conv = uitofp i64 %str to float
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -6960,8 +6883,8 @@ entry:
   %conv = uitofp i64 %str to float
   %conv1 = zext i8 %off to i64
   %or = or i64 %and, %conv1
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -6977,8 +6900,8 @@ define dso_local void @st_not_disjoint16_uint64_t_float(i64 %ptr, i64 %str) {
 entry:
   %conv = uitofp i64 %str to float
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -6995,8 +6918,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = uitofp i64 %str to float
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 8
   ret void
 }
 
@@ -7031,8 +6954,8 @@ define dso_local void @st_not_disjoint32_uint64_t_float(i64 %ptr, i64 %str) {
 entry:
   %conv = uitofp i64 %str to float
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -7072,8 +6995,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = uitofp i64 %str to float
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 16
   ret void
 }
 
@@ -7104,8 +7027,8 @@ define dso_local void @st_not_disjoint64_uint64_t_float(i64 %ptr, i64 %str) {
 entry:
   %conv = uitofp i64 %str to float
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -7146,8 +7069,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = uitofp i64 %str to float
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4096
   ret void
 }
 
@@ -7161,7 +7084,7 @@ define dso_local void @st_cst_align16_uint64_t_float(i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i64 %str to float
-  store float %conv, float* inttoptr (i64 4080 to float*), align 16
+  store float %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -7176,7 +7099,7 @@ define dso_local void @st_cst_align32_uint64_t_float(i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i64 %str to float
-  store float %conv, float* inttoptr (i64 9999900 to float*), align 4
+  store float %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -7212,7 +7135,7 @@ define dso_local void @st_cst_align64_uint64_t_float(i64 %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i64 %str to float
-  store float %conv, float* inttoptr (i64 1000000000000 to float*), align 4096
+  store float %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -7226,13 +7149,13 @@ define dso_local void @st_0_uint64_t_double(i64 %ptr, i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i64 %str to double
-  %0 = inttoptr i64 %ptr to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint64_t_double(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align16_uint64_t_double(ptr nocapture %ptr, i64 %str) {
 ; CHECK-LABEL: st_align16_uint64_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprd f0, r4
@@ -7241,14 +7164,13 @@ define dso_local void @st_align16_uint64_t_double(i8* nocapture %ptr, i64 %str)
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i64 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint64_t_double(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align32_uint64_t_double(ptr nocapture %ptr, i64 %str) {
 ; CHECK-P10-LABEL: st_align32_uint64_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprd f0, r4
@@ -7275,14 +7197,13 @@ define dso_local void @st_align32_uint64_t_double(i8* nocapture %ptr, i64 %str)
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i64 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint64_t_double(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align64_uint64_t_double(ptr nocapture %ptr, i64 %str) {
 ; CHECK-P10-LABEL: st_align64_uint64_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprd f0, r4
@@ -7313,14 +7234,13 @@ define dso_local void @st_align64_uint64_t_double(i8* nocapture %ptr, i64 %str)
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i64 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint64_t_double(i8* nocapture %ptr, i64 %off, i64 %str) {
+define dso_local void @st_reg_uint64_t_double(ptr nocapture %ptr, i64 %off, i64 %str) {
 ; CHECK-LABEL: st_reg_uint64_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprd f0, r5
@@ -7329,9 +7249,8 @@ define dso_local void @st_reg_uint64_t_double(i8* nocapture %ptr, i64 %off, i64
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i64 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
@@ -7348,8 +7267,8 @@ entry:
   %conv = uitofp i64 %str to double
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -7367,8 +7286,8 @@ entry:
   %conv = uitofp i64 %str to double
   %conv1 = zext i8 %off to i64
   %or = or i64 %and, %conv1
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -7384,8 +7303,8 @@ define dso_local void @st_not_disjoint16_uint64_t_double(i64 %ptr, i64 %str) {
 entry:
   %conv = uitofp i64 %str to double
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -7402,8 +7321,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = uitofp i64 %str to double
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -7438,8 +7357,8 @@ define dso_local void @st_not_disjoint32_uint64_t_double(i64 %ptr, i64 %str) {
 entry:
   %conv = uitofp i64 %str to double
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -7479,8 +7398,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = uitofp i64 %str to double
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 16
   ret void
 }
 
@@ -7511,8 +7430,8 @@ define dso_local void @st_not_disjoint64_uint64_t_double(i64 %ptr, i64 %str) {
 entry:
   %conv = uitofp i64 %str to double
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -7553,8 +7472,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = uitofp i64 %str to double
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 4096
   ret void
 }
 
@@ -7568,7 +7487,7 @@ define dso_local void @st_cst_align16_uint64_t_double(i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i64 %str to double
-  store double %conv, double* inttoptr (i64 4080 to double*), align 16
+  store double %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -7583,7 +7502,7 @@ define dso_local void @st_cst_align32_uint64_t_double(i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i64 %str to double
-  store double %conv, double* inttoptr (i64 9999900 to double*), align 8
+  store double %conv, ptr inttoptr (i64 9999900 to ptr), align 8
   ret void
 }
 
@@ -7619,7 +7538,7 @@ define dso_local void @st_cst_align64_uint64_t_double(i64 %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i64 %str to double
-  store double %conv, double* inttoptr (i64 1000000000000 to double*), align 4096
+  store double %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -7633,13 +7552,13 @@ define dso_local void @st_0_int64_t_float(i64 %ptr, i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i64 %str to float
-  %0 = inttoptr i64 %ptr to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int64_t_float(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align16_int64_t_float(ptr nocapture %ptr, i64 %str) {
 ; CHECK-LABEL: st_align16_int64_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprd f0, r4
@@ -7648,14 +7567,13 @@ define dso_local void @st_align16_int64_t_float(i8* nocapture %ptr, i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i64 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int64_t_float(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align32_int64_t_float(ptr nocapture %ptr, i64 %str) {
 ; CHECK-P10-LABEL: st_align32_int64_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprd f0, r4
@@ -7682,14 +7600,13 @@ define dso_local void @st_align32_int64_t_float(i8* nocapture %ptr, i64 %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i64 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int64_t_float(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align64_int64_t_float(ptr nocapture %ptr, i64 %str) {
 ; CHECK-P10-LABEL: st_align64_int64_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprd f0, r4
@@ -7720,14 +7637,13 @@ define dso_local void @st_align64_int64_t_float(i8* nocapture %ptr, i64 %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i64 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int64_t_float(i8* nocapture %ptr, i64 %off, i64 %str) {
+define dso_local void @st_reg_int64_t_float(ptr nocapture %ptr, i64 %off, i64 %str) {
 ; CHECK-LABEL: st_reg_int64_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprd f0, r5
@@ -7736,9 +7652,8 @@ define dso_local void @st_reg_int64_t_float(i8* nocapture %ptr, i64 %off, i64 %s
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i64 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
@@ -7755,8 +7670,8 @@ entry:
   %conv = sitofp i64 %str to float
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -7774,8 +7689,8 @@ entry:
   %conv = sitofp i64 %str to float
   %conv1 = zext i8 %off to i64
   %or = or i64 %and, %conv1
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -7791,8 +7706,8 @@ define dso_local void @st_not_disjoint16_int64_t_float(i64 %ptr, i64 %str) {
 entry:
   %conv = sitofp i64 %str to float
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -7809,8 +7724,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = sitofp i64 %str to float
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 8
   ret void
 }
 
@@ -7845,8 +7760,8 @@ define dso_local void @st_not_disjoint32_int64_t_float(i64 %ptr, i64 %str) {
 entry:
   %conv = sitofp i64 %str to float
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -7886,8 +7801,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = sitofp i64 %str to float
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 16
   ret void
 }
 
@@ -7918,8 +7833,8 @@ define dso_local void @st_not_disjoint64_int64_t_float(i64 %ptr, i64 %str) {
 entry:
   %conv = sitofp i64 %str to float
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -7960,8 +7875,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = sitofp i64 %str to float
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4096
   ret void
 }
 
@@ -7975,7 +7890,7 @@ define dso_local void @st_cst_align16_int64_t_float(i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i64 %str to float
-  store float %conv, float* inttoptr (i64 4080 to float*), align 16
+  store float %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -7990,7 +7905,7 @@ define dso_local void @st_cst_align32_int64_t_float(i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i64 %str to float
-  store float %conv, float* inttoptr (i64 9999900 to float*), align 4
+  store float %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -8026,7 +7941,7 @@ define dso_local void @st_cst_align64_int64_t_float(i64 %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i64 %str to float
-  store float %conv, float* inttoptr (i64 1000000000000 to float*), align 4096
+  store float %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -8040,13 +7955,13 @@ define dso_local void @st_0_int64_t_double(i64 %ptr, i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i64 %str to double
-  %0 = inttoptr i64 %ptr to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int64_t_double(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align16_int64_t_double(ptr nocapture %ptr, i64 %str) {
 ; CHECK-LABEL: st_align16_int64_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprd f0, r4
@@ -8055,14 +7970,13 @@ define dso_local void @st_align16_int64_t_double(i8* nocapture %ptr, i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i64 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int64_t_double(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align32_int64_t_double(ptr nocapture %ptr, i64 %str) {
 ; CHECK-P10-LABEL: st_align32_int64_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprd f0, r4
@@ -8089,14 +8003,13 @@ define dso_local void @st_align32_int64_t_double(i8* nocapture %ptr, i64 %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i64 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int64_t_double(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align64_int64_t_double(ptr nocapture %ptr, i64 %str) {
 ; CHECK-P10-LABEL: st_align64_int64_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprd f0, r4
@@ -8127,14 +8040,13 @@ define dso_local void @st_align64_int64_t_double(i8* nocapture %ptr, i64 %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i64 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int64_t_double(i8* nocapture %ptr, i64 %off, i64 %str) {
+define dso_local void @st_reg_int64_t_double(ptr nocapture %ptr, i64 %off, i64 %str) {
 ; CHECK-LABEL: st_reg_int64_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprd f0, r5
@@ -8143,9 +8055,8 @@ define dso_local void @st_reg_int64_t_double(i8* nocapture %ptr, i64 %off, i64 %
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i64 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
@@ -8162,8 +8073,8 @@ entry:
   %conv = sitofp i64 %str to double
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -8181,8 +8092,8 @@ entry:
   %conv = sitofp i64 %str to double
   %conv1 = zext i8 %off to i64
   %or = or i64 %and, %conv1
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -8198,8 +8109,8 @@ define dso_local void @st_not_disjoint16_int64_t_double(i64 %ptr, i64 %str) {
 entry:
   %conv = sitofp i64 %str to double
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -8216,8 +8127,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = sitofp i64 %str to double
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -8252,8 +8163,8 @@ define dso_local void @st_not_disjoint32_int64_t_double(i64 %ptr, i64 %str) {
 entry:
   %conv = sitofp i64 %str to double
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -8293,8 +8204,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = sitofp i64 %str to double
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 16
   ret void
 }
 
@@ -8325,8 +8236,8 @@ define dso_local void @st_not_disjoint64_int64_t_double(i64 %ptr, i64 %str) {
 entry:
   %conv = sitofp i64 %str to double
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -8367,8 +8278,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = sitofp i64 %str to double
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 4096
   ret void
 }
 
@@ -8382,7 +8293,7 @@ define dso_local void @st_cst_align16_int64_t_double(i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i64 %str to double
-  store double %conv, double* inttoptr (i64 4080 to double*), align 16
+  store double %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -8397,7 +8308,7 @@ define dso_local void @st_cst_align32_int64_t_double(i64 %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i64 %str to double
-  store double %conv, double* inttoptr (i64 9999900 to double*), align 8
+  store double %conv, ptr inttoptr (i64 9999900 to ptr), align 8
   ret void
 }
 
@@ -8433,6 +8344,6 @@ define dso_local void @st_cst_align64_int64_t_double(i64 %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i64 %str to double
-  store double %conv, double* inttoptr (i64 1000000000000 to double*), align 4096
+  store double %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/scalar-i8-ldst.ll b/llvm/test/CodeGen/PowerPC/scalar-i8-ldst.ll
index e1dbad0dcc8d..42a2cf6a4664 100644
--- a/llvm/test/CodeGen/PowerPC/scalar-i8-ldst.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar-i8-ldst.ll
@@ -26,26 +26,26 @@ define dso_local signext i8 @ld_0_int8_t_uint8_t(i64 %ptr) {
 ; CHECK-NEXT:    extsb r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i8, ptr %0, align 1
   ret i8 %1
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align16_int8_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align16_int8_t_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_int8_t_uint8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lbz r3, 8(r3)
 ; CHECK-NEXT:    extsb r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i8, ptr %add.ptr, align 1
   ret i8 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align32_int8_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align32_int8_t_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_int8_t_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plbz r3, 99999000(r3), 0
@@ -60,13 +60,13 @@ define dso_local signext i8 @ld_align32_int8_t_uint8_t(i8* nocapture readonly %p
 ; CHECK-PREP10-NEXT:    extsb r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i8, ptr %add.ptr, align 1
   ret i8 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align64_int8_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align64_int8_t_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_int8_t_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -84,21 +84,21 @@ define dso_local signext i8 @ld_align64_int8_t_uint8_t(i8* nocapture readonly %p
 ; CHECK-PREP10-NEXT:    extsb r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i8, ptr %add.ptr, align 1
   ret i8 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_reg_int8_t_uint8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i8 @ld_reg_int8_t_uint8_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_int8_t_uint8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lbzx r3, r3, r4
 ; CHECK-NEXT:    extsb r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i8, ptr %add.ptr, align 1
   ret i8 %0
 }
 
@@ -113,8 +113,8 @@ define dso_local signext i8 @ld_or_int8_t_uint8_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   ret i8 %1
 }
 
@@ -128,8 +128,8 @@ define dso_local signext i8 @ld_not_disjoint16_int8_t_uint8_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   ret i8 %1
 }
 
@@ -144,8 +144,8 @@ define dso_local signext i8 @ld_disjoint_align16_int8_t_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 8
   ret i8 %1
 }
 
@@ -160,8 +160,8 @@ define dso_local signext i8 @ld_not_disjoint32_int8_t_uint8_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   ret i8 %1
 }
 
@@ -197,8 +197,8 @@ define dso_local signext i8 @ld_disjoint_align32_int8_t_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 16
   ret i8 %1
 }
 
@@ -226,8 +226,8 @@ define dso_local signext i8 @ld_not_disjoint64_int8_t_uint8_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   ret i8 %1
 }
 
@@ -254,8 +254,8 @@ define dso_local signext i8 @ld_disjoint_align64_int8_t_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 4096
   ret i8 %1
 }
 
@@ -267,7 +267,7 @@ define dso_local signext i8 @ld_cst_align16_int8_t_uint8_t() {
 ; CHECK-NEXT:    extsb r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+  %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
   ret i8 %0
 }
 
@@ -280,7 +280,7 @@ define dso_local signext i8 @ld_cst_align32_int8_t_uint8_t() {
 ; CHECK-NEXT:    extsb r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+  %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
   ret i8 %0
 }
 
@@ -303,7 +303,7 @@ define dso_local signext i8 @ld_cst_align64_int8_t_uint8_t() {
 ; CHECK-PREP10-NEXT:    extsb r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+  %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret i8 %0
 }
 
@@ -321,14 +321,14 @@ define dso_local signext i8 @ld_0_int8_t_uint16_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    extsb r3, r3
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = trunc i16 %1 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align16_int8_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align16_int8_t_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-LE-LABEL: ld_align16_int8_t_uint16_t:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    lbz r3, 8(r3)
@@ -341,15 +341,14 @@ define dso_local signext i8 @ld_align16_int8_t_uint16_t(i8* nocapture readonly %
 ; CHECK-BE-NEXT:    extsb r3, r3
 ; CHECK-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = trunc i16 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = trunc i16 %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align32_int8_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align32_int8_t_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_align32_int8_t_uint16_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    plbz r3, 99999000(r3), 0
@@ -394,15 +393,14 @@ define dso_local signext i8 @ld_align32_int8_t_uint16_t(i8* nocapture readonly %
 ; CHECK-P8-BE-NEXT:    extsb r3, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = trunc i16 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = trunc i16 %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align64_int8_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align64_int8_t_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_align64_int8_t_uint16_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    pli r4, 244140625
@@ -458,15 +456,14 @@ define dso_local signext i8 @ld_align64_int8_t_uint16_t(i8* nocapture readonly %
 ; CHECK-P8-BE-NEXT:    extsb r3, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = trunc i16 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = trunc i16 %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_reg_int8_t_uint16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i8 @ld_reg_int8_t_uint16_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LE-LABEL: ld_reg_int8_t_uint16_t:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    lbzx r3, r3, r4
@@ -480,10 +477,9 @@ define dso_local signext i8 @ld_reg_int8_t_uint16_t(i8* nocapture readonly %ptr,
 ; CHECK-BE-NEXT:    extsb r3, r3
 ; CHECK-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = trunc i16 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = trunc i16 %0 to i8
   ret i8 %conv
 }
 
@@ -505,8 +501,8 @@ define dso_local signext i8 @ld_or_int8_t_uint16_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv1 = trunc i16 %1 to i8
   ret i8 %conv1
 }
@@ -528,8 +524,8 @@ define dso_local signext i8 @ld_not_disjoint16_int8_t_uint16_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = trunc i16 %1 to i8
   ret i8 %conv
 }
@@ -552,8 +548,8 @@ define dso_local signext i8 @ld_disjoint_align16_int8_t_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 8
   %conv = trunc i16 %1 to i8
   ret i8 %conv
 }
@@ -577,8 +573,8 @@ define dso_local signext i8 @ld_not_disjoint32_int8_t_uint16_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = trunc i16 %1 to i8
   ret i8 %conv
 }
@@ -643,8 +639,8 @@ define dso_local signext i8 @ld_disjoint_align32_int8_t_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 16
   %conv = trunc i16 %1 to i8
   ret i8 %conv
 }
@@ -716,8 +712,8 @@ define dso_local signext i8 @ld_not_disjoint64_int8_t_uint16_t(i64 %ptr) {
 ; CHECK-P8-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = trunc i16 %1 to i8
   ret i8 %conv
 }
@@ -787,8 +783,8 @@ define dso_local signext i8 @ld_disjoint_align64_int8_t_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 4096
   %conv = trunc i16 %1 to i8
   ret i8 %conv
 }
@@ -807,7 +803,7 @@ define dso_local signext i8 @ld_cst_align16_int8_t_uint16_t() {
 ; CHECK-BE-NEXT:    extsb r3, r3
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+  %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = trunc i16 %0 to i8
   ret i8 %conv
 }
@@ -828,7 +824,7 @@ define dso_local signext i8 @ld_cst_align32_int8_t_uint16_t() {
 ; CHECK-BE-NEXT:    extsb r3, r3
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+  %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = trunc i16 %0 to i8
   ret i8 %conv
 }
@@ -890,7 +886,7 @@ define dso_local signext i8 @ld_cst_align64_int8_t_uint16_t() {
 ; CHECK-P8-BE-NEXT:    extsb r3, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+  %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = trunc i16 %0 to i8
   ret i8 %conv
 }
@@ -909,14 +905,14 @@ define dso_local signext i8 @ld_0_int8_t_uint32_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    extsb r3, r3
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = trunc i32 %1 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align16_int8_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align16_int8_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-LE-LABEL: ld_align16_int8_t_uint32_t:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    lbz r3, 8(r3)
@@ -929,15 +925,14 @@ define dso_local signext i8 @ld_align16_int8_t_uint32_t(i8* nocapture readonly %
 ; CHECK-BE-NEXT:    extsb r3, r3
 ; CHECK-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = trunc i32 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = trunc i32 %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align32_int8_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align32_int8_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_align32_int8_t_uint32_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    plbz r3, 99999000(r3), 0
@@ -982,15 +977,14 @@ define dso_local signext i8 @ld_align32_int8_t_uint32_t(i8* nocapture readonly %
 ; CHECK-P8-BE-NEXT:    extsb r3, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = trunc i32 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = trunc i32 %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align64_int8_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align64_int8_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_align64_int8_t_uint32_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    pli r4, 244140625
@@ -1046,15 +1040,14 @@ define dso_local signext i8 @ld_align64_int8_t_uint32_t(i8* nocapture readonly %
 ; CHECK-P8-BE-NEXT:    extsb r3, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = trunc i32 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = trunc i32 %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_reg_int8_t_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i8 @ld_reg_int8_t_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LE-LABEL: ld_reg_int8_t_uint32_t:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    lbzx r3, r3, r4
@@ -1068,10 +1061,9 @@ define dso_local signext i8 @ld_reg_int8_t_uint32_t(i8* nocapture readonly %ptr,
 ; CHECK-BE-NEXT:    extsb r3, r3
 ; CHECK-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = trunc i32 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = trunc i32 %0 to i8
   ret i8 %conv
 }
 
@@ -1093,8 +1085,8 @@ define dso_local signext i8 @ld_or_int8_t_uint32_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv1 = trunc i32 %1 to i8
   ret i8 %conv1
 }
@@ -1116,8 +1108,8 @@ define dso_local signext i8 @ld_not_disjoint16_int8_t_uint32_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = trunc i32 %1 to i8
   ret i8 %conv
 }
@@ -1140,8 +1132,8 @@ define dso_local signext i8 @ld_disjoint_align16_int8_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 8
   %conv = trunc i32 %1 to i8
   ret i8 %conv
 }
@@ -1165,8 +1157,8 @@ define dso_local signext i8 @ld_not_disjoint32_int8_t_uint32_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = trunc i32 %1 to i8
   ret i8 %conv
 }
@@ -1231,8 +1223,8 @@ define dso_local signext i8 @ld_disjoint_align32_int8_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 16
   %conv = trunc i32 %1 to i8
   ret i8 %conv
 }
@@ -1304,8 +1296,8 @@ define dso_local signext i8 @ld_not_disjoint64_int8_t_uint32_t(i64 %ptr) {
 ; CHECK-P8-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = trunc i32 %1 to i8
   ret i8 %conv
 }
@@ -1375,8 +1367,8 @@ define dso_local signext i8 @ld_disjoint_align64_int8_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4096
   %conv = trunc i32 %1 to i8
   ret i8 %conv
 }
@@ -1395,7 +1387,7 @@ define dso_local signext i8 @ld_cst_align16_int8_t_uint32_t() {
 ; CHECK-BE-NEXT:    extsb r3, r3
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 4080 to i32*), align 16
+  %0 = load i32, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = trunc i32 %0 to i8
   ret i8 %conv
 }
@@ -1416,7 +1408,7 @@ define dso_local signext i8 @ld_cst_align32_int8_t_uint32_t() {
 ; CHECK-BE-NEXT:    extsb r3, r3
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 9999900 to i32*), align 4
+  %0 = load i32, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = trunc i32 %0 to i8
   ret i8 %conv
 }
@@ -1478,7 +1470,7 @@ define dso_local signext i8 @ld_cst_align64_int8_t_uint32_t() {
 ; CHECK-P8-BE-NEXT:    extsb r3, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+  %0 = load i32, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = trunc i32 %0 to i8
   ret i8 %conv
 }
@@ -1497,14 +1489,14 @@ define dso_local signext i8 @ld_0_int8_t_uint64_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    extsb r3, r3
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align16_int8_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align16_int8_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-LE-LABEL: ld_align16_int8_t_uint64_t:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    lbz r3, 8(r3)
@@ -1517,15 +1509,14 @@ define dso_local signext i8 @ld_align16_int8_t_uint64_t(i8* nocapture readonly %
 ; CHECK-BE-NEXT:    extsb r3, r3
 ; CHECK-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align32_int8_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align32_int8_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_align32_int8_t_uint64_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    plbz r3, 99999000(r3), 0
@@ -1570,15 +1561,14 @@ define dso_local signext i8 @ld_align32_int8_t_uint64_t(i8* nocapture readonly %
 ; CHECK-P8-BE-NEXT:    extsb r3, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align64_int8_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align64_int8_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_align64_int8_t_uint64_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    pli r4, 244140625
@@ -1634,15 +1624,14 @@ define dso_local signext i8 @ld_align64_int8_t_uint64_t(i8* nocapture readonly %
 ; CHECK-P8-BE-NEXT:    extsb r3, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_reg_int8_t_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i8 @ld_reg_int8_t_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LE-LABEL: ld_reg_int8_t_uint64_t:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    lbzx r3, r3, r4
@@ -1656,10 +1645,9 @@ define dso_local signext i8 @ld_reg_int8_t_uint64_t(i8* nocapture readonly %ptr,
 ; CHECK-BE-NEXT:    extsb r3, r3
 ; CHECK-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i8
   ret i8 %conv
 }
 
@@ -1681,8 +1669,8 @@ define dso_local signext i8 @ld_or_int8_t_uint64_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv1 = trunc i64 %1 to i8
   ret i8 %conv1
 }
@@ -1704,8 +1692,8 @@ define dso_local signext i8 @ld_not_disjoint16_int8_t_uint64_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i8
   ret i8 %conv
 }
@@ -1728,8 +1716,8 @@ define dso_local signext i8 @ld_disjoint_align16_int8_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i8
   ret i8 %conv
 }
@@ -1753,8 +1741,8 @@ define dso_local signext i8 @ld_not_disjoint32_int8_t_uint64_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i8
   ret i8 %conv
 }
@@ -1819,8 +1807,8 @@ define dso_local signext i8 @ld_disjoint_align32_int8_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 16
   %conv = trunc i64 %1 to i8
   ret i8 %conv
 }
@@ -1892,8 +1880,8 @@ define dso_local signext i8 @ld_not_disjoint64_int8_t_uint64_t(i64 %ptr) {
 ; CHECK-P8-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i8
   ret i8 %conv
 }
@@ -1963,8 +1951,8 @@ define dso_local signext i8 @ld_disjoint_align64_int8_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 4096
   %conv = trunc i64 %1 to i8
   ret i8 %conv
 }
@@ -1983,7 +1971,7 @@ define dso_local signext i8 @ld_cst_align16_int8_t_uint64_t() {
 ; CHECK-BE-NEXT:    extsb r3, r3
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 4080 to i64*), align 16
+  %0 = load i64, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = trunc i64 %0 to i8
   ret i8 %conv
 }
@@ -2004,7 +1992,7 @@ define dso_local signext i8 @ld_cst_align32_int8_t_uint64_t() {
 ; CHECK-BE-NEXT:    extsb r3, r3
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 9999900 to i64*), align 8
+  %0 = load i64, ptr inttoptr (i64 9999900 to ptr), align 8
   %conv = trunc i64 %0 to i8
   ret i8 %conv
 }
@@ -2066,7 +2054,7 @@ define dso_local signext i8 @ld_cst_align64_int8_t_uint64_t() {
 ; CHECK-P8-BE-NEXT:    extsb r3, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+  %0 = load i64, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = trunc i64 %0 to i8
   ret i8 %conv
 }
@@ -2081,14 +2069,14 @@ define dso_local signext i8 @ld_0_int8_t_float(i64 %ptr) {
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptosi float %1 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align16_int8_t_float(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align16_int8_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_int8_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs f0, 8(r3)
@@ -2097,15 +2085,14 @@ define dso_local signext i8 @ld_align16_int8_t_float(i8* nocapture readonly %ptr
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptosi float %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptosi float %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align32_int8_t_float(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align32_int8_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_int8_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfs f0, 99999000(r3), 0
@@ -2124,15 +2111,14 @@ define dso_local signext i8 @ld_align32_int8_t_float(i8* nocapture readonly %ptr
 ; CHECK-PREP10-NEXT:    extsw r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptosi float %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptosi float %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align64_int8_t_float(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align64_int8_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_int8_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -2154,15 +2140,14 @@ define dso_local signext i8 @ld_align64_int8_t_float(i8* nocapture readonly %ptr
 ; CHECK-PREP10-NEXT:    extsw r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptosi float %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptosi float %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_reg_int8_t_float(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i8 @ld_reg_int8_t_float(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_int8_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfsx f0, r3, r4
@@ -2171,10 +2156,9 @@ define dso_local signext i8 @ld_reg_int8_t_float(i8* nocapture readonly %ptr, i6
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptosi float %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptosi float %0 to i8
   ret i8 %conv
 }
 
@@ -2191,8 +2175,8 @@ define dso_local signext i8 @ld_or_int8_t_float(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv1 = fptosi float %1 to i8
   ret i8 %conv1
 }
@@ -2209,8 +2193,8 @@ define dso_local signext i8 @ld_not_disjoint16_int8_t_float(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptosi float %1 to i8
   ret i8 %conv
 }
@@ -2228,8 +2212,8 @@ define dso_local signext i8 @ld_disjoint_align16_int8_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 8
   %conv = fptosi float %1 to i8
   ret i8 %conv
 }
@@ -2247,8 +2231,8 @@ define dso_local signext i8 @ld_not_disjoint32_int8_t_float(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptosi float %1 to i8
   ret i8 %conv
 }
@@ -2291,8 +2275,8 @@ define dso_local signext i8 @ld_disjoint_align32_int8_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 16
   %conv = fptosi float %1 to i8
   ret i8 %conv
 }
@@ -2325,8 +2309,8 @@ define dso_local signext i8 @ld_not_disjoint64_int8_t_float(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptosi float %1 to i8
   ret i8 %conv
 }
@@ -2358,8 +2342,8 @@ define dso_local signext i8 @ld_disjoint_align64_int8_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4096
   %conv = fptosi float %1 to i8
   ret i8 %conv
 }
@@ -2374,7 +2358,7 @@ define dso_local signext i8 @ld_cst_align16_int8_t_float() {
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 4080 to float*), align 16
+  %0 = load float, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = fptosi float %0 to i8
   ret i8 %conv
 }
@@ -2390,7 +2374,7 @@ define dso_local signext i8 @ld_cst_align32_int8_t_float() {
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 9999900 to float*), align 4
+  %0 = load float, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = fptosi float %0 to i8
   ret i8 %conv
 }
@@ -2418,7 +2402,7 @@ define dso_local signext i8 @ld_cst_align64_int8_t_float() {
 ; CHECK-PREP10-NEXT:    extsw r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 1000000000000 to float*), align 4096
+  %0 = load float, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = fptosi float %0 to i8
   ret i8 %conv
 }
@@ -2433,14 +2417,14 @@ define dso_local signext i8 @ld_0_int8_t_double(i64 %ptr) {
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptosi double %1 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align16_int8_t_double(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align16_int8_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_int8_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd f0, 8(r3)
@@ -2449,15 +2433,14 @@ define dso_local signext i8 @ld_align16_int8_t_double(i8* nocapture readonly %pt
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptosi double %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptosi double %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align32_int8_t_double(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align32_int8_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_int8_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfd f0, 99999000(r3), 0
@@ -2476,15 +2459,14 @@ define dso_local signext i8 @ld_align32_int8_t_double(i8* nocapture readonly %pt
 ; CHECK-PREP10-NEXT:    extsw r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptosi double %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptosi double %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align64_int8_t_double(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align64_int8_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_int8_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -2506,15 +2488,14 @@ define dso_local signext i8 @ld_align64_int8_t_double(i8* nocapture readonly %pt
 ; CHECK-PREP10-NEXT:    extsw r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptosi double %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptosi double %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_reg_int8_t_double(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i8 @ld_reg_int8_t_double(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_int8_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfdx f0, r3, r4
@@ -2523,10 +2504,9 @@ define dso_local signext i8 @ld_reg_int8_t_double(i8* nocapture readonly %ptr, i
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptosi double %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptosi double %0 to i8
   ret i8 %conv
 }
 
@@ -2543,8 +2523,8 @@ define dso_local signext i8 @ld_or_int8_t_double(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv1 = fptosi double %1 to i8
   ret i8 %conv1
 }
@@ -2561,8 +2541,8 @@ define dso_local signext i8 @ld_not_disjoint16_int8_t_double(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptosi double %1 to i8
   ret i8 %conv
 }
@@ -2580,8 +2560,8 @@ define dso_local signext i8 @ld_disjoint_align16_int8_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptosi double %1 to i8
   ret i8 %conv
 }
@@ -2599,8 +2579,8 @@ define dso_local signext i8 @ld_not_disjoint32_int8_t_double(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptosi double %1 to i8
   ret i8 %conv
 }
@@ -2643,8 +2623,8 @@ define dso_local signext i8 @ld_disjoint_align32_int8_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 16
   %conv = fptosi double %1 to i8
   ret i8 %conv
 }
@@ -2677,8 +2657,8 @@ define dso_local signext i8 @ld_not_disjoint64_int8_t_double(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptosi double %1 to i8
   ret i8 %conv
 }
@@ -2710,8 +2690,8 @@ define dso_local signext i8 @ld_disjoint_align64_int8_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 4096
   %conv = fptosi double %1 to i8
   ret i8 %conv
 }
@@ -2726,7 +2706,7 @@ define dso_local signext i8 @ld_cst_align16_int8_t_double() {
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 4080 to double*), align 16
+  %0 = load double, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = fptosi double %0 to i8
   ret i8 %conv
 }
@@ -2742,7 +2722,7 @@ define dso_local signext i8 @ld_cst_align32_int8_t_double() {
 ; CHECK-NEXT:    extsw r3, r3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 9999900 to double*), align 8
+  %0 = load double, ptr inttoptr (i64 9999900 to ptr), align 8
   %conv = fptosi double %0 to i8
   ret i8 %conv
 }
@@ -2770,7 +2750,7 @@ define dso_local signext i8 @ld_cst_align64_int8_t_double() {
 ; CHECK-PREP10-NEXT:    extsw r3, r3
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 1000000000000 to double*), align 4096
+  %0 = load double, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = fptosi double %0 to i8
   ret i8 %conv
 }
@@ -2782,25 +2762,25 @@ define dso_local zeroext i8 @ld_0_uint8_t_uint8_t(i64 %ptr) {
 ; CHECK-NEXT:    lbz r3, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i8, ptr %0, align 1
   ret i8 %1
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align16_uint8_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align16_uint8_t_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_uint8_t_uint8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lbz r3, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i8, ptr %add.ptr, align 1
   ret i8 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align32_uint8_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align32_uint8_t_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_uint8_t_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plbz r3, 99999000(r3), 0
@@ -2813,13 +2793,13 @@ define dso_local zeroext i8 @ld_align32_uint8_t_uint8_t(i8* nocapture readonly %
 ; CHECK-PREP10-NEXT:    lbzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i8, ptr %add.ptr, align 1
   ret i8 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_unalign64_uint8_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_unalign64_uint8_t_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign64_uint8_t_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 232
@@ -2837,13 +2817,13 @@ define dso_local zeroext i8 @ld_unalign64_uint8_t_uint8_t(i8* nocapture readonly
 ; CHECK-PREP10-NEXT:    lbzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+  %0 = load i8, ptr %add.ptr, align 1
   ret i8 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align64_uint8_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align64_uint8_t_uint8_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_uint8_t_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -2859,20 +2839,20 @@ define dso_local zeroext i8 @ld_align64_uint8_t_uint8_t(i8* nocapture readonly %
 ; CHECK-PREP10-NEXT:    lbzx r3, r3, r4
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i8, ptr %add.ptr, align 1
   ret i8 %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_reg_uint8_t_uint8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i8 @ld_reg_uint8_t_uint8_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_uint8_t_uint8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lbzx r3, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i8, ptr %add.ptr, align 1
   ret i8 %0
 }
 
@@ -2886,8 +2866,8 @@ define dso_local zeroext i8 @ld_or_uint8_t_uint8_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   ret i8 %1
 }
 
@@ -2900,8 +2880,8 @@ define dso_local zeroext i8 @ld_not_disjoint16_uint8_t_uint8_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   ret i8 %1
 }
 
@@ -2915,8 +2895,8 @@ define dso_local zeroext i8 @ld_disjoint_align16_uint8_t_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 8
   ret i8 %1
 }
 
@@ -2930,8 +2910,8 @@ define dso_local zeroext i8 @ld_not_disjoint32_uint8_t_uint8_t(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   ret i8 %1
 }
 
@@ -2964,8 +2944,8 @@ define dso_local zeroext i8 @ld_disjoint_align32_uint8_t_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 16
   ret i8 %1
 }
 
@@ -2991,8 +2971,8 @@ define dso_local zeroext i8 @ld_not_disjoint64_uint8_t_uint8_t(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   ret i8 %1
 }
 
@@ -3019,8 +2999,8 @@ define dso_local zeroext i8 @ld_disjoint_unalign64_uint8_t_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 1
   ret i8 %1
 }
 
@@ -3045,8 +3025,8 @@ define dso_local zeroext i8 @ld_disjoint_align64_uint8_t_uint8_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i8*
-  %1 = load i8, i8* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i8, ptr %0, align 4096
   ret i8 %1
 }
 
@@ -3057,7 +3037,7 @@ define dso_local zeroext i8 @ld_cst_align16_uint8_t_uint8_t() {
 ; CHECK-NEXT:    lbz r3, 4080(0)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+  %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
   ret i8 %0
 }
 
@@ -3069,7 +3049,7 @@ define dso_local zeroext i8 @ld_cst_align32_uint8_t_uint8_t() {
 ; CHECK-NEXT:    lbz r3, -27108(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+  %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
   ret i8 %0
 }
 
@@ -3092,7 +3072,7 @@ define dso_local zeroext i8 @ld_cst_unalign64_uint8_t_uint8_t() {
 ; CHECK-PREP10-NEXT:    lbz r3, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 1000000000001 to i8*), align 1
+  %0 = load i8, ptr inttoptr (i64 1000000000001 to ptr), align 1
   ret i8 %0
 }
 
@@ -3113,7 +3093,7 @@ define dso_local zeroext i8 @ld_cst_align64_uint8_t_uint8_t() {
 ; CHECK-PREP10-NEXT:    lbz r3, 0(r3)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+  %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret i8 %0
 }
 
@@ -3129,14 +3109,14 @@ define dso_local zeroext i8 @ld_0_uint8_t_uint16_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    lbz r3, 1(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = trunc i16 %1 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align16_uint8_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align16_uint8_t_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-LE-LABEL: ld_align16_uint8_t_uint16_t:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    lbz r3, 8(r3)
@@ -3147,15 +3127,14 @@ define dso_local zeroext i8 @ld_align16_uint8_t_uint16_t(i8* nocapture readonly
 ; CHECK-BE-NEXT:    lbz r3, 9(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = trunc i16 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = trunc i16 %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align32_uint8_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align32_uint8_t_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_align32_uint8_t_uint16_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    plbz r3, 99999000(r3), 0
@@ -3194,15 +3173,14 @@ define dso_local zeroext i8 @ld_align32_uint8_t_uint16_t(i8* nocapture readonly
 ; CHECK-P8-BE-NEXT:    lbzx r3, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = trunc i16 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = trunc i16 %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_unalign64_uint8_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_unalign64_uint8_t_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_unalign64_uint8_t_uint16_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    pli r4, 232
@@ -3255,15 +3233,14 @@ define dso_local zeroext i8 @ld_unalign64_uint8_t_uint16_t(i8* nocapture readonl
 ; CHECK-P8-BE-NEXT:    lbzx r3, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = trunc i16 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = trunc i16 %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align64_uint8_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align64_uint8_t_uint16_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_align64_uint8_t_uint16_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    pli r4, 244140625
@@ -3313,15 +3290,14 @@ define dso_local zeroext i8 @ld_align64_uint8_t_uint16_t(i8* nocapture readonly
 ; CHECK-P8-BE-NEXT:    lbzx r3, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = trunc i16 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = trunc i16 %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_reg_uint8_t_uint16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i8 @ld_reg_uint8_t_uint16_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LE-LABEL: ld_reg_uint8_t_uint16_t:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    lbzx r3, r3, r4
@@ -3333,10 +3309,9 @@ define dso_local zeroext i8 @ld_reg_uint8_t_uint16_t(i8* nocapture readonly %ptr
 ; CHECK-BE-NEXT:    lbz r3, 1(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i16*
-  %1 = load i16, i16* %0, align 2
-  %conv = trunc i16 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i16, ptr %add.ptr, align 2
+  %conv = trunc i16 %0 to i8
   ret i8 %conv
 }
 
@@ -3356,8 +3331,8 @@ define dso_local zeroext i8 @ld_or_uint8_t_uint16_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv1 = trunc i16 %1 to i8
   ret i8 %conv1
 }
@@ -3377,8 +3352,8 @@ define dso_local zeroext i8 @ld_not_disjoint16_uint8_t_uint16_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = trunc i16 %1 to i8
   ret i8 %conv
 }
@@ -3399,8 +3374,8 @@ define dso_local zeroext i8 @ld_disjoint_align16_uint8_t_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 8
   %conv = trunc i16 %1 to i8
   ret i8 %conv
 }
@@ -3422,8 +3397,8 @@ define dso_local zeroext i8 @ld_not_disjoint32_uint8_t_uint16_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = trunc i16 %1 to i8
   ret i8 %conv
 }
@@ -3482,8 +3457,8 @@ define dso_local zeroext i8 @ld_disjoint_align32_uint8_t_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 16
   %conv = trunc i16 %1 to i8
   ret i8 %conv
 }
@@ -3549,8 +3524,8 @@ define dso_local zeroext i8 @ld_not_disjoint64_uint8_t_uint16_t(i64 %ptr) {
 ; CHECK-P8-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = trunc i16 %1 to i8
   ret i8 %conv
 }
@@ -3617,8 +3592,8 @@ define dso_local zeroext i8 @ld_disjoint_unalign64_uint8_t_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 2
   %conv = trunc i16 %1 to i8
   ret i8 %conv
 }
@@ -3682,8 +3657,8 @@ define dso_local zeroext i8 @ld_disjoint_align64_uint8_t_uint16_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i16*
-  %1 = load i16, i16* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i16, ptr %0, align 4096
   %conv = trunc i16 %1 to i8
   ret i8 %conv
 }
@@ -3700,7 +3675,7 @@ define dso_local zeroext i8 @ld_cst_align16_uint8_t_uint16_t() {
 ; CHECK-BE-NEXT:    lbz r3, 4081(0)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+  %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = trunc i16 %0 to i8
   ret i8 %conv
 }
@@ -3719,7 +3694,7 @@ define dso_local zeroext i8 @ld_cst_align32_uint8_t_uint16_t() {
 ; CHECK-BE-NEXT:    lbz r3, -27107(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+  %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = trunc i16 %0 to i8
   ret i8 %conv
 }
@@ -3778,7 +3753,7 @@ define dso_local zeroext i8 @ld_cst_unalign64_uint8_t_uint16_t() {
 ; CHECK-P8-BE-NEXT:    lbz r3, 0(r3)
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 1000000000001 to i16*), align 2
+  %0 = load i16, ptr inttoptr (i64 1000000000001 to ptr), align 2
   %conv = trunc i16 %0 to i8
   ret i8 %conv
 }
@@ -3834,7 +3809,7 @@ define dso_local zeroext i8 @ld_cst_align64_uint8_t_uint16_t() {
 ; CHECK-P8-BE-NEXT:    lbz r3, 0(r3)
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+  %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = trunc i16 %0 to i8
   ret i8 %conv
 }
@@ -3851,14 +3826,14 @@ define dso_local zeroext i8 @ld_0_uint8_t_uint32_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    lbz r3, 3(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = trunc i32 %1 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align16_uint8_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align16_uint8_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-LE-LABEL: ld_align16_uint8_t_uint32_t:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    lbz r3, 8(r3)
@@ -3869,15 +3844,14 @@ define dso_local zeroext i8 @ld_align16_uint8_t_uint32_t(i8* nocapture readonly
 ; CHECK-BE-NEXT:    lbz r3, 11(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = trunc i32 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = trunc i32 %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align32_uint8_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align32_uint8_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_align32_uint8_t_uint32_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    plbz r3, 99999000(r3), 0
@@ -3916,15 +3890,14 @@ define dso_local zeroext i8 @ld_align32_uint8_t_uint32_t(i8* nocapture readonly
 ; CHECK-P8-BE-NEXT:    lbzx r3, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = trunc i32 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = trunc i32 %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_unalign64_uint8_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_unalign64_uint8_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_unalign64_uint8_t_uint32_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    pli r4, 232
@@ -3977,15 +3950,14 @@ define dso_local zeroext i8 @ld_unalign64_uint8_t_uint32_t(i8* nocapture readonl
 ; CHECK-P8-BE-NEXT:    lbzx r3, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = trunc i32 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = trunc i32 %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align64_uint8_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align64_uint8_t_uint32_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_align64_uint8_t_uint32_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    pli r4, 244140625
@@ -4035,15 +4007,14 @@ define dso_local zeroext i8 @ld_align64_uint8_t_uint32_t(i8* nocapture readonly
 ; CHECK-P8-BE-NEXT:    lbzx r3, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = trunc i32 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = trunc i32 %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_reg_uint8_t_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i8 @ld_reg_uint8_t_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LE-LABEL: ld_reg_uint8_t_uint32_t:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    lbzx r3, r3, r4
@@ -4055,10 +4026,9 @@ define dso_local zeroext i8 @ld_reg_uint8_t_uint32_t(i8* nocapture readonly %ptr
 ; CHECK-BE-NEXT:    lbz r3, 3(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i32*
-  %1 = load i32, i32* %0, align 4
-  %conv = trunc i32 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i32, ptr %add.ptr, align 4
+  %conv = trunc i32 %0 to i8
   ret i8 %conv
 }
 
@@ -4078,8 +4048,8 @@ define dso_local zeroext i8 @ld_or_uint8_t_uint32_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv1 = trunc i32 %1 to i8
   ret i8 %conv1
 }
@@ -4099,8 +4069,8 @@ define dso_local zeroext i8 @ld_not_disjoint16_uint8_t_uint32_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = trunc i32 %1 to i8
   ret i8 %conv
 }
@@ -4121,8 +4091,8 @@ define dso_local zeroext i8 @ld_disjoint_align16_uint8_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 8
   %conv = trunc i32 %1 to i8
   ret i8 %conv
 }
@@ -4144,8 +4114,8 @@ define dso_local zeroext i8 @ld_not_disjoint32_uint8_t_uint32_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = trunc i32 %1 to i8
   ret i8 %conv
 }
@@ -4204,8 +4174,8 @@ define dso_local zeroext i8 @ld_disjoint_align32_uint8_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 16
   %conv = trunc i32 %1 to i8
   ret i8 %conv
 }
@@ -4271,8 +4241,8 @@ define dso_local zeroext i8 @ld_not_disjoint64_uint8_t_uint32_t(i64 %ptr) {
 ; CHECK-P8-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = trunc i32 %1 to i8
   ret i8 %conv
 }
@@ -4339,8 +4309,8 @@ define dso_local zeroext i8 @ld_disjoint_unalign64_uint8_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4
   %conv = trunc i32 %1 to i8
   ret i8 %conv
 }
@@ -4404,8 +4374,8 @@ define dso_local zeroext i8 @ld_disjoint_align64_uint8_t_uint32_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i32*
-  %1 = load i32, i32* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i32, ptr %0, align 4096
   %conv = trunc i32 %1 to i8
   ret i8 %conv
 }
@@ -4422,7 +4392,7 @@ define dso_local zeroext i8 @ld_cst_align16_uint8_t_uint32_t() {
 ; CHECK-BE-NEXT:    lbz r3, 4083(0)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 4080 to i32*), align 16
+  %0 = load i32, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = trunc i32 %0 to i8
   ret i8 %conv
 }
@@ -4441,7 +4411,7 @@ define dso_local zeroext i8 @ld_cst_align32_uint8_t_uint32_t() {
 ; CHECK-BE-NEXT:    lbz r3, -27105(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 9999900 to i32*), align 4
+  %0 = load i32, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = trunc i32 %0 to i8
   ret i8 %conv
 }
@@ -4500,7 +4470,7 @@ define dso_local zeroext i8 @ld_cst_unalign64_uint8_t_uint32_t() {
 ; CHECK-P8-BE-NEXT:    lbz r3, 0(r3)
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 1000000000001 to i32*), align 4
+  %0 = load i32, ptr inttoptr (i64 1000000000001 to ptr), align 4
   %conv = trunc i32 %0 to i8
   ret i8 %conv
 }
@@ -4556,7 +4526,7 @@ define dso_local zeroext i8 @ld_cst_align64_uint8_t_uint32_t() {
 ; CHECK-P8-BE-NEXT:    lbz r3, 0(r3)
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load i32, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+  %0 = load i32, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = trunc i32 %0 to i8
   ret i8 %conv
 }
@@ -4573,14 +4543,14 @@ define dso_local zeroext i8 @ld_0_uint8_t_uint64_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    lbz r3, 7(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align16_uint8_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align16_uint8_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-LE-LABEL: ld_align16_uint8_t_uint64_t:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    lbz r3, 8(r3)
@@ -4591,15 +4561,14 @@ define dso_local zeroext i8 @ld_align16_uint8_t_uint64_t(i8* nocapture readonly
 ; CHECK-BE-NEXT:    lbz r3, 15(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align32_uint8_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align32_uint8_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_align32_uint8_t_uint64_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    plbz r3, 99999000(r3), 0
@@ -4638,15 +4607,14 @@ define dso_local zeroext i8 @ld_align32_uint8_t_uint64_t(i8* nocapture readonly
 ; CHECK-P8-BE-NEXT:    lbzx r3, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_unalign64_uint8_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_unalign64_uint8_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_unalign64_uint8_t_uint64_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    pli r4, 232
@@ -4699,15 +4667,14 @@ define dso_local zeroext i8 @ld_unalign64_uint8_t_uint64_t(i8* nocapture readonl
 ; CHECK-P8-BE-NEXT:    lbzx r3, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align64_uint8_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align64_uint8_t_uint64_t(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LE-LABEL: ld_align64_uint8_t_uint64_t:
 ; CHECK-P10-LE:       # %bb.0: # %entry
 ; CHECK-P10-LE-NEXT:    pli r4, 244140625
@@ -4757,15 +4724,14 @@ define dso_local zeroext i8 @ld_align64_uint8_t_uint64_t(i8* nocapture readonly
 ; CHECK-P8-BE-NEXT:    lbzx r3, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_reg_uint8_t_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i8 @ld_reg_uint8_t_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LE-LABEL: ld_reg_uint8_t_uint64_t:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    lbzx r3, r3, r4
@@ -4777,10 +4743,9 @@ define dso_local zeroext i8 @ld_reg_uint8_t_uint64_t(i8* nocapture readonly %ptr
 ; CHECK-BE-NEXT:    lbz r3, 7(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %conv = trunc i64 %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load i64, ptr %add.ptr, align 8
+  %conv = trunc i64 %0 to i8
   ret i8 %conv
 }
 
@@ -4800,8 +4765,8 @@ define dso_local zeroext i8 @ld_or_uint8_t_uint64_t(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv1 = trunc i64 %1 to i8
   ret i8 %conv1
 }
@@ -4821,8 +4786,8 @@ define dso_local zeroext i8 @ld_not_disjoint16_uint8_t_uint64_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i8
   ret i8 %conv
 }
@@ -4843,8 +4808,8 @@ define dso_local zeroext i8 @ld_disjoint_align16_uint8_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i8
   ret i8 %conv
 }
@@ -4866,8 +4831,8 @@ define dso_local zeroext i8 @ld_not_disjoint32_uint8_t_uint64_t(i64 %ptr) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i8
   ret i8 %conv
 }
@@ -4926,8 +4891,8 @@ define dso_local zeroext i8 @ld_disjoint_align32_uint8_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 16
   %conv = trunc i64 %1 to i8
   ret i8 %conv
 }
@@ -4993,8 +4958,8 @@ define dso_local zeroext i8 @ld_not_disjoint64_uint8_t_uint64_t(i64 %ptr) {
 ; CHECK-P8-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i8
   ret i8 %conv
 }
@@ -5061,8 +5026,8 @@ define dso_local zeroext i8 @ld_disjoint_unalign64_uint8_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000001
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 8
   %conv = trunc i64 %1 to i8
   ret i8 %conv
 }
@@ -5126,8 +5091,8 @@ define dso_local zeroext i8 @ld_disjoint_align64_uint8_t_uint64_t(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i64*
-  %1 = load i64, i64* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load i64, ptr %0, align 4096
   %conv = trunc i64 %1 to i8
   ret i8 %conv
 }
@@ -5144,7 +5109,7 @@ define dso_local zeroext i8 @ld_cst_align16_uint8_t_uint64_t() {
 ; CHECK-BE-NEXT:    lbz r3, 4087(0)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 4080 to i64*), align 16
+  %0 = load i64, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = trunc i64 %0 to i8
   ret i8 %conv
 }
@@ -5163,7 +5128,7 @@ define dso_local zeroext i8 @ld_cst_align32_uint8_t_uint64_t() {
 ; CHECK-BE-NEXT:    lbz r3, -27101(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 9999900 to i64*), align 8
+  %0 = load i64, ptr inttoptr (i64 9999900 to ptr), align 8
   %conv = trunc i64 %0 to i8
   ret i8 %conv
 }
@@ -5222,7 +5187,7 @@ define dso_local zeroext i8 @ld_cst_unalign64_uint8_t_uint64_t() {
 ; CHECK-P8-BE-NEXT:    lbz r3, 0(r3)
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 1000000000001 to i64*), align 8
+  %0 = load i64, ptr inttoptr (i64 1000000000001 to ptr), align 8
   %conv = trunc i64 %0 to i8
   ret i8 %conv
 }
@@ -5278,7 +5243,7 @@ define dso_local zeroext i8 @ld_cst_align64_uint8_t_uint64_t() {
 ; CHECK-P8-BE-NEXT:    lbz r3, 0(r3)
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load i64, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+  %0 = load i64, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = trunc i64 %0 to i8
   ret i8 %conv
 }
@@ -5292,14 +5257,14 @@ define dso_local zeroext i8 @ld_0_uint8_t_float(i64 %ptr) {
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptoui float %1 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align16_uint8_t_float(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align16_uint8_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_uint8_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs f0, 8(r3)
@@ -5307,15 +5272,14 @@ define dso_local zeroext i8 @ld_align16_uint8_t_float(i8* nocapture readonly %pt
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptoui float %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptoui float %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align32_uint8_t_float(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align32_uint8_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_uint8_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfs f0, 99999000(r3), 0
@@ -5332,15 +5296,14 @@ define dso_local zeroext i8 @ld_align32_uint8_t_float(i8* nocapture readonly %pt
 ; CHECK-PREP10-NEXT:    mffprwz r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptoui float %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptoui float %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_unalign64_uint8_t_float(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_unalign64_uint8_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign64_uint8_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 232
@@ -5362,15 +5325,14 @@ define dso_local zeroext i8 @ld_unalign64_uint8_t_float(i8* nocapture readonly %
 ; CHECK-PREP10-NEXT:    mffprwz r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptoui float %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptoui float %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align64_uint8_t_float(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align64_uint8_t_float(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_uint8_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -5390,15 +5352,14 @@ define dso_local zeroext i8 @ld_align64_uint8_t_float(i8* nocapture readonly %pt
 ; CHECK-PREP10-NEXT:    mffprwz r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptoui float %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptoui float %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_reg_uint8_t_float(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i8 @ld_reg_uint8_t_float(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_uint8_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfsx f0, r3, r4
@@ -5406,10 +5367,9 @@ define dso_local zeroext i8 @ld_reg_uint8_t_float(i8* nocapture readonly %ptr, i
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to float*
-  %1 = load float, float* %0, align 4
-  %conv = fptoui float %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load float, ptr %add.ptr, align 4
+  %conv = fptoui float %0 to i8
   ret i8 %conv
 }
 
@@ -5425,8 +5385,8 @@ define dso_local zeroext i8 @ld_or_uint8_t_float(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv1 = fptoui float %1 to i8
   ret i8 %conv1
 }
@@ -5442,8 +5402,8 @@ define dso_local zeroext i8 @ld_not_disjoint16_uint8_t_float(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptoui float %1 to i8
   ret i8 %conv
 }
@@ -5460,8 +5420,8 @@ define dso_local zeroext i8 @ld_disjoint_align16_uint8_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 8
   %conv = fptoui float %1 to i8
   ret i8 %conv
 }
@@ -5478,8 +5438,8 @@ define dso_local zeroext i8 @ld_not_disjoint32_uint8_t_float(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptoui float %1 to i8
   ret i8 %conv
 }
@@ -5519,8 +5479,8 @@ define dso_local zeroext i8 @ld_disjoint_align32_uint8_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 16
   %conv = fptoui float %1 to i8
   ret i8 %conv
 }
@@ -5551,8 +5511,8 @@ define dso_local zeroext i8 @ld_not_disjoint64_uint8_t_float(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptoui float %1 to i8
   ret i8 %conv
 }
@@ -5584,8 +5544,8 @@ define dso_local zeroext i8 @ld_disjoint_unalign64_uint8_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000001
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4
   %conv = fptoui float %1 to i8
   ret i8 %conv
 }
@@ -5615,8 +5575,8 @@ define dso_local zeroext i8 @ld_disjoint_align64_uint8_t_float(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to float*
-  %1 = load float, float* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load float, ptr %0, align 4096
   %conv = fptoui float %1 to i8
   ret i8 %conv
 }
@@ -5630,7 +5590,7 @@ define dso_local zeroext i8 @ld_cst_align16_uint8_t_float() {
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 4080 to float*), align 16
+  %0 = load float, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = fptoui float %0 to i8
   ret i8 %conv
 }
@@ -5645,7 +5605,7 @@ define dso_local zeroext i8 @ld_cst_align32_uint8_t_float() {
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 9999900 to float*), align 4
+  %0 = load float, ptr inttoptr (i64 9999900 to ptr), align 4
   %conv = fptoui float %0 to i8
   ret i8 %conv
 }
@@ -5673,7 +5633,7 @@ define dso_local zeroext i8 @ld_cst_unalign64_uint8_t_float() {
 ; CHECK-PREP10-NEXT:    mffprwz r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 1000000000001 to float*), align 4
+  %0 = load float, ptr inttoptr (i64 1000000000001 to ptr), align 4
   %conv = fptoui float %0 to i8
   ret i8 %conv
 }
@@ -5699,7 +5659,7 @@ define dso_local zeroext i8 @ld_cst_align64_uint8_t_float() {
 ; CHECK-PREP10-NEXT:    mffprwz r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load float, float* inttoptr (i64 1000000000000 to float*), align 4096
+  %0 = load float, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = fptoui float %0 to i8
   ret i8 %conv
 }
@@ -5713,14 +5673,14 @@ define dso_local zeroext i8 @ld_0_uint8_t_double(i64 %ptr) {
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptoui double %1 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align16_uint8_t_double(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align16_uint8_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: ld_align16_uint8_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd f0, 8(r3)
@@ -5728,15 +5688,14 @@ define dso_local zeroext i8 @ld_align16_uint8_t_double(i8* nocapture readonly %p
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptoui double %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptoui double %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align32_uint8_t_double(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align32_uint8_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_uint8_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plfd f0, 99999000(r3), 0
@@ -5753,15 +5712,14 @@ define dso_local zeroext i8 @ld_align32_uint8_t_double(i8* nocapture readonly %p
 ; CHECK-PREP10-NEXT:    mffprwz r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptoui double %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptoui double %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_unalign64_uint8_t_double(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_unalign64_uint8_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign64_uint8_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 232
@@ -5783,15 +5741,14 @@ define dso_local zeroext i8 @ld_unalign64_uint8_t_double(i8* nocapture readonly
 ; CHECK-PREP10-NEXT:    mffprwz r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptoui double %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptoui double %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align64_uint8_t_double(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align64_uint8_t_double(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_uint8_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -5811,15 +5768,14 @@ define dso_local zeroext i8 @ld_align64_uint8_t_double(i8* nocapture readonly %p
 ; CHECK-PREP10-NEXT:    mffprwz r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptoui double %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptoui double %0 to i8
   ret i8 %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_reg_uint8_t_double(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i8 @ld_reg_uint8_t_double(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_uint8_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfdx f0, r3, r4
@@ -5827,10 +5783,9 @@ define dso_local zeroext i8 @ld_reg_uint8_t_double(i8* nocapture readonly %ptr,
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %conv = fptoui double %1 to i8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load double, ptr %add.ptr, align 8
+  %conv = fptoui double %0 to i8
   ret i8 %conv
 }
 
@@ -5846,8 +5801,8 @@ define dso_local zeroext i8 @ld_or_uint8_t_double(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv1 = fptoui double %1 to i8
   ret i8 %conv1
 }
@@ -5863,8 +5818,8 @@ define dso_local zeroext i8 @ld_not_disjoint16_uint8_t_double(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptoui double %1 to i8
   ret i8 %conv
 }
@@ -5881,8 +5836,8 @@ define dso_local zeroext i8 @ld_disjoint_align16_uint8_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptoui double %1 to i8
   ret i8 %conv
 }
@@ -5899,8 +5854,8 @@ define dso_local zeroext i8 @ld_not_disjoint32_uint8_t_double(i64 %ptr) {
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptoui double %1 to i8
   ret i8 %conv
 }
@@ -5940,8 +5895,8 @@ define dso_local zeroext i8 @ld_disjoint_align32_uint8_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 16
   %conv = fptoui double %1 to i8
   ret i8 %conv
 }
@@ -5972,8 +5927,8 @@ define dso_local zeroext i8 @ld_not_disjoint64_uint8_t_double(i64 %ptr) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptoui double %1 to i8
   ret i8 %conv
 }
@@ -6005,8 +5960,8 @@ define dso_local zeroext i8 @ld_disjoint_unalign64_uint8_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000001
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 8
   %conv = fptoui double %1 to i8
   ret i8 %conv
 }
@@ -6036,8 +5991,8 @@ define dso_local zeroext i8 @ld_disjoint_align64_uint8_t_double(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to double*
-  %1 = load double, double* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load double, ptr %0, align 4096
   %conv = fptoui double %1 to i8
   ret i8 %conv
 }
@@ -6051,7 +6006,7 @@ define dso_local zeroext i8 @ld_cst_align16_uint8_t_double() {
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 4080 to double*), align 16
+  %0 = load double, ptr inttoptr (i64 4080 to ptr), align 16
   %conv = fptoui double %0 to i8
   ret i8 %conv
 }
@@ -6066,7 +6021,7 @@ define dso_local zeroext i8 @ld_cst_align32_uint8_t_double() {
 ; CHECK-NEXT:    mffprwz r3, f0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 9999900 to double*), align 8
+  %0 = load double, ptr inttoptr (i64 9999900 to ptr), align 8
   %conv = fptoui double %0 to i8
   ret i8 %conv
 }
@@ -6094,7 +6049,7 @@ define dso_local zeroext i8 @ld_cst_unalign64_uint8_t_double() {
 ; CHECK-PREP10-NEXT:    mffprwz r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 1000000000001 to double*), align 8
+  %0 = load double, ptr inttoptr (i64 1000000000001 to ptr), align 8
   %conv = fptoui double %0 to i8
   ret i8 %conv
 }
@@ -6120,7 +6075,7 @@ define dso_local zeroext i8 @ld_cst_align64_uint8_t_double() {
 ; CHECK-PREP10-NEXT:    mffprwz r3, f0
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %0 = load double, double* inttoptr (i64 1000000000000 to double*), align 4096
+  %0 = load double, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   %conv = fptoui double %0 to i8
   ret i8 %conv
 }
@@ -6132,25 +6087,25 @@ define dso_local void @st_0_uint8_t_uint8_t(i64 %ptr, i8 zeroext %str) {
 ; CHECK-NEXT:    stb r4, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to i8*
-  store i8 %str, i8* %0, align 1
+  %0 = inttoptr i64 %ptr to ptr
+  store i8 %str, ptr %0, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint8_t_uint8_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align16_uint8_t_uint8_t(ptr nocapture %ptr, i8 zeroext %str) {
 ; CHECK-LABEL: st_align16_uint8_t_uint8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stb r4, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  store i8 %str, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i8 %str, ptr %add.ptr, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint8_t_uint8_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align32_uint8_t_uint8_t(ptr nocapture %ptr, i8 zeroext %str) {
 ; CHECK-P10-LABEL: st_align32_uint8_t_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pstb r4, 99999000(r3), 0
@@ -6163,13 +6118,13 @@ define dso_local void @st_align32_uint8_t_uint8_t(i8* nocapture %ptr, i8 zeroext
 ; CHECK-PREP10-NEXT:    stbx r4, r3, r5
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  store i8 %str, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i8 %str, ptr %add.ptr, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint8_t_uint8_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align64_uint8_t_uint8_t(ptr nocapture %ptr, i8 zeroext %str) {
 ; CHECK-P10-LABEL: st_align64_uint8_t_uint8_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r5, 244140625
@@ -6185,20 +6140,20 @@ define dso_local void @st_align64_uint8_t_uint8_t(i8* nocapture %ptr, i8 zeroext
 ; CHECK-PREP10-NEXT:    stbx r4, r3, r5
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  store i8 %str, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i8 %str, ptr %add.ptr, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint8_t_uint8_t(i8* nocapture %ptr, i64 %off, i8 zeroext %str) {
+define dso_local void @st_reg_uint8_t_uint8_t(ptr nocapture %ptr, i64 %off, i8 zeroext %str) {
 ; CHECK-LABEL: st_reg_uint8_t_uint8_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stbx r5, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  store i8 %str, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i8 %str, ptr %add.ptr, align 1
   ret void
 }
 
@@ -6212,8 +6167,8 @@ define dso_local void @st_or1_uint8_t_uint8_t(i64 %ptr, i8 zeroext %off, i8 zero
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to i8*
-  store i8 %str, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %str, ptr %0, align 1
   ret void
 }
 
@@ -6226,8 +6181,8 @@ define dso_local void @st_not_disjoint16_uint8_t_uint8_t(i64 %ptr, i8 zeroext %s
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i8*
-  store i8 %str, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %str, ptr %0, align 1
   ret void
 }
 
@@ -6241,8 +6196,8 @@ define dso_local void @st_disjoint_align16_uint8_t_uint8_t(i64 %ptr, i8 zeroext
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i8*
-  store i8 %str, i8* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i8 %str, ptr %0, align 8
   ret void
 }
 
@@ -6256,8 +6211,8 @@ define dso_local void @st_not_disjoint32_uint8_t_uint8_t(i64 %ptr, i8 zeroext %s
 ; CHECK-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i8*
-  store i8 %str, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %str, ptr %0, align 1
   ret void
 }
 
@@ -6290,8 +6245,8 @@ define dso_local void @st_disjoint_align32_uint8_t_uint8_t(i64 %ptr, i8 zeroext
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i8*
-  store i8 %str, i8* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i8 %str, ptr %0, align 16
   ret void
 }
 
@@ -6317,8 +6272,8 @@ define dso_local void @st_not_disjoint64_uint8_t_uint8_t(i64 %ptr, i8 zeroext %s
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i8*
-  store i8 %str, i8* %0, align 1
+  %0 = inttoptr i64 %or to ptr
+  store i8 %str, ptr %0, align 1
   ret void
 }
 
@@ -6343,8 +6298,8 @@ define dso_local void @st_disjoint_align64_uint8_t_uint8_t(i64 %ptr, i8 zeroext
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i8*
-  store i8 %str, i8* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i8 %str, ptr %0, align 4096
   ret void
 }
 
@@ -6355,7 +6310,7 @@ define dso_local void @st_cst_align16_uint8_t_uint8_t(i8 zeroext %str) {
 ; CHECK-NEXT:    stb r3, 4080(0)
 ; CHECK-NEXT:    blr
 entry:
-  store i8 %str, i8* inttoptr (i64 4080 to i8*), align 16
+  store i8 %str, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -6367,7 +6322,7 @@ define dso_local void @st_cst_align32_uint8_t_uint8_t(i8 zeroext %str) {
 ; CHECK-NEXT:    stb r3, -27108(r4)
 ; CHECK-NEXT:    blr
 entry:
-  store i8 %str, i8* inttoptr (i64 9999900 to i8*), align 4
+  store i8 %str, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -6388,7 +6343,7 @@ define dso_local void @st_cst_align64_uint8_t_uint8_t(i8 zeroext %str) {
 ; CHECK-PREP10-NEXT:    stb r3, 0(r4)
 ; CHECK-PREP10-NEXT:    blr
 entry:
-  store i8 %str, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+  store i8 %str, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -6400,27 +6355,26 @@ define dso_local void @st_0_uint8_t_uint16_t(i64 %ptr, i8 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i8 %str to i16
-  %0 = inttoptr i64 %ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %ptr to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint8_t_uint16_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align16_uint8_t_uint16_t(ptr nocapture %ptr, i8 zeroext %str) {
 ; CHECK-LABEL: st_align16_uint8_t_uint16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sth r4, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i8 %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint8_t_uint16_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align32_uint8_t_uint16_t(ptr nocapture %ptr, i8 zeroext %str) {
 ; CHECK-P10-LABEL: st_align32_uint8_t_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    psth r4, 99999000(r3), 0
@@ -6434,14 +6388,13 @@ define dso_local void @st_align32_uint8_t_uint16_t(i8* nocapture %ptr, i8 zeroex
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = zext i8 %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint8_t_uint16_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align64_uint8_t_uint16_t(ptr nocapture %ptr, i8 zeroext %str) {
 ; CHECK-P10-LABEL: st_align64_uint8_t_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r5, 244140625
@@ -6458,23 +6411,21 @@ define dso_local void @st_align64_uint8_t_uint16_t(i8* nocapture %ptr, i8 zeroex
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = zext i8 %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint8_t_uint16_t(i8* nocapture %ptr, i64 %off, i8 zeroext %str) {
+define dso_local void @st_reg_uint8_t_uint16_t(ptr nocapture %ptr, i64 %off, i8 zeroext %str) {
 ; CHECK-LABEL: st_reg_uint8_t_uint16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sthx r5, r3, r4
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i8 %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
@@ -6489,8 +6440,8 @@ entry:
   %conv = zext i8 %str to i16
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -6504,8 +6455,8 @@ define dso_local void @st_not_disjoint16_uint8_t_uint16_t(i64 %ptr, i8 zeroext %
 entry:
   %conv = zext i8 %str to i16
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -6520,8 +6471,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = zext i8 %str to i16
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6536,8 +6487,8 @@ define dso_local void @st_not_disjoint32_uint8_t_uint16_t(i64 %ptr, i8 zeroext %
 entry:
   %conv = zext i8 %str to i16
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -6571,8 +6522,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = zext i8 %str to i16
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 16
   ret void
 }
 
@@ -6599,8 +6550,8 @@ define dso_local void @st_not_disjoint64_uint8_t_uint16_t(i64 %ptr, i8 zeroext %
 entry:
   %conv = zext i8 %str to i16
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -6626,8 +6577,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = zext i8 %str to i16
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -6639,7 +6590,7 @@ define dso_local void @st_cst_align16_uint8_t_uint16_t(i8 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i8 %str to i16
-  store i16 %conv, i16* inttoptr (i64 4080 to i16*), align 16
+  store i16 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -6652,7 +6603,7 @@ define dso_local void @st_cst_align32_uint8_t_uint16_t(i8 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i8 %str to i16
-  store i16 %conv, i16* inttoptr (i64 9999900 to i16*), align 4
+  store i16 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -6674,7 +6625,7 @@ define dso_local void @st_cst_align64_uint8_t_uint16_t(i8 zeroext %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = zext i8 %str to i16
-  store i16 %conv, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+  store i16 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -6686,27 +6637,26 @@ define dso_local void @st_0_uint8_t_uint32_t(i64 %ptr, i8 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i8 %str to i32
-  %0 = inttoptr i64 %ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint8_t_uint32_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align16_uint8_t_uint32_t(ptr nocapture %ptr, i8 zeroext %str) {
 ; CHECK-LABEL: st_align16_uint8_t_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stw r4, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i8 %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint8_t_uint32_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align32_uint8_t_uint32_t(ptr nocapture %ptr, i8 zeroext %str) {
 ; CHECK-P10-LABEL: st_align32_uint8_t_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pstw r4, 99999000(r3), 0
@@ -6720,14 +6670,13 @@ define dso_local void @st_align32_uint8_t_uint32_t(i8* nocapture %ptr, i8 zeroex
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = zext i8 %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint8_t_uint32_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align64_uint8_t_uint32_t(ptr nocapture %ptr, i8 zeroext %str) {
 ; CHECK-P10-LABEL: st_align64_uint8_t_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r5, 244140625
@@ -6744,23 +6693,21 @@ define dso_local void @st_align64_uint8_t_uint32_t(i8* nocapture %ptr, i8 zeroex
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = zext i8 %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint8_t_uint32_t(i8* nocapture %ptr, i64 %off, i8 zeroext %str) {
+define dso_local void @st_reg_uint8_t_uint32_t(ptr nocapture %ptr, i64 %off, i8 zeroext %str) {
 ; CHECK-LABEL: st_reg_uint8_t_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stwx r5, r3, r4
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i8 %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
@@ -6775,8 +6722,8 @@ entry:
   %conv = zext i8 %str to i32
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -6790,8 +6737,8 @@ define dso_local void @st_not_disjoint16_uint8_t_uint32_t(i64 %ptr, i8 zeroext %
 entry:
   %conv = zext i8 %str to i32
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -6806,8 +6753,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = zext i8 %str to i32
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 8
   ret void
 }
 
@@ -6822,8 +6769,8 @@ define dso_local void @st_not_disjoint32_uint8_t_uint32_t(i64 %ptr, i8 zeroext %
 entry:
   %conv = zext i8 %str to i32
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -6857,8 +6804,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = zext i8 %str to i32
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 16
   ret void
 }
 
@@ -6885,8 +6832,8 @@ define dso_local void @st_not_disjoint64_uint8_t_uint32_t(i64 %ptr, i8 zeroext %
 entry:
   %conv = zext i8 %str to i32
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -6912,8 +6859,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = zext i8 %str to i32
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -6925,7 +6872,7 @@ define dso_local void @st_cst_align16_uint8_t_uint32_t(i8 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i8 %str to i32
-  store i32 %conv, i32* inttoptr (i64 4080 to i32*), align 16
+  store i32 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -6938,7 +6885,7 @@ define dso_local void @st_cst_align32_uint8_t_uint32_t(i8 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i8 %str to i32
-  store i32 %conv, i32* inttoptr (i64 9999900 to i32*), align 4
+  store i32 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -6960,7 +6907,7 @@ define dso_local void @st_cst_align64_uint8_t_uint32_t(i8 zeroext %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = zext i8 %str to i32
-  store i32 %conv, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+  store i32 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -6972,27 +6919,26 @@ define dso_local void @st_0_uint8_t_uint64_t(i64 %ptr, i8 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i8 %str to i64
-  %0 = inttoptr i64 %ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint8_t_uint64_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align16_uint8_t_uint64_t(ptr nocapture %ptr, i8 zeroext %str) {
 ; CHECK-LABEL: st_align16_uint8_t_uint64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    std r4, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i8 %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint8_t_uint64_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align32_uint8_t_uint64_t(ptr nocapture %ptr, i8 zeroext %str) {
 ; CHECK-P10-LABEL: st_align32_uint8_t_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pstd r4, 99999000(r3), 0
@@ -7006,14 +6952,13 @@ define dso_local void @st_align32_uint8_t_uint64_t(i8* nocapture %ptr, i8 zeroex
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = zext i8 %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint8_t_uint64_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align64_uint8_t_uint64_t(ptr nocapture %ptr, i8 zeroext %str) {
 ; CHECK-P10-LABEL: st_align64_uint8_t_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r5, 244140625
@@ -7030,23 +6975,21 @@ define dso_local void @st_align64_uint8_t_uint64_t(i8* nocapture %ptr, i8 zeroex
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = zext i8 %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint8_t_uint64_t(i8* nocapture %ptr, i64 %off, i8 zeroext %str) {
+define dso_local void @st_reg_uint8_t_uint64_t(ptr nocapture %ptr, i64 %off, i8 zeroext %str) {
 ; CHECK-LABEL: st_reg_uint8_t_uint64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stdx r5, r3, r4
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i8 %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
@@ -7061,8 +7004,8 @@ entry:
   %conv = zext i8 %str to i64
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -7076,8 +7019,8 @@ define dso_local void @st_not_disjoint16_uint8_t_uint64_t(i64 %ptr, i8 zeroext %
 entry:
   %conv = zext i8 %str to i64
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -7092,8 +7035,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = zext i8 %str to i64
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -7108,8 +7051,8 @@ define dso_local void @st_not_disjoint32_uint8_t_uint64_t(i64 %ptr, i8 zeroext %
 entry:
   %conv = zext i8 %str to i64
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -7143,8 +7086,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = zext i8 %str to i64
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 16
   ret void
 }
 
@@ -7171,8 +7114,8 @@ define dso_local void @st_not_disjoint64_uint8_t_uint64_t(i64 %ptr, i8 zeroext %
 entry:
   %conv = zext i8 %str to i64
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -7198,8 +7141,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = zext i8 %str to i64
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -7211,7 +7154,7 @@ define dso_local void @st_cst_align16_uint8_t_uint64_t(i8 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i8 %str to i64
-  store i64 %conv, i64* inttoptr (i64 4080 to i64*), align 16
+  store i64 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -7224,7 +7167,7 @@ define dso_local void @st_cst_align32_uint8_t_uint64_t(i8 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = zext i8 %str to i64
-  store i64 %conv, i64* inttoptr (i64 9999900 to i64*), align 8
+  store i64 %conv, ptr inttoptr (i64 9999900 to ptr), align 8
   ret void
 }
 
@@ -7246,7 +7189,7 @@ define dso_local void @st_cst_align64_uint8_t_uint64_t(i8 zeroext %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = zext i8 %str to i64
-  store i64 %conv, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+  store i64 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -7260,13 +7203,13 @@ define dso_local void @st_0_uint8_t_float(i64 %ptr, i8 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i8 %str to float
-  %0 = inttoptr i64 %ptr to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint8_t_float(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align16_uint8_t_float(ptr nocapture %ptr, i8 zeroext %str) {
 ; CHECK-LABEL: st_align16_uint8_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprwz f0, r4
@@ -7275,14 +7218,13 @@ define dso_local void @st_align16_uint8_t_float(i8* nocapture %ptr, i8 zeroext %
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i8 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint8_t_float(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align32_uint8_t_float(ptr nocapture %ptr, i8 zeroext %str) {
 ; CHECK-P10-LABEL: st_align32_uint8_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprwz f0, r4
@@ -7309,14 +7251,13 @@ define dso_local void @st_align32_uint8_t_float(i8* nocapture %ptr, i8 zeroext %
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i8 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint8_t_float(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align64_uint8_t_float(ptr nocapture %ptr, i8 zeroext %str) {
 ; CHECK-P10-LABEL: st_align64_uint8_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprwz f0, r4
@@ -7347,14 +7288,13 @@ define dso_local void @st_align64_uint8_t_float(i8* nocapture %ptr, i8 zeroext %
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i8 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint8_t_float(i8* nocapture %ptr, i64 %off, i8 zeroext %str) {
+define dso_local void @st_reg_uint8_t_float(ptr nocapture %ptr, i64 %off, i8 zeroext %str) {
 ; CHECK-LABEL: st_reg_uint8_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprwz f0, r5
@@ -7363,9 +7303,8 @@ define dso_local void @st_reg_uint8_t_float(i8* nocapture %ptr, i64 %off, i8 zer
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i8 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
@@ -7382,8 +7321,8 @@ entry:
   %conv = uitofp i8 %str to float
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -7399,8 +7338,8 @@ define dso_local void @st_not_disjoint16_uint8_t_float(i64 %ptr, i8 zeroext %str
 entry:
   %conv = uitofp i8 %str to float
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -7417,8 +7356,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = uitofp i8 %str to float
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 8
   ret void
 }
 
@@ -7453,8 +7392,8 @@ define dso_local void @st_not_disjoint32_uint8_t_float(i64 %ptr, i8 zeroext %str
 entry:
   %conv = uitofp i8 %str to float
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -7494,8 +7433,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = uitofp i8 %str to float
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 16
   ret void
 }
 
@@ -7526,8 +7465,8 @@ define dso_local void @st_not_disjoint64_uint8_t_float(i64 %ptr, i8 zeroext %str
 entry:
   %conv = uitofp i8 %str to float
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -7568,8 +7507,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = uitofp i8 %str to float
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4096
   ret void
 }
 
@@ -7583,7 +7522,7 @@ define dso_local void @st_cst_align16_uint8_t_float(i8 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i8 %str to float
-  store float %conv, float* inttoptr (i64 4080 to float*), align 16
+  store float %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -7598,7 +7537,7 @@ define dso_local void @st_cst_align32_uint8_t_float(i8 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i8 %str to float
-  store float %conv, float* inttoptr (i64 9999900 to float*), align 4
+  store float %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -7634,7 +7573,7 @@ define dso_local void @st_cst_align64_uint8_t_float(i8 zeroext %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i8 %str to float
-  store float %conv, float* inttoptr (i64 1000000000000 to float*), align 4096
+  store float %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -7648,13 +7587,13 @@ define dso_local void @st_0_uint8_t_double(i64 %ptr, i8 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i8 %str to double
-  %0 = inttoptr i64 %ptr to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint8_t_double(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align16_uint8_t_double(ptr nocapture %ptr, i8 zeroext %str) {
 ; CHECK-LABEL: st_align16_uint8_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprwz f0, r4
@@ -7663,14 +7602,13 @@ define dso_local void @st_align16_uint8_t_double(i8* nocapture %ptr, i8 zeroext
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i8 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint8_t_double(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align32_uint8_t_double(ptr nocapture %ptr, i8 zeroext %str) {
 ; CHECK-P10-LABEL: st_align32_uint8_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprwz f0, r4
@@ -7697,14 +7635,13 @@ define dso_local void @st_align32_uint8_t_double(i8* nocapture %ptr, i8 zeroext
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i8 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint8_t_double(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align64_uint8_t_double(ptr nocapture %ptr, i8 zeroext %str) {
 ; CHECK-P10-LABEL: st_align64_uint8_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprwz f0, r4
@@ -7735,14 +7672,13 @@ define dso_local void @st_align64_uint8_t_double(i8* nocapture %ptr, i8 zeroext
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i8 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint8_t_double(i8* nocapture %ptr, i64 %off, i8 zeroext %str) {
+define dso_local void @st_reg_uint8_t_double(ptr nocapture %ptr, i64 %off, i8 zeroext %str) {
 ; CHECK-LABEL: st_reg_uint8_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprwz f0, r5
@@ -7751,9 +7687,8 @@ define dso_local void @st_reg_uint8_t_double(i8* nocapture %ptr, i64 %off, i8 ze
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i8 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
@@ -7770,8 +7705,8 @@ entry:
   %conv = uitofp i8 %str to double
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -7787,8 +7722,8 @@ define dso_local void @st_not_disjoint16_uint8_t_double(i64 %ptr, i8 zeroext %st
 entry:
   %conv = uitofp i8 %str to double
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -7805,8 +7740,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = uitofp i8 %str to double
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -7841,8 +7776,8 @@ define dso_local void @st_not_disjoint32_uint8_t_double(i64 %ptr, i8 zeroext %st
 entry:
   %conv = uitofp i8 %str to double
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -7882,8 +7817,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = uitofp i8 %str to double
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 16
   ret void
 }
 
@@ -7914,8 +7849,8 @@ define dso_local void @st_not_disjoint64_uint8_t_double(i64 %ptr, i8 zeroext %st
 entry:
   %conv = uitofp i8 %str to double
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -7956,8 +7891,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = uitofp i8 %str to double
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 4096
   ret void
 }
 
@@ -7971,7 +7906,7 @@ define dso_local void @st_cst_align16_uint8_t_double(i8 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i8 %str to double
-  store double %conv, double* inttoptr (i64 4080 to double*), align 16
+  store double %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -7986,7 +7921,7 @@ define dso_local void @st_cst_align32_uint8_t_double(i8 zeroext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = uitofp i8 %str to double
-  store double %conv, double* inttoptr (i64 9999900 to double*), align 8
+  store double %conv, ptr inttoptr (i64 9999900 to ptr), align 8
   ret void
 }
 
@@ -8022,7 +7957,7 @@ define dso_local void @st_cst_align64_uint8_t_double(i8 zeroext %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = uitofp i8 %str to double
-  store double %conv, double* inttoptr (i64 1000000000000 to double*), align 4096
+  store double %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -8034,27 +7969,26 @@ define dso_local void @st_0_int8_t_uint16_t(i64 %ptr, i8 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i8 %str to i16
-  %0 = inttoptr i64 %ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %ptr to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int8_t_uint16_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align16_int8_t_uint16_t(ptr nocapture %ptr, i8 signext %str) {
 ; CHECK-LABEL: st_align16_int8_t_uint16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sth r4, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i8 %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int8_t_uint16_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align32_int8_t_uint16_t(ptr nocapture %ptr, i8 signext %str) {
 ; CHECK-P10-LABEL: st_align32_int8_t_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    psth r4, 99999000(r3), 0
@@ -8068,14 +8002,13 @@ define dso_local void @st_align32_int8_t_uint16_t(i8* nocapture %ptr, i8 signext
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = sext i8 %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int8_t_uint16_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align64_int8_t_uint16_t(ptr nocapture %ptr, i8 signext %str) {
 ; CHECK-P10-LABEL: st_align64_int8_t_uint16_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r5, 244140625
@@ -8092,23 +8025,21 @@ define dso_local void @st_align64_int8_t_uint16_t(i8* nocapture %ptr, i8 signext
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = sext i8 %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int8_t_uint16_t(i8* nocapture %ptr, i64 %off, i8 signext %str) {
+define dso_local void @st_reg_int8_t_uint16_t(ptr nocapture %ptr, i64 %off, i8 signext %str) {
 ; CHECK-LABEL: st_reg_int8_t_uint16_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sthx r5, r3, r4
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i8 %str to i16
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i16*
-  store i16 %conv, i16* %0, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i16 %conv, ptr %add.ptr, align 2
   ret void
 }
 
@@ -8123,8 +8054,8 @@ entry:
   %conv = sext i8 %str to i16
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -8138,8 +8069,8 @@ define dso_local void @st_not_disjoint16_int8_t_uint16_t(i64 %ptr, i8 signext %s
 entry:
   %conv = sext i8 %str to i16
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -8154,8 +8085,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = sext i8 %str to i16
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 8
   ret void
 }
 
@@ -8170,8 +8101,8 @@ define dso_local void @st_not_disjoint32_int8_t_uint16_t(i64 %ptr, i8 signext %s
 entry:
   %conv = sext i8 %str to i16
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -8205,8 +8136,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = sext i8 %str to i16
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 16
   ret void
 }
 
@@ -8233,8 +8164,8 @@ define dso_local void @st_not_disjoint64_int8_t_uint16_t(i64 %ptr, i8 signext %s
 entry:
   %conv = sext i8 %str to i16
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 2
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 2
   ret void
 }
 
@@ -8260,8 +8191,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = sext i8 %str to i16
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i16*
-  store i16 %conv, i16* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i16 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -8273,7 +8204,7 @@ define dso_local void @st_cst_align16_int8_t_uint16_t(i8 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i8 %str to i16
-  store i16 %conv, i16* inttoptr (i64 4080 to i16*), align 16
+  store i16 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -8286,7 +8217,7 @@ define dso_local void @st_cst_align32_int8_t_uint16_t(i8 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i8 %str to i16
-  store i16 %conv, i16* inttoptr (i64 9999900 to i16*), align 4
+  store i16 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -8308,7 +8239,7 @@ define dso_local void @st_cst_align64_int8_t_uint16_t(i8 signext %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = sext i8 %str to i16
-  store i16 %conv, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+  store i16 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -8320,27 +8251,26 @@ define dso_local void @st_0_int8_t_uint32_t(i64 %ptr, i8 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i8 %str to i32
-  %0 = inttoptr i64 %ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int8_t_uint32_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align16_int8_t_uint32_t(ptr nocapture %ptr, i8 signext %str) {
 ; CHECK-LABEL: st_align16_int8_t_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stw r4, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i8 %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int8_t_uint32_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align32_int8_t_uint32_t(ptr nocapture %ptr, i8 signext %str) {
 ; CHECK-P10-LABEL: st_align32_int8_t_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pstw r4, 99999000(r3), 0
@@ -8354,14 +8284,13 @@ define dso_local void @st_align32_int8_t_uint32_t(i8* nocapture %ptr, i8 signext
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = sext i8 %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int8_t_uint32_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align64_int8_t_uint32_t(ptr nocapture %ptr, i8 signext %str) {
 ; CHECK-P10-LABEL: st_align64_int8_t_uint32_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r5, 244140625
@@ -8378,23 +8307,21 @@ define dso_local void @st_align64_int8_t_uint32_t(i8* nocapture %ptr, i8 signext
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = sext i8 %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int8_t_uint32_t(i8* nocapture %ptr, i64 %off, i8 signext %str) {
+define dso_local void @st_reg_int8_t_uint32_t(ptr nocapture %ptr, i64 %off, i8 signext %str) {
 ; CHECK-LABEL: st_reg_int8_t_uint32_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stwx r5, r3, r4
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i8 %str to i32
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i32*
-  store i32 %conv, i32* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i32 %conv, ptr %add.ptr, align 4
   ret void
 }
 
@@ -8409,8 +8336,8 @@ entry:
   %conv = sext i8 %str to i32
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -8424,8 +8351,8 @@ define dso_local void @st_not_disjoint16_int8_t_uint32_t(i64 %ptr, i8 signext %s
 entry:
   %conv = sext i8 %str to i32
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -8440,8 +8367,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = sext i8 %str to i32
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 8
   ret void
 }
 
@@ -8456,8 +8383,8 @@ define dso_local void @st_not_disjoint32_int8_t_uint32_t(i64 %ptr, i8 signext %s
 entry:
   %conv = sext i8 %str to i32
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -8491,8 +8418,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = sext i8 %str to i32
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 16
   ret void
 }
 
@@ -8519,8 +8446,8 @@ define dso_local void @st_not_disjoint64_int8_t_uint32_t(i64 %ptr, i8 signext %s
 entry:
   %conv = sext i8 %str to i32
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4
   ret void
 }
 
@@ -8546,8 +8473,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = sext i8 %str to i32
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i32*
-  store i32 %conv, i32* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i32 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -8559,7 +8486,7 @@ define dso_local void @st_cst_align16_int8_t_uint32_t(i8 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i8 %str to i32
-  store i32 %conv, i32* inttoptr (i64 4080 to i32*), align 16
+  store i32 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -8572,7 +8499,7 @@ define dso_local void @st_cst_align32_int8_t_uint32_t(i8 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i8 %str to i32
-  store i32 %conv, i32* inttoptr (i64 9999900 to i32*), align 4
+  store i32 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -8594,7 +8521,7 @@ define dso_local void @st_cst_align64_int8_t_uint32_t(i8 signext %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = sext i8 %str to i32
-  store i32 %conv, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+  store i32 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -8606,27 +8533,26 @@ define dso_local void @st_0_int8_t_uint64_t(i64 %ptr, i8 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i8 %str to i64
-  %0 = inttoptr i64 %ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int8_t_uint64_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align16_int8_t_uint64_t(ptr nocapture %ptr, i8 signext %str) {
 ; CHECK-LABEL: st_align16_int8_t_uint64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    std r4, 8(r3)
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i8 %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int8_t_uint64_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align32_int8_t_uint64_t(ptr nocapture %ptr, i8 signext %str) {
 ; CHECK-P10-LABEL: st_align32_int8_t_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pstd r4, 99999000(r3), 0
@@ -8640,14 +8566,13 @@ define dso_local void @st_align32_int8_t_uint64_t(i8* nocapture %ptr, i8 signext
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = sext i8 %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int8_t_uint64_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align64_int8_t_uint64_t(ptr nocapture %ptr, i8 signext %str) {
 ; CHECK-P10-LABEL: st_align64_int8_t_uint64_t:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r5, 244140625
@@ -8664,23 +8589,21 @@ define dso_local void @st_align64_int8_t_uint64_t(i8* nocapture %ptr, i8 signext
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = sext i8 %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int8_t_uint64_t(i8* nocapture %ptr, i64 %off, i8 signext %str) {
+define dso_local void @st_reg_int8_t_uint64_t(ptr nocapture %ptr, i64 %off, i8 signext %str) {
 ; CHECK-LABEL: st_reg_int8_t_uint64_t:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stdx r5, r3, r4
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i8 %str to i64
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to i64*
-  store i64 %conv, i64* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store i64 %conv, ptr %add.ptr, align 8
   ret void
 }
 
@@ -8695,8 +8618,8 @@ entry:
   %conv = sext i8 %str to i64
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -8710,8 +8633,8 @@ define dso_local void @st_not_disjoint16_int8_t_uint64_t(i64 %ptr, i8 signext %s
 entry:
   %conv = sext i8 %str to i64
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -8726,8 +8649,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = sext i8 %str to i64
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -8742,8 +8665,8 @@ define dso_local void @st_not_disjoint32_int8_t_uint64_t(i64 %ptr, i8 signext %s
 entry:
   %conv = sext i8 %str to i64
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -8777,8 +8700,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = sext i8 %str to i64
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 16
   ret void
 }
 
@@ -8805,8 +8728,8 @@ define dso_local void @st_not_disjoint64_int8_t_uint64_t(i64 %ptr, i8 signext %s
 entry:
   %conv = sext i8 %str to i64
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 8
   ret void
 }
 
@@ -8832,8 +8755,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = sext i8 %str to i64
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to i64*
-  store i64 %conv, i64* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store i64 %conv, ptr %0, align 4096
   ret void
 }
 
@@ -8845,7 +8768,7 @@ define dso_local void @st_cst_align16_int8_t_uint64_t(i8 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i8 %str to i64
-  store i64 %conv, i64* inttoptr (i64 4080 to i64*), align 16
+  store i64 %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -8858,7 +8781,7 @@ define dso_local void @st_cst_align32_int8_t_uint64_t(i8 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i8 %str to i64
-  store i64 %conv, i64* inttoptr (i64 9999900 to i64*), align 8
+  store i64 %conv, ptr inttoptr (i64 9999900 to ptr), align 8
   ret void
 }
 
@@ -8880,7 +8803,7 @@ define dso_local void @st_cst_align64_int8_t_uint64_t(i8 signext %str) {
 ; CHECK-PREP10-NEXT:    blr
 entry:
   %conv = sext i8 %str to i64
-  store i64 %conv, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+  store i64 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -8894,13 +8817,13 @@ define dso_local void @st_0_int8_t_float(i64 %ptr, i8 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i8 %str to float
-  %0 = inttoptr i64 %ptr to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %ptr to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int8_t_float(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align16_int8_t_float(ptr nocapture %ptr, i8 signext %str) {
 ; CHECK-LABEL: st_align16_int8_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprwa f0, r4
@@ -8909,14 +8832,13 @@ define dso_local void @st_align16_int8_t_float(i8* nocapture %ptr, i8 signext %s
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i8 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int8_t_float(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align32_int8_t_float(ptr nocapture %ptr, i8 signext %str) {
 ; CHECK-P10-LABEL: st_align32_int8_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprwa f0, r4
@@ -8943,14 +8865,13 @@ define dso_local void @st_align32_int8_t_float(i8* nocapture %ptr, i8 signext %s
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i8 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int8_t_float(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align64_int8_t_float(ptr nocapture %ptr, i8 signext %str) {
 ; CHECK-P10-LABEL: st_align64_int8_t_float:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprwa f0, r4
@@ -8981,14 +8902,13 @@ define dso_local void @st_align64_int8_t_float(i8* nocapture %ptr, i8 signext %s
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i8 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int8_t_float(i8* nocapture %ptr, i64 %off, i8 signext %str) {
+define dso_local void @st_reg_int8_t_float(ptr nocapture %ptr, i64 %off, i8 signext %str) {
 ; CHECK-LABEL: st_reg_int8_t_float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprwa f0, r5
@@ -8997,9 +8917,8 @@ define dso_local void @st_reg_int8_t_float(i8* nocapture %ptr, i64 %off, i8 sign
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i8 %str to float
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to float*
-  store float %conv, float* %0, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store float %conv, ptr %add.ptr, align 4
   ret void
 }
 
@@ -9016,8 +8935,8 @@ entry:
   %conv = sitofp i8 %str to float
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -9033,8 +8952,8 @@ define dso_local void @st_not_disjoint16_int8_t_float(i64 %ptr, i8 signext %str)
 entry:
   %conv = sitofp i8 %str to float
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -9051,8 +8970,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = sitofp i8 %str to float
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 8
   ret void
 }
 
@@ -9087,8 +9006,8 @@ define dso_local void @st_not_disjoint32_int8_t_float(i64 %ptr, i8 signext %str)
 entry:
   %conv = sitofp i8 %str to float
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -9128,8 +9047,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = sitofp i8 %str to float
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 16
   ret void
 }
 
@@ -9160,8 +9079,8 @@ define dso_local void @st_not_disjoint64_int8_t_float(i64 %ptr, i8 signext %str)
 entry:
   %conv = sitofp i8 %str to float
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4
   ret void
 }
 
@@ -9202,8 +9121,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = sitofp i8 %str to float
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to float*
-  store float %conv, float* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store float %conv, ptr %0, align 4096
   ret void
 }
 
@@ -9217,7 +9136,7 @@ define dso_local void @st_cst_align16_int8_t_float(i8 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i8 %str to float
-  store float %conv, float* inttoptr (i64 4080 to float*), align 16
+  store float %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -9232,7 +9151,7 @@ define dso_local void @st_cst_align32_int8_t_float(i8 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i8 %str to float
-  store float %conv, float* inttoptr (i64 9999900 to float*), align 4
+  store float %conv, ptr inttoptr (i64 9999900 to ptr), align 4
   ret void
 }
 
@@ -9268,7 +9187,7 @@ define dso_local void @st_cst_align64_int8_t_float(i8 signext %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i8 %str to float
-  store float %conv, float* inttoptr (i64 1000000000000 to float*), align 4096
+  store float %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }
 
@@ -9282,13 +9201,13 @@ define dso_local void @st_0_int8_t_double(i64 %ptr, i8 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i8 %str to double
-  %0 = inttoptr i64 %ptr to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %ptr to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int8_t_double(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align16_int8_t_double(ptr nocapture %ptr, i8 signext %str) {
 ; CHECK-LABEL: st_align16_int8_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprwa f0, r4
@@ -9297,14 +9216,13 @@ define dso_local void @st_align16_int8_t_double(i8* nocapture %ptr, i8 signext %
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i8 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int8_t_double(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align32_int8_t_double(ptr nocapture %ptr, i8 signext %str) {
 ; CHECK-P10-LABEL: st_align32_int8_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprwa f0, r4
@@ -9331,14 +9249,13 @@ define dso_local void @st_align32_int8_t_double(i8* nocapture %ptr, i8 signext %
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i8 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int8_t_double(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align64_int8_t_double(ptr nocapture %ptr, i8 signext %str) {
 ; CHECK-P10-LABEL: st_align64_int8_t_double:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    mtfprwa f0, r4
@@ -9369,14 +9286,13 @@ define dso_local void @st_align64_int8_t_double(i8* nocapture %ptr, i8 signext %
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i8 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int8_t_double(i8* nocapture %ptr, i64 %off, i8 signext %str) {
+define dso_local void @st_reg_int8_t_double(ptr nocapture %ptr, i64 %off, i8 signext %str) {
 ; CHECK-LABEL: st_reg_int8_t_double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mtfprwa f0, r5
@@ -9385,9 +9301,8 @@ define dso_local void @st_reg_int8_t_double(i8* nocapture %ptr, i64 %off, i8 sig
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i8 %str to double
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to double*
-  store double %conv, double* %0, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store double %conv, ptr %add.ptr, align 8
   ret void
 }
 
@@ -9404,8 +9319,8 @@ entry:
   %conv = sitofp i8 %str to double
   %conv1 = zext i8 %off to i64
   %or = or i64 %conv1, %ptr
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -9421,8 +9336,8 @@ define dso_local void @st_not_disjoint16_int8_t_double(i64 %ptr, i8 signext %str
 entry:
   %conv = sitofp i8 %str to double
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -9439,8 +9354,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = sitofp i8 %str to double
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -9475,8 +9390,8 @@ define dso_local void @st_not_disjoint32_int8_t_double(i64 %ptr, i8 signext %str
 entry:
   %conv = sitofp i8 %str to double
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -9516,8 +9431,8 @@ entry:
   %and = and i64 %ptr, -1000341504
   %conv = sitofp i8 %str to double
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 16
   ret void
 }
 
@@ -9548,8 +9463,8 @@ define dso_local void @st_not_disjoint64_int8_t_double(i64 %ptr, i8 signext %str
 entry:
   %conv = sitofp i8 %str to double
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 8
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 8
   ret void
 }
 
@@ -9590,8 +9505,8 @@ entry:
   %and = and i64 %ptr, -1099511627776
   %conv = sitofp i8 %str to double
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to double*
-  store double %conv, double* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store double %conv, ptr %0, align 4096
   ret void
 }
 
@@ -9605,7 +9520,7 @@ define dso_local void @st_cst_align16_int8_t_double(i8 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i8 %str to double
-  store double %conv, double* inttoptr (i64 4080 to double*), align 16
+  store double %conv, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -9620,7 +9535,7 @@ define dso_local void @st_cst_align32_int8_t_double(i8 signext %str) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sitofp i8 %str to double
-  store double %conv, double* inttoptr (i64 9999900 to double*), align 8
+  store double %conv, ptr inttoptr (i64 9999900 to ptr), align 8
   ret void
 }
 
@@ -9656,6 +9571,6 @@ define dso_local void @st_cst_align64_int8_t_double(i8 signext %str) {
 ; CHECK-P8-NEXT:    blr
 entry:
   %conv = sitofp i8 %str to double
-  store double %conv, double* inttoptr (i64 1000000000000 to double*), align 4096
+  store double %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/scalar_vector_test_1.ll b/llvm/test/CodeGen/PowerPC/scalar_vector_test_1.ll
index 6f815d17620b..92408c6dd31b 100644
--- a/llvm/test/CodeGen/PowerPC/scalar_vector_test_1.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar_vector_test_1.ll
@@ -10,7 +10,7 @@
 
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @s2v_test1(i64* nocapture readonly %int64, <2 x i64> %vec) {
+define <2 x i64> @s2v_test1(ptr nocapture readonly %int64, <2 x i64> %vec) {
 ; P9LE-LABEL: s2v_test1:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    lfd f0, 0(r3)
@@ -36,13 +36,13 @@ define <2 x i64> @s2v_test1(i64* nocapture readonly %int64, <2 x i64> %vec) {
 ; P8BE-NEXT:    blr
 
 entry:
-  %0 = load i64, i64* %int64, align 8
+  %0 = load i64, ptr %int64, align 8
   %vecins = insertelement <2 x i64> %vec, i64 %0, i32 0
   ret <2 x i64> %vecins
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @s2v_test2(i64* nocapture readonly %int64, <2 x i64> %vec)  {
+define <2 x i64> @s2v_test2(ptr nocapture readonly %int64, <2 x i64> %vec)  {
 ; P9LE-LABEL: s2v_test2:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    lfd f0, 8(r3)
@@ -68,14 +68,14 @@ define <2 x i64> @s2v_test2(i64* nocapture readonly %int64, <2 x i64> %vec)  {
 ; P8BE-NEXT:    blr
 
 entry:
-  %arrayidx = getelementptr inbounds i64, i64* %int64, i64 1
-  %0 = load i64, i64* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds i64, ptr %int64, i64 1
+  %0 = load i64, ptr %arrayidx, align 8
   %vecins = insertelement <2 x i64> %vec, i64 %0, i32 0
   ret <2 x i64> %vecins
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @s2v_test3(i64* nocapture readonly %int64, <2 x i64> %vec, i32 signext %Idx)  {
+define <2 x i64> @s2v_test3(ptr nocapture readonly %int64, <2 x i64> %vec, i32 signext %Idx)  {
 ; P9LE-LABEL: s2v_test3:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    sldi r4, r7, 3
@@ -106,14 +106,14 @@ define <2 x i64> @s2v_test3(i64* nocapture readonly %int64, <2 x i64> %vec, i32
 
 entry:
   %idxprom = sext i32 %Idx to i64
-  %arrayidx = getelementptr inbounds i64, i64* %int64, i64 %idxprom
-  %0 = load i64, i64* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds i64, ptr %int64, i64 %idxprom
+  %0 = load i64, ptr %arrayidx, align 8
   %vecins = insertelement <2 x i64> %vec, i64 %0, i32 0
   ret <2 x i64> %vecins
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @s2v_test4(i64* nocapture readonly %int64, <2 x i64> %vec)  {
+define <2 x i64> @s2v_test4(ptr nocapture readonly %int64, <2 x i64> %vec)  {
 ; P9LE-LABEL: s2v_test4:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    lfd f0, 8(r3)
@@ -139,14 +139,14 @@ define <2 x i64> @s2v_test4(i64* nocapture readonly %int64, <2 x i64> %vec)  {
 ; P8BE-NEXT:    blr
 
 entry:
-  %arrayidx = getelementptr inbounds i64, i64* %int64, i64 1
-  %0 = load i64, i64* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds i64, ptr %int64, i64 1
+  %0 = load i64, ptr %arrayidx, align 8
   %vecins = insertelement <2 x i64> %vec, i64 %0, i32 0
   ret <2 x i64> %vecins
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @s2v_test5(<2 x i64> %vec, i64* nocapture readonly %ptr1)  {
+define <2 x i64> @s2v_test5(<2 x i64> %vec, ptr nocapture readonly %ptr1)  {
 ; P9LE-LABEL: s2v_test5:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    lfd f0, 0(r5)
@@ -172,13 +172,13 @@ define <2 x i64> @s2v_test5(<2 x i64> %vec, i64* nocapture readonly %ptr1)  {
 ; P8BE-NEXT:    blr
 
 entry:
-  %0 = load i64, i64* %ptr1, align 8
+  %0 = load i64, ptr %ptr1, align 8
   %vecins = insertelement <2 x i64> %vec, i64 %0, i32 0
   ret <2 x i64> %vecins
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x double> @s2v_test_f1(double* nocapture readonly %f64, <2 x double> %vec)  {
+define <2 x double> @s2v_test_f1(ptr nocapture readonly %f64, <2 x double> %vec)  {
 ; P9LE-LABEL: s2v_test_f1:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    lfd f0, 0(r3)
@@ -206,13 +206,13 @@ define <2 x double> @s2v_test_f1(double* nocapture readonly %f64, <2 x double> %
 
 
 entry:
-  %0 = load double, double* %f64, align 8
+  %0 = load double, ptr %f64, align 8
   %vecins = insertelement <2 x double> %vec, double %0, i32 0
   ret <2 x double> %vecins
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x double> @s2v_test_f2(double* nocapture readonly %f64, <2 x double> %vec)  {
+define <2 x double> @s2v_test_f2(ptr nocapture readonly %f64, <2 x double> %vec)  {
 ; P9LE-LABEL: s2v_test_f2:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    lfd f0, 8(r3)
@@ -240,14 +240,14 @@ define <2 x double> @s2v_test_f2(double* nocapture readonly %f64, <2 x double> %
 
 
 entry:
-  %arrayidx = getelementptr inbounds double, double* %f64, i64 1
-  %0 = load double, double* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds double, ptr %f64, i64 1
+  %0 = load double, ptr %arrayidx, align 8
   %vecins = insertelement <2 x double> %vec, double %0, i32 0
   ret <2 x double> %vecins
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x double> @s2v_test_f3(double* nocapture readonly %f64, <2 x double> %vec, i32 signext %Idx)  {
+define <2 x double> @s2v_test_f3(ptr nocapture readonly %f64, <2 x double> %vec, i32 signext %Idx)  {
 ; P9LE-LABEL: s2v_test_f3:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    sldi r4, r7, 3
@@ -280,14 +280,14 @@ define <2 x double> @s2v_test_f3(double* nocapture readonly %f64, <2 x double> %
 
 entry:
   %idxprom = sext i32 %Idx to i64
-  %arrayidx = getelementptr inbounds double, double* %f64, i64 %idxprom
-  %0 = load double, double* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds double, ptr %f64, i64 %idxprom
+  %0 = load double, ptr %arrayidx, align 8
   %vecins = insertelement <2 x double> %vec, double %0, i32 0
   ret <2 x double> %vecins
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x double> @s2v_test_f4(double* nocapture readonly %f64, <2 x double> %vec)  {
+define <2 x double> @s2v_test_f4(ptr nocapture readonly %f64, <2 x double> %vec)  {
 ; P9LE-LABEL: s2v_test_f4:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    lfd f0, 8(r3)
@@ -315,14 +315,14 @@ define <2 x double> @s2v_test_f4(double* nocapture readonly %f64, <2 x double> %
 
 
 entry:
-  %arrayidx = getelementptr inbounds double, double* %f64, i64 1
-  %0 = load double, double* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds double, ptr %f64, i64 1
+  %0 = load double, ptr %arrayidx, align 8
   %vecins = insertelement <2 x double> %vec, double %0, i32 0
   ret <2 x double> %vecins
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x double> @s2v_test_f5(<2 x double> %vec, double* nocapture readonly %ptr1)  {
+define <2 x double> @s2v_test_f5(<2 x double> %vec, ptr nocapture readonly %ptr1)  {
 ; P9LE-LABEL: s2v_test_f5:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    lfd f0, 0(r5)
@@ -350,7 +350,7 @@ define <2 x double> @s2v_test_f5(<2 x double> %vec, double* nocapture readonly %
 
 
 entry:
-  %0 = load double, double* %ptr1, align 8
+  %0 = load double, ptr %ptr1, align 8
   %vecins = insertelement <2 x double> %vec, double %0, i32 0
   ret <2 x double> %vecins
 }

diff  --git a/llvm/test/CodeGen/PowerPC/scalar_vector_test_2.ll b/llvm/test/CodeGen/PowerPC/scalar_vector_test_2.ll
index c7c8a441f52e..3f97dfd8cc77 100644
--- a/llvm/test/CodeGen/PowerPC/scalar_vector_test_2.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar_vector_test_2.ll
@@ -8,7 +8,7 @@
 ; RUN: llc -mcpu=pwr8 -verify-machineinstrs -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names \
 ; RUN:    -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s --check-prefix=P8BE
 
-define void @test_liwzx1(<1 x float>* %A, <1 x float>* %B, <1 x float>* %C) {
+define void @test_liwzx1(ptr %A, ptr %B, ptr %C) {
 ; P9LE-LABEL: test_liwzx1:
 ; P9LE:       # %bb.0:
 ; P9LE-NEXT:    lfs f0, 0(r3)
@@ -43,14 +43,14 @@ define void @test_liwzx1(<1 x float>* %A, <1 x float>* %B, <1 x float>* %C) {
 
 
 
-  %a = load <1 x float>, <1 x float>* %A
-  %b = load <1 x float>, <1 x float>* %B
+  %a = load <1 x float>, ptr %A
+  %b = load <1 x float>, ptr %B
   %X = fadd <1 x float> %a, %b
-  store <1 x float> %X, <1 x float>* %C
+  store <1 x float> %X, ptr %C
   ret void
 }
 
-define <1 x float>* @test_liwzx2(<1 x float>* %A, <1 x float>* %B, <1 x float>* %C) {
+define ptr @test_liwzx2(ptr %A, ptr %B, ptr %C) {
 ; P9LE-LABEL: test_liwzx2:
 ; P9LE:       # %bb.0:
 ; P9LE-NEXT:    lfs f0, 0(r3)
@@ -91,9 +91,9 @@ define <1 x float>* @test_liwzx2(<1 x float>* %A, <1 x float>* %B, <1 x float>*
 
 
 
-  %a = load <1 x float>, <1 x float>* %A
-  %b = load <1 x float>, <1 x float>* %B
+  %a = load <1 x float>, ptr %A
+  %b = load <1 x float>, ptr %B
   %X = fsub <1 x float> %a, %b
-  store <1 x float> %X, <1 x float>* %C
-  ret <1 x float>* %C
+  store <1 x float> %X, ptr %C
+  ret ptr %C
 }

diff  --git a/llvm/test/CodeGen/PowerPC/scalar_vector_test_3.ll b/llvm/test/CodeGen/PowerPC/scalar_vector_test_3.ll
index e2291ea2a9fb..12911901708e 100644
--- a/llvm/test/CodeGen/PowerPC/scalar_vector_test_3.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar_vector_test_3.ll
@@ -9,7 +9,7 @@
 ; RUN:    -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s --check-prefix=P8BE
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @s2v_test1(i32* nocapture readonly %int32, <2 x i64> %vec)  {
+define <2 x i64> @s2v_test1(ptr nocapture readonly %int32, <2 x i64> %vec)  {
 ; P9LE-LABEL: s2v_test1:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    lfiwax f0, 0, r3
@@ -37,14 +37,14 @@ define <2 x i64> @s2v_test1(i32* nocapture readonly %int32, <2 x i64> %vec)  {
 
 
 entry:
-  %0 = load i32, i32* %int32, align 4
+  %0 = load i32, ptr %int32, align 4
   %conv = sext i32 %0 to i64
   %vecins = insertelement <2 x i64> %vec, i64 %conv, i32 0
   ret <2 x i64> %vecins
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @s2v_test2(i32* nocapture readonly %int32, <2 x i64> %vec)  {
+define <2 x i64> @s2v_test2(ptr nocapture readonly %int32, <2 x i64> %vec)  {
 ; P9LE-LABEL: s2v_test2:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    addi r3, r3, 4
@@ -76,15 +76,15 @@ define <2 x i64> @s2v_test2(i32* nocapture readonly %int32, <2 x i64> %vec)  {
 
 
 entry:
-  %arrayidx = getelementptr inbounds i32, i32* %int32, i64 1
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %int32, i64 1
+  %0 = load i32, ptr %arrayidx, align 4
   %conv = sext i32 %0 to i64
   %vecins = insertelement <2 x i64> %vec, i64 %conv, i32 0
   ret <2 x i64> %vecins
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @s2v_test3(i32* nocapture readonly %int32, <2 x i64> %vec, i32 signext %Idx)  {
+define <2 x i64> @s2v_test3(ptr nocapture readonly %int32, <2 x i64> %vec, i32 signext %Idx)  {
 ; P9LE-LABEL: s2v_test3:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    sldi r4, r7, 2
@@ -117,15 +117,15 @@ define <2 x i64> @s2v_test3(i32* nocapture readonly %int32, <2 x i64> %vec, i32
 
 entry:
   %idxprom = sext i32 %Idx to i64
-  %arrayidx = getelementptr inbounds i32, i32* %int32, i64 %idxprom
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %int32, i64 %idxprom
+  %0 = load i32, ptr %arrayidx, align 4
   %conv = sext i32 %0 to i64
   %vecins = insertelement <2 x i64> %vec, i64 %conv, i32 0
   ret <2 x i64> %vecins
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @s2v_test4(i32* nocapture readonly %int32, <2 x i64> %vec)  {
+define <2 x i64> @s2v_test4(ptr nocapture readonly %int32, <2 x i64> %vec)  {
 ; P9LE-LABEL: s2v_test4:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    addi r3, r3, 4
@@ -157,15 +157,15 @@ define <2 x i64> @s2v_test4(i32* nocapture readonly %int32, <2 x i64> %vec)  {
 
 
 entry:
-  %arrayidx = getelementptr inbounds i32, i32* %int32, i64 1
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %int32, i64 1
+  %0 = load i32, ptr %arrayidx, align 4
   %conv = sext i32 %0 to i64
   %vecins = insertelement <2 x i64> %vec, i64 %conv, i32 0
   ret <2 x i64> %vecins
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @s2v_test5(<2 x i64> %vec, i32* nocapture readonly %ptr1)  {
+define <2 x i64> @s2v_test5(<2 x i64> %vec, ptr nocapture readonly %ptr1)  {
 ; P9LE-LABEL: s2v_test5:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    lfiwax f0, 0, r5
@@ -193,14 +193,14 @@ define <2 x i64> @s2v_test5(<2 x i64> %vec, i32* nocapture readonly %ptr1)  {
 
 
 entry:
-  %0 = load i32, i32* %ptr1, align 4
+  %0 = load i32, ptr %ptr1, align 4
   %conv = sext i32 %0 to i64
   %vecins = insertelement <2 x i64> %vec, i64 %conv, i32 0
   ret <2 x i64> %vecins
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @s2v_test6(i32* nocapture readonly %ptr)  {
+define <2 x i64> @s2v_test6(ptr nocapture readonly %ptr)  {
 ; P9LE-LABEL: s2v_test6:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    lfiwax f0, 0, r3
@@ -228,7 +228,7 @@ define <2 x i64> @s2v_test6(i32* nocapture readonly %ptr)  {
 
 
 entry:
-  %0 = load i32, i32* %ptr, align 4
+  %0 = load i32, ptr %ptr, align 4
   %conv = sext i32 %0 to i64
   %splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
   %splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
@@ -236,7 +236,7 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @s2v_test7(i32* nocapture readonly %ptr)  {
+define <2 x i64> @s2v_test7(ptr nocapture readonly %ptr)  {
 ; P9LE-LABEL: s2v_test7:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    lfiwax f0, 0, r3
@@ -264,7 +264,7 @@ define <2 x i64> @s2v_test7(i32* nocapture readonly %ptr)  {
 
 
 entry:
-  %0 = load i32, i32* %ptr, align 4
+  %0 = load i32, ptr %ptr, align 4
   %conv = sext i32 %0 to i64
   %splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
   %splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer

diff  --git a/llvm/test/CodeGen/PowerPC/scalar_vector_test_4.ll b/llvm/test/CodeGen/PowerPC/scalar_vector_test_4.ll
index 6a7d36e2cf3e..d74dc81722e0 100644
--- a/llvm/test/CodeGen/PowerPC/scalar_vector_test_4.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar_vector_test_4.ll
@@ -22,7 +22,7 @@
 ; RUN:    --check-prefixes=AIX,P8-AIX-32
 
 ; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @s2v_test1(i32* nocapture readonly %int32, <4 x i32> %vec)  {
+define <4 x i32> @s2v_test1(ptr nocapture readonly %int32, <4 x i32> %vec)  {
 ; P9LE-LABEL: s2v_test1:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    lwz r3, 0(r3)
@@ -82,13 +82,13 @@ define <4 x i32> @s2v_test1(i32* nocapture readonly %int32, <4 x i32> %vec)  {
 ; P8-AIX-32-NEXT:    vperm v2, v4, v2, v3
 ; P8-AIX-32-NEXT:    blr
 entry:
-  %0 = load i32, i32* %int32, align 4
+  %0 = load i32, ptr %int32, align 4
   %vecins = insertelement <4 x i32> %vec, i32 %0, i32 0
   ret <4 x i32> %vecins
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @s2v_test2(i32* nocapture readonly %int32, <4 x i32> %vec)  {
+define <4 x i32> @s2v_test2(ptr nocapture readonly %int32, <4 x i32> %vec)  {
 ; P9LE-LABEL: s2v_test2:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    lwz r3, 4(r3)
@@ -151,14 +151,14 @@ define <4 x i32> @s2v_test2(i32* nocapture readonly %int32, <4 x i32> %vec)  {
 ; P8-AIX-32-NEXT:    vperm v2, v4, v2, v3
 ; P8-AIX-32-NEXT:    blr
 entry:
-  %arrayidx = getelementptr inbounds i32, i32* %int32, i64 1
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %int32, i64 1
+  %0 = load i32, ptr %arrayidx, align 4
   %vecins = insertelement <4 x i32> %vec, i32 %0, i32 0
   ret <4 x i32> %vecins
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @s2v_test3(i32* nocapture readonly %int32, <4 x i32> %vec, i32 signext %Idx)  {
+define <4 x i32> @s2v_test3(ptr nocapture readonly %int32, <4 x i32> %vec, i32 signext %Idx)  {
 ; P9LE-LABEL: s2v_test3:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    sldi r4, r7, 2
@@ -234,14 +234,14 @@ define <4 x i32> @s2v_test3(i32* nocapture readonly %int32, <4 x i32> %vec, i32
 ; P8-AIX-32-NEXT:    blr
 entry:
   %idxprom = sext i32 %Idx to i64
-  %arrayidx = getelementptr inbounds i32, i32* %int32, i64 %idxprom
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %int32, i64 %idxprom
+  %0 = load i32, ptr %arrayidx, align 4
   %vecins = insertelement <4 x i32> %vec, i32 %0, i32 0
   ret <4 x i32> %vecins
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @s2v_test4(i32* nocapture readonly %int32, <4 x i32> %vec)  {
+define <4 x i32> @s2v_test4(ptr nocapture readonly %int32, <4 x i32> %vec)  {
 ; P9LE-LABEL: s2v_test4:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    lwz r3, 4(r3)
@@ -304,14 +304,14 @@ define <4 x i32> @s2v_test4(i32* nocapture readonly %int32, <4 x i32> %vec)  {
 ; P8-AIX-32-NEXT:    vperm v2, v4, v2, v3
 ; P8-AIX-32-NEXT:    blr
 entry:
-  %arrayidx = getelementptr inbounds i32, i32* %int32, i64 1
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %int32, i64 1
+  %0 = load i32, ptr %arrayidx, align 4
   %vecins = insertelement <4 x i32> %vec, i32 %0, i32 0
   ret <4 x i32> %vecins
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @s2v_test5(<4 x i32> %vec, i32* nocapture readonly %ptr1)  {
+define <4 x i32> @s2v_test5(<4 x i32> %vec, ptr nocapture readonly %ptr1)  {
 ; P9LE-LABEL: s2v_test5:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    lwz r3, 0(r5)
@@ -371,13 +371,13 @@ define <4 x i32> @s2v_test5(<4 x i32> %vec, i32* nocapture readonly %ptr1)  {
 ; P8-AIX-32-NEXT:    vperm v2, v4, v2, v3
 ; P8-AIX-32-NEXT:    blr
 entry:
-  %0 = load i32, i32* %ptr1, align 4
+  %0 = load i32, ptr %ptr1, align 4
   %vecins = insertelement <4 x i32> %vec, i32 %0, i32 0
   ret <4 x i32> %vecins
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <4 x float> @s2v_test_f1(float* nocapture readonly %f64, <4 x float> %vec)  {
+define <4 x float> @s2v_test_f1(ptr nocapture readonly %f64, <4 x float> %vec)  {
 ; P9LE-LABEL: s2v_test_f1:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    lwz r3, 0(r3)
@@ -434,13 +434,13 @@ define <4 x float> @s2v_test_f1(float* nocapture readonly %f64, <4 x float> %vec
 ; P8-AIX-32-NEXT:    vperm v2, v3, v2, v4
 ; P8-AIX-32-NEXT:    blr
 entry:
-  %0 = load float, float* %f64, align 4
+  %0 = load float, ptr %f64, align 4
   %vecins = insertelement <4 x float> %vec, float %0, i32 0
   ret <4 x float> %vecins
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x float> @s2v_test_f2(float* nocapture readonly %f64, <2 x float> %vec)  {
+define <2 x float> @s2v_test_f2(ptr nocapture readonly %f64, <2 x float> %vec)  {
 ; P9LE-LABEL: s2v_test_f2:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    addi r3, r3, 4
@@ -478,14 +478,14 @@ define <2 x float> @s2v_test_f2(float* nocapture readonly %f64, <2 x float> %vec
 ; AIX-NEXT:    vmrgow v2, v3, v2
 ; AIX-NEXT:    blr
 entry:
-  %arrayidx = getelementptr inbounds float, float* %f64, i64 1
-  %0 = load float, float* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds float, ptr %f64, i64 1
+  %0 = load float, ptr %arrayidx, align 8
   %vecins = insertelement <2 x float> %vec, float %0, i32 0
   ret <2 x float> %vecins
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x float> @s2v_test_f3(float* nocapture readonly %f64, <2 x float> %vec, i32 signext %Idx)  {
+define <2 x float> @s2v_test_f3(ptr nocapture readonly %f64, <2 x float> %vec, i32 signext %Idx)  {
 ; P9LE-LABEL: s2v_test_f3:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    sldi r4, r7, 2
@@ -545,14 +545,14 @@ define <2 x float> @s2v_test_f3(float* nocapture readonly %f64, <2 x float> %vec
 ; P8-AIX-32-NEXT:    blr
 entry:
   %idxprom = sext i32 %Idx to i64
-  %arrayidx = getelementptr inbounds float, float* %f64, i64 %idxprom
-  %0 = load float, float* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds float, ptr %f64, i64 %idxprom
+  %0 = load float, ptr %arrayidx, align 8
   %vecins = insertelement <2 x float> %vec, float %0, i32 0
   ret <2 x float> %vecins
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x float> @s2v_test_f4(float* nocapture readonly %f64, <2 x float> %vec)  {
+define <2 x float> @s2v_test_f4(ptr nocapture readonly %f64, <2 x float> %vec)  {
 ; P9LE-LABEL: s2v_test_f4:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    addi r3, r3, 4
@@ -590,14 +590,14 @@ define <2 x float> @s2v_test_f4(float* nocapture readonly %f64, <2 x float> %vec
 ; AIX-NEXT:    vmrgow v2, v3, v2
 ; AIX-NEXT:    blr
 entry:
-  %arrayidx = getelementptr inbounds float, float* %f64, i64 1
-  %0 = load float, float* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds float, ptr %f64, i64 1
+  %0 = load float, ptr %arrayidx, align 8
   %vecins = insertelement <2 x float> %vec, float %0, i32 0
   ret <2 x float> %vecins
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x float> @s2v_test_f5(<2 x float> %vec, float* nocapture readonly %ptr1)  {
+define <2 x float> @s2v_test_f5(<2 x float> %vec, ptr nocapture readonly %ptr1)  {
 ; P9LE-LABEL: s2v_test_f5:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    lfiwzx f0, 0, r5
@@ -630,7 +630,7 @@ define <2 x float> @s2v_test_f5(<2 x float> %vec, float* nocapture readonly %ptr
 ; AIX-NEXT:    vmrgow v2, v3, v2
 ; AIX-NEXT:    blr
 entry:
-  %0 = load float, float* %ptr1, align 8
+  %0 = load float, ptr %ptr1, align 8
   %vecins = insertelement <2 x float> %vec, float %0, i32 0
   ret <2 x float> %vecins
 }

diff  --git a/llvm/test/CodeGen/PowerPC/scalar_vector_test_5.ll b/llvm/test/CodeGen/PowerPC/scalar_vector_test_5.ll
index 0195005b70ed..c1059be946a5 100644
--- a/llvm/test/CodeGen/PowerPC/scalar_vector_test_5.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar_vector_test_5.ll
@@ -8,7 +8,7 @@
 ; RUN: llc -mcpu=pwr8 -verify-machineinstrs -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names \
 ; RUN:    -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s --check-prefix=P8BE
 
-define i8 @scalar_to_vector_half(i16* nocapture readonly %ad) {
+define i8 @scalar_to_vector_half(ptr nocapture readonly %ad) {
 ; P9LE-LABEL: scalar_to_vector_half:
 ; P9LE:       # %bb.0: # %entry
 ; P9LE-NEXT:    lhz r3, 0(r3)
@@ -36,9 +36,8 @@ define i8 @scalar_to_vector_half(i16* nocapture readonly %ad) {
 ; P8BE-NEXT:    rldicl r3, r3, 8, 56
 ; P8BE-NEXT:    blr
 entry:
-    %0 = bitcast i16* %ad to <2 x i8>*
-    %1 = load <2 x i8>, <2 x i8>* %0, align 1
-    %2 = extractelement <2 x i8> %1, i32 0
-    ret i8 %2
+    %0 = load <2 x i8>, ptr %ad, align 1
+    %1 = extractelement <2 x i8> %0, i32 0
+    ret i8 %1
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/scalars-in-altivec-regs.ll b/llvm/test/CodeGen/PowerPC/scalars-in-altivec-regs.ll
index f1ace9d64516..06164b7ff5b9 100644
--- a/llvm/test/CodeGen/PowerPC/scalars-in-altivec-regs.ll
+++ b/llvm/test/CodeGen/PowerPC/scalars-in-altivec-regs.ll
@@ -8,7 +8,7 @@
 ; RUN: llc -mtriple=powerpc64le-- -verify-machineinstrs \
 ; RUN:   -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck --check-prefix=LE %s
 
-define dso_local void @test1(<2 x double>* %v, i64 %a) local_unnamed_addr #0 {
+define dso_local void @test1(ptr %v, i64 %a) local_unnamed_addr #0 {
 ; AIX64-LABEL: test1:
 ; AIX64:       # %bb.0: # %entry
 ; AIX64-NEXT:    mtvsrd vs34, r4
@@ -36,11 +36,11 @@ define dso_local void @test1(<2 x double>* %v, i64 %a) local_unnamed_addr #0 {
 ; LE-NEXT:    #NO_APP
 ; LE-NEXT:    blr
 entry:
-  tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i64 %a, <2 x double>* %v)
+  tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i64 %a, ptr %v)
   ret void
 }
 
-define dso_local void @test2(<2 x double>* %v, i32 signext %a) local_unnamed_addr #0 {
+define dso_local void @test2(ptr %v, i32 signext %a) local_unnamed_addr #0 {
 ; AIX64-LABEL: test2:
 ; AIX64:       # %bb.0: # %entry
 ; AIX64-NEXT:    clrldi r4, r4, 32
@@ -71,11 +71,11 @@ define dso_local void @test2(<2 x double>* %v, i32 signext %a) local_unnamed_add
 ; LE-NEXT:    #NO_APP
 ; LE-NEXT:    blr
 entry:
-  tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i32 %a, <2 x double>* %v)
+  tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i32 %a, ptr %v)
   ret void
 }
 
-define dso_local void @test3(<2 x double>* %v, i16 signext %a) local_unnamed_addr #0 {
+define dso_local void @test3(ptr %v, i16 signext %a) local_unnamed_addr #0 {
 ; AIX64-LABEL: test3:
 ; AIX64:       # %bb.0: # %entry
 ; AIX64-NEXT:    clrldi r4, r4, 48
@@ -107,11 +107,11 @@ define dso_local void @test3(<2 x double>* %v, i16 signext %a) local_unnamed_add
 ; LE-NEXT:    #NO_APP
 ; LE-NEXT:    blr
 entry:
-  tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i16 %a, <2 x double>* %v)
+  tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i16 %a, ptr %v)
   ret void
 }
 
-define dso_local void @test4(<2 x double>* %v, i8 signext %a) local_unnamed_addr #0 {
+define dso_local void @test4(ptr %v, i8 signext %a) local_unnamed_addr #0 {
 ; AIX64-LABEL: test4:
 ; AIX64:       # %bb.0: # %entry
 ; AIX64-NEXT:    clrldi r4, r4, 56
@@ -143,11 +143,11 @@ define dso_local void @test4(<2 x double>* %v, i8 signext %a) local_unnamed_addr
 ; LE-NEXT:    #NO_APP
 ; LE-NEXT:    blr
 entry:
-  tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i8 %a, <2 x double>* %v)
+  tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i8 %a, ptr %v)
   ret void
 }
 
-define dso_local void @test6(<2 x double>* %v, i32 zeroext %a) local_unnamed_addr #0 {
+define dso_local void @test6(ptr %v, i32 zeroext %a) local_unnamed_addr #0 {
 ; AIX64-LABEL: test6:
 ; AIX64:       # %bb.0: # %entry
 ; AIX64-NEXT:    mtvsrd vs34, r4
@@ -176,11 +176,11 @@ define dso_local void @test6(<2 x double>* %v, i32 zeroext %a) local_unnamed_add
 ; LE-NEXT:    #NO_APP
 ; LE-NEXT:    blr
 entry:
-  tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i32 %a, <2 x double>* %v)
+  tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i32 %a, ptr %v)
   ret void
 }
 
-define dso_local void @test7(<2 x double>* %v, i16 zeroext %a) local_unnamed_addr #0 {
+define dso_local void @test7(ptr %v, i16 zeroext %a) local_unnamed_addr #0 {
 ; AIX64-LABEL: test7:
 ; AIX64:       # %bb.0: # %entry
 ; AIX64-NEXT:    mtvsrd vs34, r4
@@ -209,11 +209,11 @@ define dso_local void @test7(<2 x double>* %v, i16 zeroext %a) local_unnamed_add
 ; LE-NEXT:    #NO_APP
 ; LE-NEXT:    blr
 entry:
-  tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i16 %a, <2 x double>* %v)
+  tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i16 %a, ptr %v)
   ret void
 }
 
-define dso_local void @test8(<2 x double>* %v, i8 zeroext %a) local_unnamed_addr #0 {
+define dso_local void @test8(ptr %v, i8 zeroext %a) local_unnamed_addr #0 {
 ; AIX64-LABEL: test8:
 ; AIX64:       # %bb.0: # %entry
 ; AIX64-NEXT:    mtvsrd vs34, r4
@@ -242,7 +242,7 @@ define dso_local void @test8(<2 x double>* %v, i8 zeroext %a) local_unnamed_addr
 ; LE-NEXT:    #NO_APP
 ; LE-NEXT:    blr
 entry:
-  tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i8 %a, <2 x double>* %v)
+  tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i8 %a, ptr %v)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/sched-addi.ll b/llvm/test/CodeGen/PowerPC/sched-addi.ll
index e3591b77fd75..ce6679ab7bb3 100644
--- a/llvm/test/CodeGen/PowerPC/sched-addi.ll
+++ b/llvm/test/CodeGen/PowerPC/sched-addi.ll
@@ -9,7 +9,7 @@
 
 @scalars = common dso_local local_unnamed_addr global %_type_of_scalars zeroinitializer, align 16
 
-define dso_local void @test([0 x %_elem_type_of_x]* noalias %.x, [0 x %_elem_type_of_a]* %.a, i64* noalias %.n) {
+define dso_local void @test(ptr noalias %.x, ptr %.a, ptr noalias %.n) {
 ; CHECK-P9-LABEL: test:
 ; CHECK-P9:       # %bb.0: # %entry
 ; CHECK-P9-NEXT:    ld 5, 0(5)
@@ -88,10 +88,10 @@ define dso_local void @test([0 x %_elem_type_of_x]* noalias %.x, [0 x %_elem_typ
 ; CHECK-P9-NO-HEURISTIC-NEXT:  # %bb.2: # %return.block
 ; CHECK-P9-NO-HEURISTIC-NEXT:    blr
 entry:
-  %x_rvo_based_addr_3 = getelementptr inbounds [0 x %_elem_type_of_x], [0 x %_elem_type_of_x]* %.x, i64 0, i64 -1
-  %a_rvo_based_addr_5 = getelementptr inbounds [0 x %_elem_type_of_a], [0 x %_elem_type_of_a]* %.a, i64 0, i64 -1
-  %_val_n_ = load i64, i64* %.n, align 8
-  %_val_c1_ = load double, double* getelementptr inbounds (%_type_of_scalars, %_type_of_scalars* @scalars, i64 0, i32 1), align 16
+  %x_rvo_based_addr_3 = getelementptr inbounds [0 x %_elem_type_of_x], ptr %.x, i64 0, i64 -1
+  %a_rvo_based_addr_5 = getelementptr inbounds [0 x %_elem_type_of_a], ptr %.a, i64 0, i64 -1
+  %_val_n_ = load i64, ptr %.n, align 8
+  %_val_c1_ = load double, ptr getelementptr inbounds (%_type_of_scalars, ptr @scalars, i64 0, i32 1), align 16
   %n.vec = and i64 %_val_n_, -32
   %broadcast.splatinsert26 = insertelement <4 x double> undef, double %_val_c1_, i32 0
   %broadcast.splat27 = shufflevector <4 x double> %broadcast.splatinsert26, <4 x double> undef, <4 x i32> zeroinitializer
@@ -100,57 +100,41 @@ entry:
 vector.body:
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
    %offset.idx = or i64 %index, 1
-  %0 = getelementptr %_elem_type_of_x, %_elem_type_of_x* %x_rvo_based_addr_3, i64 %offset.idx, i32 0
-  %1 = getelementptr %_elem_type_of_a, %_elem_type_of_a* %a_rvo_based_addr_5, i64 %offset.idx, i32 0
-  %2 = bitcast double* %1 to <4 x double>*
-  %wide.load = load <4 x double>, <4 x double>* %2, align 8
-  %3 = getelementptr double, double* %1, i64 4
-  %4 = bitcast double* %3 to <4 x double>*
-  %wide.load19 = load <4 x double>, <4 x double>* %4, align 8
-  %5 = getelementptr double, double* %1, i64 8
-  %6 = bitcast double* %5 to <4 x double>*
-  %wide.load20 = load <4 x double>, <4 x double>* %6, align 8
-  %7 = getelementptr double, double* %1, i64 12
-  %8 = bitcast double* %7 to <4 x double>*
-  %wide.load21 = load <4 x double>, <4 x double>* %8, align 8
-  %9 = getelementptr double, double* %1, i64 16
-  %10 = bitcast double* %9 to <4 x double>*
-  %wide.load22 = load <4 x double>, <4 x double>* %10, align 8
-  %11 = getelementptr double, double* %1, i64 20
-  %12 = bitcast double* %11 to <4 x double>*
-  %wide.load23 = load <4 x double>, <4 x double>* %12, align 8
-  %13 = getelementptr double, double* %1, i64 24
-  %14 = bitcast double* %13 to <4 x double>*
-  %wide.load24 = load <4 x double>, <4 x double>* %14, align 8
-  %15 = getelementptr double, double* %1, i64 28
-  %16 = bitcast double* %15 to <4 x double>*
-  %wide.load25 = load <4 x double>, <4 x double>* %16, align 8
-  %17 = fmul fast <4 x double> %wide.load, %broadcast.splat27
-  %18 = fmul fast <4 x double> %wide.load19, %broadcast.splat27
-  %19 = fmul fast <4 x double> %wide.load20, %broadcast.splat27
-  %20 = fmul fast <4 x double> %wide.load21, %broadcast.splat27
-  %21 = fmul fast <4 x double> %wide.load22, %broadcast.splat27
-  %22 = fmul fast <4 x double> %wide.load23, %broadcast.splat27
-  %23 = fmul fast <4 x double> %wide.load24, %broadcast.splat27
-  %24 = fmul fast <4 x double> %wide.load25, %broadcast.splat27
-  %25 = bitcast double* %0 to <4 x double>*
-  store <4 x double> %17, <4 x double>* %25, align 8
-  %26 = getelementptr double, double* %0, i64 4
-  %27 = bitcast double* %26 to <4 x double>*
-  store <4 x double> %18, <4 x double>* %27, align 8
-  %28 = getelementptr double, double* %0, i64 8
-  %29 = bitcast double* %28 to <4 x double>*
-  %30 = getelementptr double, double* %0, i64 12
-  %31 = bitcast double* %30 to <4 x double>*
-  %32 = getelementptr double, double* %0, i64 16
-  %33 = bitcast double* %32 to <4 x double>*
-  %34 = getelementptr double, double* %0, i64 20
-  %35 = bitcast double* %34 to <4 x double>*
-  %36 = getelementptr double, double* %0, i64 24
-  %37 = bitcast double* %36 to <4 x double>*
-  %38 = getelementptr double, double* %0, i64 28
-  %39 = bitcast double* %38 to <4 x double>*
-  store <4 x double> %24, <4 x double>* %39, align 8
+  %0 = getelementptr %_elem_type_of_x, ptr %x_rvo_based_addr_3, i64 %offset.idx, i32 0
+  %1 = getelementptr %_elem_type_of_a, ptr %a_rvo_based_addr_5, i64 %offset.idx, i32 0
+  %wide.load = load <4 x double>, ptr %1, align 8
+  %2 = getelementptr double, ptr %1, i64 4
+  %wide.load19 = load <4 x double>, ptr %2, align 8
+  %3 = getelementptr double, ptr %1, i64 8
+  %wide.load20 = load <4 x double>, ptr %3, align 8
+  %4 = getelementptr double, ptr %1, i64 12
+  %wide.load21 = load <4 x double>, ptr %4, align 8
+  %5 = getelementptr double, ptr %1, i64 16
+  %wide.load22 = load <4 x double>, ptr %5, align 8
+  %6 = getelementptr double, ptr %1, i64 20
+  %wide.load23 = load <4 x double>, ptr %6, align 8
+  %7 = getelementptr double, ptr %1, i64 24
+  %wide.load24 = load <4 x double>, ptr %7, align 8
+  %8 = getelementptr double, ptr %1, i64 28
+  %wide.load25 = load <4 x double>, ptr %8, align 8
+  %9 = fmul fast <4 x double> %wide.load, %broadcast.splat27
+  %10 = fmul fast <4 x double> %wide.load19, %broadcast.splat27
+  %11 = fmul fast <4 x double> %wide.load20, %broadcast.splat27
+  %12 = fmul fast <4 x double> %wide.load21, %broadcast.splat27
+  %13 = fmul fast <4 x double> %wide.load22, %broadcast.splat27
+  %14 = fmul fast <4 x double> %wide.load23, %broadcast.splat27
+  %15 = fmul fast <4 x double> %wide.load24, %broadcast.splat27
+  %16 = fmul fast <4 x double> %wide.load25, %broadcast.splat27
+  store <4 x double> %9, ptr %0, align 8
+  %17 = getelementptr double, ptr %0, i64 4
+  store <4 x double> %10, ptr %17, align 8
+  %18 = getelementptr double, ptr %0, i64 8
+  %19 = getelementptr double, ptr %0, i64 12
+  %20 = getelementptr double, ptr %0, i64 16
+  %21 = getelementptr double, ptr %0, i64 20
+  %22 = getelementptr double, ptr %0, i64 24
+  %23 = getelementptr double, ptr %0, i64 28
+  store <4 x double> %16, ptr %23, align 8
   %index.next = add i64 %index, 32
   %cm = icmp eq i64 %index.next, %n.vec
   br i1 %cm, label %return.block, label %vector.body

diff  --git a/llvm/test/CodeGen/PowerPC/scheduling-mem-dependency.ll b/llvm/test/CodeGen/PowerPC/scheduling-mem-dependency.ll
index 4c6f9207a34a..4e3403587c9f 100644
--- a/llvm/test/CodeGen/PowerPC/scheduling-mem-dependency.ll
+++ b/llvm/test/CodeGen/PowerPC/scheduling-mem-dependency.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -verify-misched -debug-only=machine-scheduler -o - 2>&1 > /dev/null | FileCheck %s
 ; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr9 -verify-misched -debug-only=machine-scheduler -o - 2>&1 > /dev/null | FileCheck %s --check-prefix=CHECK-P9
 
-define i64 @store_disjoint_memory(i64* nocapture %P, i64 %v) {
+define i64 @store_disjoint_memory(ptr nocapture %P, i64 %v) {
 entry:
 ; CHECK: ********** MI Scheduling **********
 ; CHECK-LABEL: store_disjoint_memory:%bb.0
@@ -12,10 +12,10 @@ entry:
 ; CHECK:SU([[REG3:[0-9]+]]):   STD renamable $x{{[0-9]+}}, 16, renamable $x[[REG5]]
 ; CHECK: Predecessors:
 ; CHECK-NOT:    SU([[REG2]]): Ord  Latency=0 Memory
-  %arrayidx = getelementptr inbounds i64, i64* %P, i64 3
-  store i64 %v, i64* %arrayidx
-  %arrayidx1 = getelementptr inbounds i64, i64* %P, i64 2
-  store i64 %v, i64* %arrayidx1
+  %arrayidx = getelementptr inbounds i64, ptr %P, i64 3
+  store i64 %v, ptr %arrayidx
+  %arrayidx1 = getelementptr inbounds i64, ptr %P, i64 2
+  store i64 %v, ptr %arrayidx1
   ret i64 %v
 }
 
@@ -25,11 +25,11 @@ entry:
 
 define double @test_lxsd_no_barrier(double %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h, double %i, double %j, double %k, double %l, double %m) {
 entry:
-  %0 = load double, double* getelementptr inbounds ([500 x double], [500 x double]* @gd, i64 0, i64 10), align 8
-  %1 = load double, double* getelementptr inbounds ([500 x double], [500 x double]* @gd, i64 0, i64 17), align 8
-  %2 = load double, double* getelementptr inbounds ([500 x double], [500 x double]* @gd, i64 0, i64 87), align 8
-  %3 = load double, double* getelementptr inbounds ([500 x double], [500 x double]* @gd, i64 0, i64 97), align 8
-  %4 = load double, double* getelementptr inbounds ([500 x double], [500 x double]* @gd, i64 0, i64 77), align 8
+  %0 = load double, ptr getelementptr inbounds ([500 x double], ptr @gd, i64 0, i64 10), align 8
+  %1 = load double, ptr getelementptr inbounds ([500 x double], ptr @gd, i64 0, i64 17), align 8
+  %2 = load double, ptr getelementptr inbounds ([500 x double], ptr @gd, i64 0, i64 87), align 8
+  %3 = load double, ptr getelementptr inbounds ([500 x double], ptr @gd, i64 0, i64 97), align 8
+  %4 = load double, ptr getelementptr inbounds ([500 x double], ptr @gd, i64 0, i64 77), align 8
   %add = fadd double %a, %b
   %add1 = fadd double %add, %c
   %add2 = fadd double %add1, %d

diff  --git a/llvm/test/CodeGen/PowerPC/sdag-ppcf128.ll b/llvm/test/CodeGen/PowerPC/sdag-ppcf128.ll
index 9b41ebac9888..856b1202b40f 100644
--- a/llvm/test/CodeGen/PowerPC/sdag-ppcf128.ll
+++ b/llvm/test/CodeGen/PowerPC/sdag-ppcf128.ll
@@ -7,7 +7,7 @@ entry:
   br i1 undef, label %if, label %else
 ; CHECK: cmplwi 3, 0
 if:                                               ; preds = %entry
-  store { ppc_fp128, ppc_fp128 } zeroinitializer, { ppc_fp128, ppc_fp128 }* undef
+  store { ppc_fp128, ppc_fp128 } zeroinitializer, ptr undef
   ret void
 
 else:                                             ; preds = %entry

diff  --git a/llvm/test/CodeGen/PowerPC/select-addrRegRegOnly.ll b/llvm/test/CodeGen/PowerPC/select-addrRegRegOnly.ll
index 29b11dfa21bf..2cfc302a2ab5 100644
--- a/llvm/test/CodeGen/PowerPC/select-addrRegRegOnly.ll
+++ b/llvm/test/CodeGen/PowerPC/select-addrRegRegOnly.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mcpu=pwr8 -mtriple=powerpc64-unknown-unknown -verify-machineinstrs < %s | FileCheck %s
 
 ; Function Attrs: norecurse nounwind readonly
-define float @testSingleAccess(i32* nocapture readonly %arr) local_unnamed_addr #0 {
+define float @testSingleAccess(ptr nocapture readonly %arr) local_unnamed_addr #0 {
 ; CHECK-LABEL: testSingleAccess:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addi 3, 3, 8
@@ -10,14 +10,14 @@ define float @testSingleAccess(i32* nocapture readonly %arr) local_unnamed_addr
 ; CHECK-NEXT:    xscvsxdsp 1, 0
 ; CHECK-NEXT:    blr
 entry:
-  %arrayidx = getelementptr inbounds i32, i32* %arr, i64 2
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %arr, i64 2
+  %0 = load i32, ptr %arrayidx, align 4
   %conv = sitofp i32 %0 to float
   ret float %conv
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define float @testMultipleAccess(i32* nocapture readonly %arr) local_unnamed_addr #0 {
+define float @testMultipleAccess(ptr nocapture readonly %arr) local_unnamed_addr #0 {
 ; CHECK-LABEL: testMultipleAccess:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lwz 4, 8(3)
@@ -27,10 +27,10 @@ define float @testMultipleAccess(i32* nocapture readonly %arr) local_unnamed_add
 ; CHECK-NEXT:    xscvsxdsp 1, 0
 ; CHECK-NEXT:    blr
 entry:
-  %arrayidx = getelementptr inbounds i32, i32* %arr, i64 2
-  %0 = load i32, i32* %arrayidx, align 4
-  %arrayidx1 = getelementptr inbounds i32, i32* %arr, i64 3
-  %1 = load i32, i32* %arrayidx1, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %arr, i64 2
+  %0 = load i32, ptr %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %arr, i64 3
+  %1 = load i32, ptr %arrayidx1, align 4
   %add = add nsw i32 %1, %0
   %conv = sitofp i32 %add to float
   ret float %conv

diff  --git a/llvm/test/CodeGen/PowerPC/selectiondag-extload-computeknownbits.ll b/llvm/test/CodeGen/PowerPC/selectiondag-extload-computeknownbits.ll
index 73fce78c33aa..496a374c3ae0 100644
--- a/llvm/test/CodeGen/PowerPC/selectiondag-extload-computeknownbits.ll
+++ b/llvm/test/CodeGen/PowerPC/selectiondag-extload-computeknownbits.ll
@@ -2,9 +2,9 @@
 
 ; Check that llc does not crash due to an illegal APInt operation
 
-define i1 @f(i8* %ptr) {
+define i1 @f(ptr %ptr) {
  entry:
-  %val = load i8, i8* %ptr, align 8, !range !0
+  %val = load i8, ptr %ptr, align 8, !range !0
   %tobool = icmp eq i8 %val, 0
   ret i1 %tobool
 }

diff  --git a/llvm/test/CodeGen/PowerPC/selectiondag-sextload.ll b/llvm/test/CodeGen/PowerPC/selectiondag-sextload.ll
index de33faf000ac..1aec04718714 100644
--- a/llvm/test/CodeGen/PowerPC/selectiondag-sextload.ll
+++ b/llvm/test/CodeGen/PowerPC/selectiondag-sextload.ll
@@ -5,12 +5,12 @@
 
 declare void @g(i32 signext)
 
-define void @foo(i8* %p) {
+define void @foo(ptr %p) {
 entry:
   br label %while.body
 
 while.body:
-  %0 = load i8, i8* %p, align 1
+  %0 = load i8, ptr %p, align 1
   %conv = zext i8 %0 to i32
   %cmp = icmp sgt i8 %0, 0
   br i1 %cmp, label %if.then, label %while.body

diff  --git a/llvm/test/CodeGen/PowerPC/setcc-logic.ll b/llvm/test/CodeGen/PowerPC/setcc-logic.ll
index 723653ce9fcf..7dca47128a5b 100644
--- a/llvm/test/CodeGen/PowerPC/setcc-logic.ll
+++ b/llvm/test/CodeGen/PowerPC/setcc-logic.ll
@@ -110,7 +110,7 @@ define zeroext i1 @any_sign_bits_clear(i32 %P, i32 %Q)  {
 }
 
 ; PR3351 - (P == 0) & (Q == 0) -> (P|Q) == 0
-define i32 @all_bits_clear_branch(i32* %P, i32* %Q)  {
+define i32 @all_bits_clear_branch(ptr %P, ptr %Q)  {
 ; CHECK-LABEL: all_bits_clear_branch:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    or. 3, 3, 4
@@ -122,8 +122,8 @@ define i32 @all_bits_clear_branch(i32* %P, i32* %Q)  {
 ; CHECK-NEXT:    li 3, 192
 ; CHECK-NEXT:    blr
 entry:
-  %a = icmp eq i32* %P, null
-  %b = icmp eq i32* %Q, null
+  %a = icmp eq ptr %P, null
+  %b = icmp eq ptr %Q, null
   %c = and i1 %a, %b
   br i1 %c, label %bb1, label %return
 
@@ -210,7 +210,7 @@ return:
 }
 
 ; PR3351 - (P != 0) | (Q != 0) -> (P|Q) != 0
-define i32 @any_bits_set_branch(i32* %P, i32* %Q)  {
+define i32 @any_bits_set_branch(ptr %P, ptr %Q)  {
 ; CHECK-LABEL: any_bits_set_branch:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    or. 3, 3, 4
@@ -222,8 +222,8 @@ define i32 @any_bits_set_branch(i32* %P, i32* %Q)  {
 ; CHECK-NEXT:    li 3, 192
 ; CHECK-NEXT:    blr
 entry:
-  %a = icmp ne i32* %P, null
-  %b = icmp ne i32* %Q, null
+  %a = icmp ne ptr %P, null
+  %b = icmp ne ptr %Q, null
   %c = or i1 %a, %b
   br i1 %c, label %bb1, label %return
 

diff  --git a/llvm/test/CodeGen/PowerPC/setcc-to-sub.ll b/llvm/test/CodeGen/PowerPC/setcc-to-sub.ll
index 1d8c92f94a76..13c629b63494 100644
--- a/llvm/test/CodeGen/PowerPC/setcc-to-sub.ll
+++ b/llvm/test/CodeGen/PowerPC/setcc-to-sub.ll
@@ -4,11 +4,11 @@
 ; RUN: llc -verify-machineinstrs -mtriple=powerpc64-ibm-aix-xcoff -mcpu=pwr8 \
 ; RUN: < %s -vec-extabi | FileCheck %s
 
-%class.PB2 = type { [1 x i32], %class.PB1* }
+%class.PB2 = type { [1 x i32], ptr }
 %class.PB1 = type { [1 x i32], i64, i64, i32 }
 
 ; Function Attrs: norecurse nounwind readonly
-define zeroext i1 @test1(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 {
+define zeroext i1 @test1(ptr %s_a, ptr %s_b) local_unnamed_addr #0 {
 ; CHECK-LABEL: test1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lwz 3, 0(3)
@@ -19,18 +19,16 @@ define zeroext i1 @test1(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr
 ; CHECK-NEXT:    rldicl 3, 3, 1, 63
 ; CHECK-NEXT:    blr
 entry:
-  %arrayidx.i6 = bitcast %class.PB2* %s_a to i32*
-  %0 = load i32, i32* %arrayidx.i6, align 8, !tbaa !1
+  %0 = load i32, ptr %s_a, align 8, !tbaa !1
   %and.i = and i32 %0, 8
-  %arrayidx.i37 = bitcast %class.PB2* %s_b to i32*
-  %1 = load i32, i32* %arrayidx.i37, align 8, !tbaa !1
+  %1 = load i32, ptr %s_b, align 8, !tbaa !1
   %and.i4 = and i32 %1, 8
   %cmp.i5 = icmp ult i32 %and.i, %and.i4
   ret i1 %cmp.i5
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define zeroext i1 @test2(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 {
+define zeroext i1 @test2(ptr %s_a, ptr %s_b) local_unnamed_addr #0 {
 ; CHECK-LABEL: test2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lwz 3, 0(3)
@@ -42,18 +40,16 @@ define zeroext i1 @test2(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr
 ; CHECK-NEXT:    rldicl 3, 3, 1, 63
 ; CHECK-NEXT:    blr
 entry:
-  %arrayidx.i6 = bitcast %class.PB2* %s_a to i32*
-  %0 = load i32, i32* %arrayidx.i6, align 8, !tbaa !1
+  %0 = load i32, ptr %s_a, align 8, !tbaa !1
   %and.i = and i32 %0, 8
-  %arrayidx.i37 = bitcast %class.PB2* %s_b to i32*
-  %1 = load i32, i32* %arrayidx.i37, align 8, !tbaa !1
+  %1 = load i32, ptr %s_b, align 8, !tbaa !1
   %and.i4 = and i32 %1, 8
   %cmp.i5 = icmp ule i32 %and.i, %and.i4
   ret i1 %cmp.i5
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define zeroext i1 @test3(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 {
+define zeroext i1 @test3(ptr %s_a, ptr %s_b) local_unnamed_addr #0 {
 ; CHECK-LABEL: test3:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lwz 3, 0(3)
@@ -64,18 +60,16 @@ define zeroext i1 @test3(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr
 ; CHECK-NEXT:    rldicl 3, 3, 1, 63
 ; CHECK-NEXT:    blr
 entry:
-  %arrayidx.i6 = bitcast %class.PB2* %s_a to i32*
-  %0 = load i32, i32* %arrayidx.i6, align 8, !tbaa !1
+  %0 = load i32, ptr %s_a, align 8, !tbaa !1
   %and.i = and i32 %0, 8
-  %arrayidx.i37 = bitcast %class.PB2* %s_b to i32*
-  %1 = load i32, i32* %arrayidx.i37, align 8, !tbaa !1
+  %1 = load i32, ptr %s_b, align 8, !tbaa !1
   %and.i4 = and i32 %1, 8
   %cmp.i5 = icmp ugt i32 %and.i, %and.i4
   ret i1 %cmp.i5
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define zeroext i1 @test4(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 {
+define zeroext i1 @test4(ptr %s_a, ptr %s_b) local_unnamed_addr #0 {
 ; CHECK-LABEL: test4:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lwz 3, 0(3)
@@ -87,11 +81,9 @@ define zeroext i1 @test4(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr
 ; CHECK-NEXT:    rldicl 3, 3, 1, 63
 ; CHECK-NEXT:    blr
 entry:
-  %arrayidx.i6 = bitcast %class.PB2* %s_a to i32*
-  %0 = load i32, i32* %arrayidx.i6, align 8, !tbaa !1
+  %0 = load i32, ptr %s_a, align 8, !tbaa !1
   %and.i = and i32 %0, 8
-  %arrayidx.i37 = bitcast %class.PB2* %s_b to i32*
-  %1 = load i32, i32* %arrayidx.i37, align 8, !tbaa !1
+  %1 = load i32, ptr %s_b, align 8, !tbaa !1
   %and.i4 = and i32 %1, 8
   %cmp.i5 = icmp uge i32 %and.i, %and.i4
   ret i1 %cmp.i5

diff  --git a/llvm/test/CodeGen/PowerPC/setcc_no_zext.ll b/llvm/test/CodeGen/PowerPC/setcc_no_zext.ll
index 14d64071c702..cc3b7cec7e54 100644
--- a/llvm/test/CodeGen/PowerPC/setcc_no_zext.ll
+++ b/llvm/test/CodeGen/PowerPC/setcc_no_zext.ll
@@ -4,9 +4,9 @@
 ; default at the default CodeOpt level.
 ; XFAIL: *
 
-define i32 @setcc_one_or_zero(i32* %a) {
+define i32 @setcc_one_or_zero(ptr %a) {
 entry:
-        %tmp.1 = icmp ne i32* %a, null          ; <i1> [#uses=1]
+        %tmp.1 = icmp ne ptr %a, null          ; <i1> [#uses=1]
         %inc.1 = zext i1 %tmp.1 to i32          ; <i32> [#uses=1]
         ret i32 %inc.1
 }

diff  --git a/llvm/test/CodeGen/PowerPC/setcclike-or-comb.ll b/llvm/test/CodeGen/PowerPC/setcclike-or-comb.ll
index 8394d30af0c9..697a2f0f641e 100644
--- a/llvm/test/CodeGen/PowerPC/setcclike-or-comb.ll
+++ b/llvm/test/CodeGen/PowerPC/setcclike-or-comb.ll
@@ -8,17 +8,17 @@ target triple = "powerpc64le-unknown-linux-gnu"
 ; Function Attrs: nounwind
 define void @fn1() #0 {
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   %cmp = icmp ne i32 %0, 1
   %conv = zext i1 %cmp to i32
-  %1 = load i32, i32* @b, align 4
+  %1 = load i32, ptr @b, align 4
   %cmp1 = icmp ne i32 0, %1
   %conv2 = zext i1 %cmp1 to i32
   %or = or i32 %conv, %conv2
   %xor = xor i32 1, %or
   %call = call signext i32 @fn2(i32 signext %xor)
   %conv4 = zext i1 undef to i32
-  store i32 %conv4, i32* @b, align 4
+  store i32 %conv4, ptr @b, align 4
   ret void
 
 ; CHECK-LABEL: @fn1

diff  --git a/llvm/test/CodeGen/PowerPC/sign-ext-atomics.ll b/llvm/test/CodeGen/PowerPC/sign-ext-atomics.ll
index 1641d4cd4faa..8a2927d0f451 100644
--- a/llvm/test/CodeGen/PowerPC/sign-ext-atomics.ll
+++ b/llvm/test/CodeGen/PowerPC/sign-ext-atomics.ll
@@ -26,10 +26,9 @@ define i16 @SEXTParam(i16 signext %0) #0 {
 ; CHECK-NEXT:    blr
 top:
   %1 = alloca i16, align 4
-  %2 = bitcast i16* %1 to i8*
-  store i16 0, i16* %1, align 4
-  %rv.i = atomicrmw min i16* %1, i16 %0 acq_rel
-  %rv.i2 = load atomic i16, i16* %1 acquire, align 16
+  store i16 0, ptr %1, align 4
+  %rv.i = atomicrmw min ptr %1, i16 %0 acq_rel
+  %rv.i2 = load atomic i16, ptr %1 acquire, align 16
   ret i16 %rv.i2
 }
 
@@ -60,14 +59,13 @@ define i16 @noSEXTParam(i16 %0) #0 {
 ; CHECK-NEXT:    blr
 top:
   %1 = alloca i16, align 4
-  %2 = bitcast i16* %1 to i8*
-  store i16 0, i16* %1, align 4
-  %rv.i = atomicrmw min i16* %1, i16 %0 acq_rel
-  %rv.i2 = load atomic i16, i16* %1 acquire, align 16
+  store i16 0, ptr %1, align 4
+  %rv.i = atomicrmw min ptr %1, i16 %0 acq_rel
+  %rv.i2 = load atomic i16, ptr %1 acquire, align 16
   ret i16 %rv.i2
 }
 
-define i16 @noSEXTLoad(i16 *%p) #0 {
+define i16 @noSEXTLoad(ptr %p) #0 {
 ; CHECK-LABEL: noSEXTLoad:
 ; CHECK:       # %bb.0: # %top
 ; CHECK-NEXT:    lha 3, 0(3)
@@ -93,12 +91,11 @@ define i16 @noSEXTLoad(i16 *%p) #0 {
 ; CHECK-NEXT:    isync
 ; CHECK-NEXT:    blr
 top:
-  %0 = load i16, i16* %p, align 2
+  %0 = load i16, ptr %p, align 2
   %1 = alloca i16, align 4
-  %2 = bitcast i16* %1 to i8*
-  store i16 0, i16* %1, align 4
-  %rv.i = atomicrmw min i16* %1, i16 %0 acq_rel
-  %rv.i2 = load atomic i16, i16* %1 acquire, align 16
+  store i16 0, ptr %1, align 4
+  %rv.i = atomicrmw min ptr %1, i16 %0 acq_rel
+  %rv.i2 = load atomic i16, ptr %1 acquire, align 16
   ret i16 %rv.i2
 }
 attributes #0 = { nounwind }

diff  --git a/llvm/test/CodeGen/PowerPC/simplifyConstCmpToISEL.ll b/llvm/test/CodeGen/PowerPC/simplifyConstCmpToISEL.ll
index 0dea7c5dec49..2dbd68265645 100644
--- a/llvm/test/CodeGen/PowerPC/simplifyConstCmpToISEL.ll
+++ b/llvm/test/CodeGen/PowerPC/simplifyConstCmpToISEL.ll
@@ -33,11 +33,11 @@ for.body.i62.us:                                  ; preds = %if.end.i.us.1, %for
 
 test2.exit.us.unr-lcssa: ; preds = %if.end.i.us.1, %for.body.i62.us.preheader
   %c.addr.036.i.us.unr = phi i64 [ 0, %for.body.i62.us.preheader ], [ %c.addr.1.i.us.1, %if.end.i.us.1 ]
-  %1 = load i64, i64* undef, align 8
+  %1 = load i64, ptr undef, align 8
   %tobool.i61.us.epil = icmp eq i64 %c.addr.036.i.us.unr, 0
   %add.neg.i.us.epil.pn = select i1 %tobool.i61.us.epil, i64 %1, i64 0
   %storemerge269 = sub i64 %add.neg.i.us.epil.pn, 0
-  store i64 %storemerge269, i64* undef, align 8
+  store i64 %storemerge269, ptr undef, align 8
   unreachable
 
 test3.exit.split:             ; preds = %cond.end.i

diff  --git a/llvm/test/CodeGen/PowerPC/sink-side-effect.ll b/llvm/test/CodeGen/PowerPC/sink-side-effect.ll
index c348a97e2d2a..2e9f49880301 100644
--- a/llvm/test/CodeGen/PowerPC/sink-side-effect.ll
+++ b/llvm/test/CodeGen/PowerPC/sink-side-effect.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=powerpc64le -mcpu=pwr9 -verify-machineinstrs < %s | FileCheck %s
 
-define double @zot(i32* %arg, float* %arg1, i16* %arg2) {
+define double @zot(ptr %arg, ptr %arg1, ptr %arg2) {
 ; CHECK-LABEL: zot:
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    bc 12, 20, .LBB0_2
@@ -26,11 +26,11 @@ define double @zot(i32* %arg, float* %arg1, i16* %arg2) {
 ; CHECK-NEXT:    xsmuldp 1, 1, 0
 ; CHECK-NEXT:    b .LBB0_3
 bb:
-  %tmp = load i32, i32* %arg, align 8
+  %tmp = load i32, ptr %arg, align 8
   br i1 undef, label %bb9, label %bb3
 
 bb3:
-  %tmp4 = load i16, i16* %arg2, align 4
+  %tmp4 = load i16, ptr %arg2, align 4
   %tmp5 = lshr i16 %tmp4, 4
   %tmp6 = and i16 %tmp5, 3
   %tmp7 = zext i16 %tmp6 to i32
@@ -46,7 +46,7 @@ bb10:
   br label %bb13
 
 bb13:
-  %tmp14 = load float, float* %arg1, align 4
+  %tmp14 = load float, ptr %arg1, align 4
   %tmp15 = fpext float %tmp14 to double
   br label %bb16
 

diff  --git a/llvm/test/CodeGen/PowerPC/sj-ctr-loop.ll b/llvm/test/CodeGen/PowerPC/sj-ctr-loop.ll
index 7e485e26123a..4446814384ed 100644
--- a/llvm/test/CodeGen/PowerPC/sj-ctr-loop.ll
+++ b/llvm/test/CodeGen/PowerPC/sj-ctr-loop.ll
@@ -30,7 +30,7 @@ for.cond:                                         ; preds = %for.body
 
 for.body:                                         ; preds = %for.cond, %for.body.lr.ph
   %i.032 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.cond ]
-  %0 = call i32 @llvm.eh.sjlj.setjmp(i8* bitcast ([1 x %struct.__jmp_buf_tag.1.15.17.21.25.49.53.55]* @env_sigill to i8*))
+  %0 = call i32 @llvm.eh.sjlj.setjmp(ptr @env_sigill)
   %inc = add nsw i32 %i.032, 1
   br i1 false, label %if.else, label %for.cond
 
@@ -45,6 +45,6 @@ return:                                           ; preds = %for.end.thread, %en
 }
 
 ; Function Attrs: nounwind
-declare i32 @llvm.eh.sjlj.setjmp(i8*) #0
+declare i32 @llvm.eh.sjlj.setjmp(ptr) #0
 
 attributes #0 = { nounwind }

diff  --git a/llvm/test/CodeGen/PowerPC/sjlj.ll b/llvm/test/CodeGen/PowerPC/sjlj.ll
index 4a57732ef6fa..9f7e26ece771 100644
--- a/llvm/test/CodeGen/PowerPC/sjlj.ll
+++ b/llvm/test/CodeGen/PowerPC/sjlj.ll
@@ -11,7 +11,7 @@ target triple = "powerpc64-unknown-linux-gnu"
 
 define void @foo() #0 {
 entry:
-  call void @llvm.eh.sjlj.longjmp(i8* bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8*))
+  call void @llvm.eh.sjlj.longjmp(ptr @env_sigill)
   unreachable
 
 ; CHECK: @foo
@@ -29,22 +29,22 @@ return:                                           ; No predecessors!
   ret void
 }
 
-declare void @llvm.eh.sjlj.longjmp(i8*) #1
+declare void @llvm.eh.sjlj.longjmp(ptr) #1
 
 define signext i32 @main() #0 {
 entry:
   %retval = alloca i32, align 4
-  store i32 0, i32* %retval
-  %0 = call i8* @llvm.frameaddress(i32 0)
-  store i8* %0, i8** bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8**)
-  %1 = call i8* @llvm.stacksave()
-  store i8* %1, i8** getelementptr (i8*, i8** bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8**), i32 2)
-  %2 = call i32 @llvm.eh.sjlj.setjmp(i8* bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8*))
+  store i32 0, ptr %retval
+  %0 = call ptr @llvm.frameaddress(i32 0)
+  store ptr %0, ptr @env_sigill
+  %1 = call ptr @llvm.stacksave()
+  store ptr %1, ptr getelementptr (ptr, ptr @env_sigill, i32 2)
+  %2 = call i32 @llvm.eh.sjlj.setjmp(ptr @env_sigill)
   %tobool = icmp ne i32 %2, 0
   br i1 %tobool, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  store i32 1, i32* %retval
+  store i32 1, ptr %retval
   br label %return
 
 if.else:                                          ; preds = %entry
@@ -52,11 +52,11 @@ if.else:                                          ; preds = %entry
   br label %if.end
 
 if.end:                                           ; preds = %if.else
-  store i32 0, i32* %retval
+  store i32 0, ptr %retval
   br label %return
 
 return:                                           ; preds = %if.end, %if.then
-  %3 = load i32, i32* %retval
+  %3 = load i32, ptr %retval
   ret i32 %3
 
 
@@ -104,19 +104,19 @@ return:                                           ; preds = %if.end, %if.then
 define signext i32 @main2() #0 {
 entry:
   %a = alloca i8, align 64
-  call void @bar(i8* %a)
+  call void @bar(ptr %a)
   %retval = alloca i32, align 4
-  store i32 0, i32* %retval
-  %0 = call i8* @llvm.frameaddress(i32 0)
-  store i8* %0, i8** bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8**)
-  %1 = call i8* @llvm.stacksave()
-  store i8* %1, i8** getelementptr (i8*, i8** bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8**), i32 2)
-  %2 = call i32 @llvm.eh.sjlj.setjmp(i8* bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8*))
+  store i32 0, ptr %retval
+  %0 = call ptr @llvm.frameaddress(i32 0)
+  store ptr %0, ptr @env_sigill
+  %1 = call ptr @llvm.stacksave()
+  store ptr %1, ptr getelementptr (ptr, ptr @env_sigill, i32 2)
+  %2 = call i32 @llvm.eh.sjlj.setjmp(ptr @env_sigill)
   %tobool = icmp ne i32 %2, 0
   br i1 %tobool, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  store i32 1, i32* %retval
+  store i32 1, ptr %retval
   br label %return
 
 if.else:                                          ; preds = %entry
@@ -124,11 +124,11 @@ if.else:                                          ; preds = %entry
   br label %if.end
 
 if.end:                                           ; preds = %if.else
-  store i32 0, i32* %retval
+  store i32 0, ptr %retval
   br label %return
 
 return:                                           ; preds = %if.end, %if.then
-  %3 = load i32, i32* %retval
+  %3 = load i32, ptr %retval
   ret i32 %3
 
 ; CHECK-LABEL: main2:
@@ -147,12 +147,12 @@ return:                                           ; preds = %if.end, %if.then
 
 define void @test_sjlj_setjmp() #0 {
 entry:
-  %0 = load i8, i8* @cond, align 1
+  %0 = load i8, ptr @cond, align 1
   %tobool = trunc i8 %0 to i1
   br i1 %tobool, label %return, label %end
 
 end:
-  %1 = call i32 @llvm.eh.sjlj.setjmp(i8* bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8*))
+  %1 = call i32 @llvm.eh.sjlj.setjmp(ptr @env_sigill)
   br label %return
 
 return:
@@ -163,13 +163,13 @@ return:
 ; CHECK-NOT: bl _setjmp
 }
 
-declare void @bar(i8*) #3
+declare void @bar(ptr) #3
 
-declare i8* @llvm.frameaddress(i32) #2
+declare ptr @llvm.frameaddress(i32) #2
 
-declare i8* @llvm.stacksave() #3
+declare ptr @llvm.stacksave() #3
 
-declare i32 @llvm.eh.sjlj.setjmp(i8*) #3
+declare i32 @llvm.eh.sjlj.setjmp(ptr) #3
 
 attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { noreturn nounwind }

diff  --git a/llvm/test/CodeGen/PowerPC/sjlj_no0x.ll b/llvm/test/CodeGen/PowerPC/sjlj_no0x.ll
index 01053c4070f1..57a2704ad35a 100644
--- a/llvm/test/CodeGen/PowerPC/sjlj_no0x.ll
+++ b/llvm/test/CodeGen/PowerPC/sjlj_no0x.ll
@@ -8,7 +8,7 @@ target triple = "powerpc64le-unknown-linux-gnu"
 ; Function Attrs: noinline nounwind
 define void @_Z23BuiltinLongJmpFunc1_bufv() #0 {
 entry:
-  call void @llvm.eh.sjlj.longjmp(i8* bitcast (void ()* @_Z23BuiltinLongJmpFunc1_bufv to i8*))
+  call void @llvm.eh.sjlj.longjmp(ptr @_Z23BuiltinLongJmpFunc1_bufv)
   unreachable
 
 ; CHECK: @_Z23BuiltinLongJmpFunc1_bufv
@@ -26,4 +26,4 @@ return:                                           ; No predecessors!
 }
 
 ; Function Attrs: noreturn nounwind
-declare void @llvm.eh.sjlj.longjmp(i8*) #1
+declare void @llvm.eh.sjlj.longjmp(ptr) #1

diff  --git a/llvm/test/CodeGen/PowerPC/small-arguments.ll b/llvm/test/CodeGen/PowerPC/small-arguments.ll
index d35ef3d514c5..0e5854fec0dd 100644
--- a/llvm/test/CodeGen/PowerPC/small-arguments.ll
+++ b/llvm/test/CodeGen/PowerPC/small-arguments.ll
@@ -25,23 +25,23 @@ UnifiedReturnBlock:
 	ret void
 }
 
-define i32 @test4(i16* %P) {
-        %tmp.1 = load i16, i16* %P
+define i32 @test4(ptr %P) {
+        %tmp.1 = load i16, ptr %P
         %tmp.2 = zext i16 %tmp.1 to i32
         %tmp.3 = and i32 %tmp.2, 255
         ret i32 %tmp.3
 }
 
-define i32 @test5(i16* %P) {
-        %tmp.1 = load i16, i16* %P
+define i32 @test5(ptr %P) {
+        %tmp.1 = load i16, ptr %P
         %tmp.2 = bitcast i16 %tmp.1 to i16
         %tmp.3 = zext i16 %tmp.2 to i32
         %tmp.4 = and i32 %tmp.3, 255
         ret i32 %tmp.4
 }
 
-define i32 @test6(i32* %P) {
-        %tmp.1 = load i32, i32* %P
+define i32 @test6(ptr %P) {
+        %tmp.1 = load i32, ptr %P
         %tmp.2 = and i32 %tmp.1, 255
         ret i32 %tmp.2
 }

diff  --git a/llvm/test/CodeGen/PowerPC/sms-cpy-1.ll b/llvm/test/CodeGen/PowerPC/sms-cpy-1.ll
index 563269595fbf..68c3c1f757bd 100644
--- a/llvm/test/CodeGen/PowerPC/sms-cpy-1.ll
+++ b/llvm/test/CodeGen/PowerPC/sms-cpy-1.ll
@@ -82,7 +82,7 @@ define void @print_res() nounwind {
 ; CHECK-NEXT:    li 5, 0
 ; CHECK-NEXT:    bl printf
 ; CHECK-NEXT:    nop
-  %1 = load i32, i32* undef, align 4
+  %1 = load i32, ptr undef, align 4
   %2 = add i32 %1, -1
   %3 = zext i32 %2 to i64
   %4 = zext i32 3 to i64
@@ -94,8 +94,8 @@ define void @print_res() nounwind {
   %8 = trunc i64 %6 to i32
   %9 = sub i32 0, %8
   %10 = zext i32 %9 to i64
-  %11 = getelementptr inbounds i8, i8* null, i64 %10
-  %12 = load i8, i8* %11, align 1
+  %11 = getelementptr inbounds i8, ptr null, i64 %10
+  %12 = load i8, ptr %11, align 1
   %13 = icmp eq i8 %12, 84
   %14 = zext i1 %13 to i32
   %15 = add i32 %7, %14
@@ -107,8 +107,8 @@ define void @print_res() nounwind {
 
 20:                                               ; preds = %5
   %21 = trunc i64 %16 to i32
-  call void (i8*, ...) @printf(i8* getelementptr inbounds ([69 x i8], [69 x i8]* @.str.28, i64 0, i64 0), i32 zeroext 3, i32 zeroext undef, i32 zeroext %15, i32 zeroext undef, i32 zeroext 3, i8* undef, i32 zeroext undef, i32 zeroext 3, i32 zeroext %21, i8* undef, i32 zeroext undef) #1
+  call void (ptr, ...) @printf(ptr @.str.28, i32 zeroext 3, i32 zeroext undef, i32 zeroext %15, i32 zeroext undef, i32 zeroext 3, ptr undef, i32 zeroext undef, i32 zeroext 3, i32 zeroext %21, ptr undef, i32 zeroext undef) #1
   unreachable
 }
 
-declare void @printf(i8*, ...) local_unnamed_addr #0
+declare void @printf(ptr, ...) local_unnamed_addr #0

diff  --git a/llvm/test/CodeGen/PowerPC/sms-grp-order.ll b/llvm/test/CodeGen/PowerPC/sms-grp-order.ll
index ce322ebb94f7..f72598cb4cbc 100644
--- a/llvm/test/CodeGen/PowerPC/sms-grp-order.ll
+++ b/llvm/test/CodeGen/PowerPC/sms-grp-order.ll
@@ -26,8 +26,8 @@ define void @lame_encode_buffer_interleaved() local_unnamed_addr {
 
 1:                                                ; preds = %1, %0
   %2 = phi i64 [ 0, %0 ], [ %13, %1 ]
-  %3 = load i16, i16* null, align 2
-  %4 = load i16, i16* undef, align 2
+  %3 = load i16, ptr null, align 2
+  %4 = load i16, ptr undef, align 2
   %5 = sext i16 %3 to i32
   %6 = sext i16 %4 to i32
   %7 = add nsw i32 0, %5
@@ -36,8 +36,8 @@ define void @lame_encode_buffer_interleaved() local_unnamed_addr {
   %10 = sdiv i32 %8, 2
   %11 = trunc i32 %9 to i16
   %12 = trunc i32 %10 to i16
-  store i16 %11, i16* null, align 2
-  store i16 %12, i16* undef, align 2
+  store i16 %11, ptr null, align 2
+  store i16 %12, ptr undef, align 2
   %13 = add i64 %2, 4
   %14 = icmp eq i64 %13, 0
   br i1 %14, label %15, label %1

diff  --git a/llvm/test/CodeGen/PowerPC/sms-iterator.ll b/llvm/test/CodeGen/PowerPC/sms-iterator.ll
index 0fcb4895b98f..cb85c4baeb77 100644
--- a/llvm/test/CodeGen/PowerPC/sms-iterator.ll
+++ b/llvm/test/CodeGen/PowerPC/sms-iterator.ll
@@ -22,8 +22,8 @@ define dso_local fastcc double @_ZN3povL9polysolveEiPdS0_() unnamed_addr #0 {
   %6 = phi i64 [ %12, %3 ], [ undef, %2 ]
   %7 = add nsw i64 %4, -1
   %8 = fmul fast double %5, 1.000000e+07
-  %9 = getelementptr inbounds %0, %0* null, i64 1, i32 1, i64 %7
-  %10 = load double, double* %9, align 8
+  %9 = getelementptr inbounds %0, ptr null, i64 1, i32 1, i64 %7
+  %10 = load double, ptr %9, align 8
   %11 = fadd fast double %10, %8
   %12 = add i64 %6, -1
   %13 = icmp eq i64 %12, 0

diff  --git a/llvm/test/CodeGen/PowerPC/sms-phi-2.ll b/llvm/test/CodeGen/PowerPC/sms-phi-2.ll
index dd94ee8d2d3f..4904d11fc810 100644
--- a/llvm/test/CodeGen/PowerPC/sms-phi-2.ll
+++ b/llvm/test/CodeGen/PowerPC/sms-phi-2.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -verify-machineinstrs\
 ; RUN:       -mcpu=pwr9 --ppc-enable-pipeliner --pipeliner-force-ii=15 2>&1 | FileCheck %s
 
-define void @phi2(i32, i32, i8*) local_unnamed_addr {
+define void @phi2(i32, i32, ptr) local_unnamed_addr {
 ; CHECK-LABEL: phi2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    divw 8, 3, 4
@@ -59,8 +59,8 @@ define void @phi2(i32, i32, i8*) local_unnamed_addr {
   %12 = trunc i32 %10 to i8
   %13 = select i1 %11, i8 48, i8 55
   %14 = add i8 %13, %12
-  %15 = getelementptr inbounds i8, i8* %2, i64 %7
-  store i8 %14, i8* %15, align 1
+  %15 = getelementptr inbounds i8, ptr %2, i64 %7
+  store i8 %14, ptr %15, align 1
   %16 = icmp sgt i64 %5, 1
   br i1 %16, label %4, label %17
 

diff  --git a/llvm/test/CodeGen/PowerPC/sms-phi-5.ll b/llvm/test/CodeGen/PowerPC/sms-phi-5.ll
index bdc773de8aaf..5a4aa8fdfbf3 100644
--- a/llvm/test/CodeGen/PowerPC/sms-phi-5.ll
+++ b/llvm/test/CodeGen/PowerPC/sms-phi-5.ll
@@ -28,7 +28,7 @@ define void @phi5() unnamed_addr {
   ]
 
 1:                                                ; preds = %0, %0, %0, %0
-  %2 = load i16, i16* undef, align 2
+  %2 = load i16, ptr undef, align 2
   br label %3
 
 3:                                                ; preds = %3, %1

diff  --git a/llvm/test/CodeGen/PowerPC/sms-phi.ll b/llvm/test/CodeGen/PowerPC/sms-phi.ll
index 4e9031bced6f..53a3f13c0597 100644
--- a/llvm/test/CodeGen/PowerPC/sms-phi.ll
+++ b/llvm/test/CodeGen/PowerPC/sms-phi.ll
@@ -15,8 +15,8 @@ define dso_local void @sha512() #0 {
   %2 = phi i64 [ 0, %0 ], [ %12, %1 ]
   %3 = phi i64 [ undef, %0 ], [ %11, %1 ]
   %4 = phi i64 [ undef, %0 ], [ %3, %1 ]
-  %5 = getelementptr inbounds [80 x i64], [80 x i64]* null, i64 0, i64 %2
-  %6 = load i64, i64* %5, align 8
+  %5 = getelementptr inbounds [80 x i64], ptr null, i64 0, i64 %2
+  %6 = load i64, ptr %5, align 8
   %7 = add i64 0, %6
   %8 = and i64 %3, %4
   %9 = or i64 0, %8
@@ -28,7 +28,7 @@ define dso_local void @sha512() #0 {
 
 14:                                               ; preds = %1
   %15 = add i64 %4, 0
-  store i64 %15, i64* undef, align 8
+  store i64 %15, ptr undef, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/sms-remark.ll b/llvm/test/CodeGen/PowerPC/sms-remark.ll
index e68eb5631df4..11e324b4f1ba 100644
--- a/llvm/test/CodeGen/PowerPC/sms-remark.ll
+++ b/llvm/test/CodeGen/PowerPC/sms-remark.ll
@@ -10,40 +10,40 @@
 @x = dso_local local_unnamed_addr global <{ i32, i32, i32, i32, [1020 x i32] }> <{ i32 1, i32 2, i32 3, i32 4, [1020 x i32] zeroinitializer }>, align 4
 @y = dso_local global [1024 x i32] zeroinitializer, align 4
 
-define dso_local i32* @foo() local_unnamed_addr {
+define dso_local ptr @foo() local_unnamed_addr {
 ;ENABLED: Schedule found with Initiation Interval
 ;ENABLED: Pipelined succesfully!
 ;DISABLED-NOT: remark
 entry:
-  %.pre = load i32, i32* getelementptr inbounds ([1024 x i32], [1024 x i32]* @y, i64 0, i64 0), align 4
+  %.pre = load i32, ptr @y, align 4
   br label %for.body
 
 for.cond.cleanup:                                 ; preds = %for.body
-  ret i32* getelementptr inbounds ([1024 x i32], [1024 x i32]* @y, i64 0, i64 0)
+  ret ptr @y
 
 for.body:                                         ; preds = %for.body, %entry
   %0 = phi i32 [ %.pre, %entry ], [ %add.2, %for.body ]
   %indvars.iv = phi i64 [ 1, %entry ], [ %indvars.iv.next.2, %for.body ]
-  %arrayidx2 = getelementptr inbounds [1024 x i32], [1024 x i32]* bitcast (<{ i32, i32, i32, i32, [1020 x i32] }>* @x to [1024 x i32]*), i64 0, i64 %indvars.iv
-  %1 = load i32, i32* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds [1024 x i32], ptr @x, i64 0, i64 %indvars.iv
+  %1 = load i32, ptr %arrayidx2, align 4
   %mul = mul nsw i32 %1, %1
   %add = add nsw i32 %mul, %0
-  %arrayidx6 = getelementptr inbounds [1024 x i32], [1024 x i32]* @y, i64 0, i64 %indvars.iv
-  store i32 %add, i32* %arrayidx6, align 4
+  %arrayidx6 = getelementptr inbounds [1024 x i32], ptr @y, i64 0, i64 %indvars.iv
+  store i32 %add, ptr %arrayidx6, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
-  %arrayidx2.1 = getelementptr inbounds [1024 x i32], [1024 x i32]* bitcast (<{ i32, i32, i32, i32, [1020 x i32] }>* @x to [1024 x i32]*), i64 0, i64 %indvars.iv.next
-  %2 = load i32, i32* %arrayidx2.1, align 4
+  %arrayidx2.1 = getelementptr inbounds [1024 x i32], ptr @x, i64 0, i64 %indvars.iv.next
+  %2 = load i32, ptr %arrayidx2.1, align 4
   %mul.1 = mul nsw i32 %2, %2
   %add.1 = add nsw i32 %mul.1, %add
-  %arrayidx6.1 = getelementptr inbounds [1024 x i32], [1024 x i32]* @y, i64 0, i64 %indvars.iv.next
-  store i32 %add.1, i32* %arrayidx6.1, align 4
+  %arrayidx6.1 = getelementptr inbounds [1024 x i32], ptr @y, i64 0, i64 %indvars.iv.next
+  store i32 %add.1, ptr %arrayidx6.1, align 4
   %indvars.iv.next.1 = add nuw nsw i64 %indvars.iv, 2
-  %arrayidx2.2 = getelementptr inbounds [1024 x i32], [1024 x i32]* bitcast (<{ i32, i32, i32, i32, [1020 x i32] }>* @x to [1024 x i32]*), i64 0, i64 %indvars.iv.next.1
-  %3 = load i32, i32* %arrayidx2.2, align 4
+  %arrayidx2.2 = getelementptr inbounds [1024 x i32], ptr @x, i64 0, i64 %indvars.iv.next.1
+  %3 = load i32, ptr %arrayidx2.2, align 4
   %mul.2 = mul nsw i32 %3, %3
   %add.2 = add nsw i32 %mul.2, %add.1
-  %arrayidx6.2 = getelementptr inbounds [1024 x i32], [1024 x i32]* @y, i64 0, i64 %indvars.iv.next.1
-  store i32 %add.2, i32* %arrayidx6.2, align 4
+  %arrayidx6.2 = getelementptr inbounds [1024 x i32], ptr @y, i64 0, i64 %indvars.iv.next.1
+  store i32 %add.2, ptr %arrayidx6.2, align 4
   %indvars.iv.next.2 = add nuw nsw i64 %indvars.iv, 3
   %exitcond.2 = icmp eq i64 %indvars.iv.next.2, 1024
   br i1 %exitcond.2, label %for.cond.cleanup, label %for.body

diff  --git a/llvm/test/CodeGen/PowerPC/sms-simple.ll b/llvm/test/CodeGen/PowerPC/sms-simple.ll
index 9cac77b5540a..689b50547970 100644
--- a/llvm/test/CodeGen/PowerPC/sms-simple.ll
+++ b/llvm/test/CodeGen/PowerPC/sms-simple.ll
@@ -6,7 +6,7 @@
 @x = dso_local local_unnamed_addr global <{ i32, i32, i32, i32, [1020 x i32] }> <{ i32 1, i32 2, i32 3, i32 4, [1020 x i32] zeroinitializer }>, align 4
 @y = dso_local global [1024 x i32] zeroinitializer, align 4
 
-define dso_local i32* @foo() local_unnamed_addr {
+define dso_local ptr @foo() local_unnamed_addr {
 ; CHECK-LABEL: foo:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addis r5, r2, y at toc@ha
@@ -43,35 +43,35 @@ define dso_local i32* @foo() local_unnamed_addr {
 ; CHECK-NEXT:    stw r5, 8(r4)
 ; CHECK-NEXT:    blr
 entry:
-  %.pre = load i32, i32* getelementptr inbounds ([1024 x i32], [1024 x i32]* @y, i64 0, i64 0), align 4
+  %.pre = load i32, ptr @y, align 4
   br label %for.body
 
 for.cond.cleanup:                                 ; preds = %for.body
-  ret i32* getelementptr inbounds ([1024 x i32], [1024 x i32]* @y, i64 0, i64 0)
+  ret ptr @y
 
 for.body:                                         ; preds = %for.body, %entry
   %0 = phi i32 [ %.pre, %entry ], [ %add.2, %for.body ]
   %indvars.iv = phi i64 [ 1, %entry ], [ %indvars.iv.next.2, %for.body ]
-  %arrayidx2 = getelementptr inbounds [1024 x i32], [1024 x i32]* bitcast (<{ i32, i32, i32, i32, [1020 x i32] }>* @x to [1024 x i32]*), i64 0, i64 %indvars.iv
-  %1 = load i32, i32* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds [1024 x i32], ptr @x, i64 0, i64 %indvars.iv
+  %1 = load i32, ptr %arrayidx2, align 4
   %mul = mul nsw i32 %1, %1
   %add = add nsw i32 %mul, %0
-  %arrayidx6 = getelementptr inbounds [1024 x i32], [1024 x i32]* @y, i64 0, i64 %indvars.iv
-  store i32 %add, i32* %arrayidx6, align 4
+  %arrayidx6 = getelementptr inbounds [1024 x i32], ptr @y, i64 0, i64 %indvars.iv
+  store i32 %add, ptr %arrayidx6, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
-  %arrayidx2.1 = getelementptr inbounds [1024 x i32], [1024 x i32]* bitcast (<{ i32, i32, i32, i32, [1020 x i32] }>* @x to [1024 x i32]*), i64 0, i64 %indvars.iv.next
-  %2 = load i32, i32* %arrayidx2.1, align 4
+  %arrayidx2.1 = getelementptr inbounds [1024 x i32], ptr @x, i64 0, i64 %indvars.iv.next
+  %2 = load i32, ptr %arrayidx2.1, align 4
   %mul.1 = mul nsw i32 %2, %2
   %add.1 = add nsw i32 %mul.1, %add
-  %arrayidx6.1 = getelementptr inbounds [1024 x i32], [1024 x i32]* @y, i64 0, i64 %indvars.iv.next
-  store i32 %add.1, i32* %arrayidx6.1, align 4
+  %arrayidx6.1 = getelementptr inbounds [1024 x i32], ptr @y, i64 0, i64 %indvars.iv.next
+  store i32 %add.1, ptr %arrayidx6.1, align 4
   %indvars.iv.next.1 = add nuw nsw i64 %indvars.iv, 2
-  %arrayidx2.2 = getelementptr inbounds [1024 x i32], [1024 x i32]* bitcast (<{ i32, i32, i32, i32, [1020 x i32] }>* @x to [1024 x i32]*), i64 0, i64 %indvars.iv.next.1
-  %3 = load i32, i32* %arrayidx2.2, align 4
+  %arrayidx2.2 = getelementptr inbounds [1024 x i32], ptr @x, i64 0, i64 %indvars.iv.next.1
+  %3 = load i32, ptr %arrayidx2.2, align 4
   %mul.2 = mul nsw i32 %3, %3
   %add.2 = add nsw i32 %mul.2, %add.1
-  %arrayidx6.2 = getelementptr inbounds [1024 x i32], [1024 x i32]* @y, i64 0, i64 %indvars.iv.next.1
-  store i32 %add.2, i32* %arrayidx6.2, align 4
+  %arrayidx6.2 = getelementptr inbounds [1024 x i32], ptr @y, i64 0, i64 %indvars.iv.next.1
+  store i32 %add.2, ptr %arrayidx6.2, align 4
   %indvars.iv.next.2 = add nuw nsw i64 %indvars.iv, 3
   %exitcond.2 = icmp eq i64 %indvars.iv.next.2, 1024
   br i1 %exitcond.2, label %for.cond.cleanup, label %for.body

diff  --git a/llvm/test/CodeGen/PowerPC/spe-fastmath.ll b/llvm/test/CodeGen/PowerPC/spe-fastmath.ll
index d2b83f7ee1da..434f89bd3d58 100644
--- a/llvm/test/CodeGen/PowerPC/spe-fastmath.ll
+++ b/llvm/test/CodeGen/PowerPC/spe-fastmath.ll
@@ -24,7 +24,7 @@ entry:
   %tobool = fcmp une double %mul, 0.000000e+00
   %cond = select i1 %tobool, double %conv, double 0.000000e+00
   %conv3 = fptosi double %cond to i16
-  store i16 %conv3, i16* undef
+  store i16 %conv3, ptr undef
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/spe-hwdouble.ll b/llvm/test/CodeGen/PowerPC/spe-hwdouble.ll
index e004878afe68..77d584aa4b15 100644
--- a/llvm/test/CodeGen/PowerPC/spe-hwdouble.ll
+++ b/llvm/test/CodeGen/PowerPC/spe-hwdouble.ll
@@ -15,8 +15,8 @@ define i32 @test_dasmconst(double %x) #0 {
 ; CHECK-NEXT:    blr
 entry:
   %x.addr = alloca double, align 8
-  store double %x, double* %x.addr, align 8
-  %0 = load double, double* %x.addr, align 8
+  store double %x, ptr %x.addr, align 8
+  %0 = load double, ptr %x.addr, align 8
   %1 = call i32 asm sideeffect "efdctsi $0, $1", "=d,d"(double %0)
   ret i32 %1
 }

diff  --git a/llvm/test/CodeGen/PowerPC/spe.ll b/llvm/test/CodeGen/PowerPC/spe.ll
index 2c2562719b00..7836aa877254 100644
--- a/llvm/test/CodeGen/PowerPC/spe.ll
+++ b/llvm/test/CodeGen/PowerPC/spe.ll
@@ -100,13 +100,13 @@ define i32 @test_fcmpgt(float %a, float %b) #0 {
   %c = fcmp ogt float %a, %b
   br i1 %c, label %tr, label %fa
 tr:
-  store i32 1, i32* %r, align 4
+  store i32 1, ptr %r, align 4
   br label %ret
 fa:
-  store i32 0, i32* %r, align 4
+  store i32 0, ptr %r, align 4
   br label %ret
 ret:
-  %0 = load i32, i32* %r, align 4
+  %0 = load i32, ptr %r, align 4
   ret i32 %0
 }
 
@@ -137,13 +137,13 @@ define i32 @test_fcmpugt(float %a, float %b) #0 {
   %c = fcmp ugt float %a, %b
   br i1 %c, label %tr, label %fa
 tr:
-  store i32 1, i32* %r, align 4
+  store i32 1, ptr %r, align 4
   br label %ret
 fa:
-  store i32 0, i32* %r, align 4
+  store i32 0, ptr %r, align 4
   br label %ret
 ret:
-  %0 = load i32, i32* %r, align 4
+  %0 = load i32, ptr %r, align 4
   ret i32 %0
 }
 
@@ -174,13 +174,13 @@ define i32 @test_fcmple(float %a, float %b) #0 {
   %c = fcmp ole float %a, %b
   br i1 %c, label %tr, label %fa
 tr:
-  store i32 1, i32* %r, align 4
+  store i32 1, ptr %r, align 4
   br label %ret
 fa:
-  store i32 0, i32* %r, align 4
+  store i32 0, ptr %r, align 4
   br label %ret
 ret:
-  %0 = load i32, i32* %r, align 4
+  %0 = load i32, ptr %r, align 4
   ret i32 %0
 }
 
@@ -205,13 +205,13 @@ define i32 @test_fcmpule(float %a, float %b) #0 {
   %c = fcmp ule float %a, %b
   br i1 %c, label %tr, label %fa
 tr:
-  store i32 1, i32* %r, align 4
+  store i32 1, ptr %r, align 4
   br label %ret
 fa:
-  store i32 0, i32* %r, align 4
+  store i32 0, ptr %r, align 4
   br label %ret
 ret:
-  %0 = load i32, i32* %r, align 4
+  %0 = load i32, ptr %r, align 4
   ret i32 %0
 }
 
@@ -237,13 +237,13 @@ define i32 @test_fcmpeq(float %a, float %b) #0 {
   %c = fcmp oeq float %a, %b
   br i1 %c, label %tr, label %fa
 tr:
-  store i32 1, i32* %r, align 4
+  store i32 1, ptr %r, align 4
   br label %ret
 fa:
-  store i32 0, i32* %r, align 4
+  store i32 0, ptr %r, align 4
   br label %ret
 ret:
-  %0 = load i32, i32* %r, align 4
+  %0 = load i32, ptr %r, align 4
   ret i32 %0
 }
 
@@ -345,13 +345,13 @@ define i32 @test_fcmpune(float %a, float %b) #0 {
   %c = fcmp une float %a, %b
   br i1 %c, label %tr, label %fa
 tr:
-  store i32 1, i32* %r, align 4
+  store i32 1, ptr %r, align 4
   br label %ret
 fa:
-  store i32 0, i32* %r, align 4
+  store i32 0, ptr %r, align 4
   br label %ret
 ret:
-  %0 = load i32, i32* %r, align 4
+  %0 = load i32, ptr %r, align 4
   ret i32 %0
 }
 
@@ -376,13 +376,13 @@ define i32 @test_fcmplt(float %a, float %b) #0 {
   %c = fcmp olt float %a, %b
   br i1 %c, label %tr, label %fa
 tr:
-  store i32 1, i32* %r, align 4
+  store i32 1, ptr %r, align 4
   br label %ret
 fa:
-  store i32 0, i32* %r, align 4
+  store i32 0, ptr %r, align 4
   br label %ret
 ret:
-  %0 = load i32, i32* %r, align 4
+  %0 = load i32, ptr %r, align 4
   ret i32 %0
 }
 
@@ -434,13 +434,13 @@ define i32 @test_fcmpge(float %a, float %b) #0 {
   %c = fcmp oge float %a, %b
   br i1 %c, label %tr, label %fa
 tr:
-  store i32 1, i32* %r, align 4
+  store i32 1, ptr %r, align 4
   br label %ret
 fa:
-  store i32 0, i32* %r, align 4
+  store i32 0, ptr %r, align 4
   br label %ret
 ret:
-  %0 = load i32, i32* %r, align 4
+  %0 = load i32, ptr %r, align 4
   ret i32 %0
 }
 
@@ -465,13 +465,13 @@ define i32 @test_fcmpuge(float %a, float %b) #0 {
   %c = fcmp uge float %a, %b
   br i1 %c, label %tr, label %fa
 tr:
-  store i32 1, i32* %r, align 4
+  store i32 1, ptr %r, align 4
   br label %ret
 fa:
-  store i32 0, i32* %r, align 4
+  store i32 0, ptr %r, align 4
   br label %ret
 ret:
-  %0 = load i32, i32* %r, align 4
+  %0 = load i32, ptr %r, align 4
   ret i32 %0
 }
 
@@ -526,8 +526,8 @@ define i32 @test_fasmconst(float %x) #0 {
 ; CHECK-NEXT:    blr
 entry:
   %x.addr = alloca float, align 8
-  store float %x, float* %x.addr, align 8
-  %0 = load float, float* %x.addr, align 8
+  store float %x, ptr %x.addr, align 8
+  %0 = load float, ptr %x.addr, align 8
   %1 = call i32 asm sideeffect "efsctsi $0, $1", "=f,f"(float %0)
   ret i32 %1
 ; Check that it's not loading a double
@@ -559,7 +559,7 @@ define float @test_dtos(double %a) #0 {
   ret float %v
 }
 
-define void @test_double_abs(double * %aa) #0 {
+define void @test_double_abs(ptr %aa) #0 {
 ; SPE-LABEL: test_double_abs:
 ; SPE:       # %bb.0: # %entry
 ; SPE-NEXT:    evldd 4, 0(3)
@@ -574,16 +574,16 @@ define void @test_double_abs(double * %aa) #0 {
 ; EFPU2-NEXT:    stw 4, 0(3)
 ; EFPU2-NEXT:    blr
   entry:
-    %0 = load double, double * %aa
+    %0 = load double, ptr %aa
     %1 = tail call double @llvm.fabs.f64(double %0) #2
-    store double %1, double * %aa
+    store double %1, ptr %aa
     ret void
 }
 
 ; Function Attrs: nounwind readnone
 declare double @llvm.fabs.f64(double) #1
 
-define void @test_dnabs(double * %aa) #0 {
+define void @test_dnabs(ptr %aa) #0 {
 ; SPE-LABEL: test_dnabs:
 ; SPE:       # %bb.0: # %entry
 ; SPE-NEXT:    evldd 4, 0(3)
@@ -598,10 +598,10 @@ define void @test_dnabs(double * %aa) #0 {
 ; EFPU2-NEXT:    stw 4, 0(3)
 ; EFPU2-NEXT:    blr
   entry:
-    %0 = load double, double * %aa
+    %0 = load double, ptr %aa
     %1 = tail call double @llvm.fabs.f64(double %0) #2
     %sub = fsub double -0.000000e+00, %1
-    store double %sub, double * %aa
+    store double %sub, ptr %aa
     ret void
 }
 
@@ -867,13 +867,13 @@ define i32 @test_dcmpgt(double %a, double %b) #0 {
   %c = fcmp ogt double %a, %b
   br i1 %c, label %tr, label %fa
 tr:
-  store i32 1, i32* %r, align 4
+  store i32 1, ptr %r, align 4
   br label %ret
 fa:
-  store i32 0, i32* %r, align 4
+  store i32 0, ptr %r, align 4
   br label %ret
 ret:
-  %0 = load i32, i32* %r, align 4
+  %0 = load i32, ptr %r, align 4
   ret i32 %0
 }
 
@@ -927,13 +927,13 @@ define i32 @test_dcmpugt(double %a, double %b) #0 {
   %c = fcmp ugt double %a, %b
   br i1 %c, label %tr, label %fa
 tr:
-  store i32 1, i32* %r, align 4
+  store i32 1, ptr %r, align 4
   br label %ret
 fa:
-  store i32 0, i32* %r, align 4
+  store i32 0, ptr %r, align 4
   br label %ret
 ret:
-  %0 = load i32, i32* %r, align 4
+  %0 = load i32, ptr %r, align 4
   ret i32 %0
 }
 
@@ -981,13 +981,13 @@ define i32 @test_dcmple(double %a, double %b) #0 {
   %c = fcmp ule double %a, %b
   br i1 %c, label %tr, label %fa
 tr:
-  store i32 1, i32* %r, align 4
+  store i32 1, ptr %r, align 4
   br label %ret
 fa:
-  store i32 0, i32* %r, align 4
+  store i32 0, ptr %r, align 4
   br label %ret
 ret:
-  %0 = load i32, i32* %r, align 4
+  %0 = load i32, ptr %r, align 4
   ret i32 %0
 }
 
@@ -1035,13 +1035,13 @@ define i32 @test_dcmpule(double %a, double %b) #0 {
   %c = fcmp ule double %a, %b
   br i1 %c, label %tr, label %fa
 tr:
-  store i32 1, i32* %r, align 4
+  store i32 1, ptr %r, align 4
   br label %ret
 fa:
-  store i32 0, i32* %r, align 4
+  store i32 0, ptr %r, align 4
   br label %ret
 ret:
-  %0 = load i32, i32* %r, align 4
+  %0 = load i32, ptr %r, align 4
   ret i32 %0
 }
 
@@ -1090,13 +1090,13 @@ define i32 @test_dcmpeq(double %a, double %b) #0 {
   %c = fcmp oeq double %a, %b
   br i1 %c, label %tr, label %fa
 tr:
-  store i32 1, i32* %r, align 4
+  store i32 1, ptr %r, align 4
   br label %ret
 fa:
-  store i32 0, i32* %r, align 4
+  store i32 0, ptr %r, align 4
   br label %ret
 ret:
-  %0 = load i32, i32* %r, align 4
+  %0 = load i32, ptr %r, align 4
   ret i32 %0
 }
 
@@ -1171,13 +1171,13 @@ define i32 @test_dcmpueq(double %a, double %b) #0 {
   %c = fcmp ueq double %a, %b
   br i1 %c, label %tr, label %fa
 tr:
-  store i32 1, i32* %r, align 4
+  store i32 1, ptr %r, align 4
   br label %ret
 fa:
-  store i32 0, i32* %r, align 4
+  store i32 0, ptr %r, align 4
   br label %ret
 ret:
-  %0 = load i32, i32* %r, align 4
+  %0 = load i32, ptr %r, align 4
   ret i32 %0
 }
 
@@ -1289,13 +1289,13 @@ define i32 @test_dcmpune(double %a, double %b) #0 {
   %c = fcmp une double %a, %b
   br i1 %c, label %tr, label %fa
 tr:
-  store i32 1, i32* %r, align 4
+  store i32 1, ptr %r, align 4
   br label %ret
 fa:
-  store i32 0, i32* %r, align 4
+  store i32 0, ptr %r, align 4
   br label %ret
 ret:
-  %0 = load i32, i32* %r, align 4
+  %0 = load i32, ptr %r, align 4
   ret i32 %0
 }
 
@@ -1343,13 +1343,13 @@ define i32 @test_dcmplt(double %a, double %b) #0 {
   %c = fcmp olt double %a, %b
   br i1 %c, label %tr, label %fa
 tr:
-  store i32 1, i32* %r, align 4
+  store i32 1, ptr %r, align 4
   br label %ret
 fa:
-  store i32 0, i32* %r, align 4
+  store i32 0, ptr %r, align 4
   br label %ret
 ret:
-  %0 = load i32, i32* %r, align 4
+  %0 = load i32, ptr %r, align 4
   ret i32 %0
 }
 
@@ -1403,13 +1403,13 @@ define i32 @test_dcmpult(double %a, double %b) #0 {
   %c = fcmp ult double %a, %b
   br i1 %c, label %tr, label %fa
 tr:
-  store i32 1, i32* %r, align 4
+  store i32 1, ptr %r, align 4
   br label %ret
 fa:
-  store i32 0, i32* %r, align 4
+  store i32 0, ptr %r, align 4
   br label %ret
 ret:
-  %0 = load i32, i32* %r, align 4
+  %0 = load i32, ptr %r, align 4
   ret i32 %0
 }
 
@@ -1493,13 +1493,13 @@ define i32 @test_dcmpuge(double %a, double %b) #0 {
   %c = fcmp uge double %a, %b
   br i1 %c, label %tr, label %fa
 tr:
-  store i32 1, i32* %r, align 4
+  store i32 1, ptr %r, align 4
   br label %ret
 fa:
-  store i32 0, i32* %r, align 4
+  store i32 0, ptr %r, align 4
   br label %ret
 ret:
-  %0 = load i32, i32* %r, align 4
+  %0 = load i32, ptr %r, align 4
   ret i32 %0
 }
 
@@ -1632,9 +1632,9 @@ entry:
   ret void
 }
 
-declare void @test_memset(i8* nocapture writeonly, i8, i32, i1)
+declare void @test_memset(ptr nocapture writeonly, i8, i32, i1)
 @global_var1 = global i32 0, align 4
-define double @test_spill(double %a, i32 %a1, i64 %a2, i8 * %a3, i32 *%a4, i32* %a5) #0 {
+define double @test_spill(double %a, i32 %a1, i64 %a2, ptr %a3, ptr %a4, ptr %a5) #0 {
 ; SPE-LABEL: test_spill:
 ; SPE:       # %bb.0: # %entry
 ; SPE-NEXT:    mflr 0
@@ -1763,12 +1763,10 @@ entry:
   %0 = fadd double %a, %a
   call void asm sideeffect "","~{s0},~{s3},~{s4},~{s5},~{s6},~{s7},~{s8},~{s9},~{s10},~{s11},~{s12},~{s13},~{s14},~{s15},~{s16},~{s17},~{s18},~{s19},~{s20},~{s21},~{s22},~{s23},~{s24},~{s25},~{s26},~{s27},~{s28},~{s29},~{s30},~{s31}"() nounwind
   %1 = fadd double %0, 3.14159
-  %2 = bitcast [13 x i32]* %v1 to i8*
-  call void @test_memset(i8* align 4 %2, i8 0, i32 24, i1 true)
-  store i32 0, i32* %a5, align 4
+  call void @test_memset(ptr align 4 %v1, i8 0, i32 24, i1 true)
+  store i32 0, ptr %a5, align 4
   call void @test_func2()
-  %3 = bitcast [11 x i32]* %v2 to i8*
-  call void @test_memset(i8* align 4 %3, i8 0, i32 20, i1 true)
+  call void @test_memset(ptr align 4 %v2, i8 0, i32 20, i1 true)
   br label %return
 
 return:
@@ -1837,7 +1835,7 @@ attributes #1 = { nounwind readnone speculatable willreturn }
 
 declare i32 @foo(double)
 
-define void @d(%struct.a* %e, %struct.a* %f) #0 {
+define void @d(ptr %e, ptr %f) #0 {
 ; SPE-LABEL: d:
 ; SPE:       # %bb.0: # %entry
 ; SPE-NEXT:    mflr 0
@@ -1914,17 +1912,16 @@ define void @d(%struct.a* %e, %struct.a* %f) #0 {
 ; EFPU2-NEXT:    mtlr 0
 ; EFPU2-NEXT:    blr
 entry:
-  %0 = getelementptr %struct.a, %struct.a* %f, i32 0, i32 0
-  %1 = load float, float* undef
-  %conv = fpext float %1 to double
-  %2 = load float, float* %0
-  %g = fpext float %2 to double
-  %3 = call i32 @foo(double %g)
+  %0 = load float, ptr undef
+  %conv = fpext float %0 to double
+  %1 = load float, ptr %f
+  %g = fpext float %1 to double
+  %2 = call i32 @foo(double %g)
   %h = call i32 @foo(double %conv)
-  %n = sitofp i32 %3 to double
+  %n = sitofp i32 %2 to double
   %k = fmul double %g, %n
   %l = fptrunc double %k to float
-  store float %l, float* undef
+  store float %l, ptr undef
   ret void
 }
 attributes #0 = { nounwind }

diff --git a/llvm/test/CodeGen/PowerPC/spill-nor0.ll b/llvm/test/CodeGen/PowerPC/spill-nor0.ll
index c9c665144829..325dc0e76353 100644
--- a/llvm/test/CodeGen/PowerPC/spill-nor0.ll
+++ b/llvm/test/CodeGen/PowerPC/spill-nor0.ll
@@ -11,7 +11,7 @@ if.then:                                          ; preds = %entry
   ret void
 
 if.end:                                           ; preds = %entry
-  %0 = call i64 asm sideeffect "mr 3,$1\0A\09mr 4,$2\0A\09rotldi 0,0,3  ; rotldi 0,0,13\0A\09rotldi 0,0,61 ; rotldi 0,0,51\0A\09or 1,1,1\0A\09mr $0,3", "=b,b,b,~{cc},~{memory},~{r3},~{r4}"(i32 0, i64* undef) #0
+  %0 = call i64 asm sideeffect "mr 3,$1\0A\09mr 4,$2\0A\09rotldi 0,0,3  ; rotldi 0,0,13\0A\09rotldi 0,0,61 ; rotldi 0,0,51\0A\09or 1,1,1\0A\09mr $0,3", "=b,b,b,~{cc},~{memory},~{r3},~{r4}"(i32 0, ptr undef) #0
   br i1 undef, label %end0, label %end1 ; need successor blocks to force spill
 
 end0:

diff --git a/llvm/test/CodeGen/PowerPC/spill-vec-pair.ll b/llvm/test/CodeGen/PowerPC/spill-vec-pair.ll
index e08d92d3436b..adca4988bc74 100644
--- a/llvm/test/CodeGen/PowerPC/spill-vec-pair.ll
+++ b/llvm/test/CodeGen/PowerPC/spill-vec-pair.ll
@@ -6,7 +6,7 @@
 ; RUN:   -mcpu=pwr10 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
 ; RUN:   -disable-auto-paired-vec-st=false < %s | FileCheck %s \
 ; RUN:   --check-prefix=CHECK-BE
-define dso_local void @test(<256 x i1>* %vpp, <256 x i1>* %vp2) local_unnamed_addr #0 {
+define dso_local void @test(ptr %vpp, ptr %vp2) local_unnamed_addr #0 {
 ; CHECK-LABEL: test:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stdu r1, -400(r1)
@@ -153,16 +153,14 @@ define dso_local void @test(<256 x i1>* %vpp, <256 x i1>* %vp2) local_unnamed_ad
 ; CHECK-BE-NEXT:    addi r1, r1, 416
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = bitcast <256 x i1>* %vpp to i8*
-  %1 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %0)
+  %0 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %vpp)
   tail call void asm sideeffect "nop", "~{memory},~{vs0},~{vs1},~{vs2},~{vs3},~{vs4},~{vs5},~{vs6},~{vs7},~{vs8},~{vs9},~{vs10},~{vs11},~{vs12},~{vs13},~{vs14},~{vs15},~{vs16},~{vs17},~{vs18},~{vs19},~{vs20},~{vs21},~{vs22},~{vs23},~{vs24},~{vs25},~{vs26},~{vs27},~{vs28},~{vs29},~{vs30},~{vs31},~{vs32},~{vs33},~{vs34},~{vs35},~{vs36},~{vs37},~{vs38},~{vs39},~{vs40},~{vs41},~{vs42},~{vs43},~{vs44},~{vs45},~{vs46},~{vs47},~{vs48},~{vs49},~{vs50},~{vs51},~{vs52},~{vs53},~{vs54},~{vs55},~{vs56},~{vs57},~{vs58},~{vs59},~{vs60},~{vs61},~{vs62},~{vs63}"()
-  %2 = bitcast <256 x i1>* %vp2 to i8*
-  tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %1, i8* %2)
+  tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %0, ptr %vp2)
   ret void
 }
 
-declare <256 x i1> @llvm.ppc.vsx.lxvp(i8*) #1
+declare <256 x i1> @llvm.ppc.vsx.lxvp(ptr) #1
 
-declare void @llvm.ppc.vsx.stxvp(<256 x i1>, i8*) #2
+declare void @llvm.ppc.vsx.stxvp(<256 x i1>, ptr) #2
 
 attributes #0 = { nounwind }

diff --git a/llvm/test/CodeGen/PowerPC/spill_p9_setb.ll b/llvm/test/CodeGen/PowerPC/spill_p9_setb.ll
index 17d1b616088b..81c43ce85d59 100644
--- a/llvm/test/CodeGen/PowerPC/spill_p9_setb.ll
+++ b/llvm/test/CodeGen/PowerPC/spill_p9_setb.ll
@@ -35,7 +35,7 @@ entry:
   br i1 undef, label %if.end, label %if.then
 
 if.then:                                          ; preds = %entry
-  %call = tail call signext i32 bitcast (i32 (...)* @fn_call to i32 ()*)()
+  %call = tail call signext i32 @fn_call()
   %cmp1 = icmp ne i32 %call, 0
   br label %if.end
 

diff --git a/llvm/test/CodeGen/PowerPC/splat-bug.ll b/llvm/test/CodeGen/PowerPC/splat-bug.ll
index 95c44a108535..dd17ad8b0c25 100644
--- a/llvm/test/CodeGen/PowerPC/splat-bug.ll
+++ b/llvm/test/CodeGen/PowerPC/splat-bug.ll
@@ -10,7 +10,7 @@ target triple = "powerpc64-unknown-linux-gnu"
 
 define void @foo() nounwind ssp {
 ; CHECK: foo:
-  store <16 x i8> <i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16>, <16 x i8>* @a
+  store <16 x i8> <i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16>, ptr @a
 ; CHECK: vspltish [[REG:[0-9]+]], 8
 ; CHECK: vadduhm {{[0-9]+}}, [[REG]], [[REG]]
   ret void

diff --git a/llvm/test/CodeGen/PowerPC/split-index-tc.ll b/llvm/test/CodeGen/PowerPC/split-index-tc.ll
index 2e95568846ac..e332428aa8e2 100644
--- a/llvm/test/CodeGen/PowerPC/split-index-tc.ll
+++ b/llvm/test/CodeGen/PowerPC/split-index-tc.ll
@@ -2,7 +2,7 @@
 target datalayout = "E-m:e-i64:64-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
-%"class.llvm::MachineOperand" = type { i8, [3 x i8], i64, i64*, i64 }
+%"class.llvm::MachineOperand" = type { i8, [3 x i8], i64, ptr, i64 }
 
 ; Function Attrs: nounwind
 define void @_ZN4llvm17ScheduleDAGInstrs14addPhysRegDepsEPNS_5SUnitEj() #0 align 2 {
@@ -13,21 +13,20 @@ define void @_ZN4llvm17ScheduleDAGInstrs14addPhysRegDepsEPNS_5SUnitEj() #0 align
 ; CHECK-NOT: lhzu
 
 entry:
-  %0 = load %"class.llvm::MachineOperand"*, %"class.llvm::MachineOperand"** undef, align 8
+  %0 = load ptr, ptr undef, align 8
   br i1 undef, label %_ZNK4llvm14MachineOperand6getRegEv.exit, label %cond.false.i123
 
 cond.false.i123:                                  ; preds = %_ZN4llvm12MachineInstr10getOperandEj.exit
   unreachable
 
 _ZNK4llvm14MachineOperand6getRegEv.exit:          ; preds = %_ZN4llvm12MachineInstr10getOperandEj.exit
-  %IsDef.i = getelementptr inbounds %"class.llvm::MachineOperand", %"class.llvm::MachineOperand"* %0, i64 undef, i32 1
-  %1 = bitcast [3 x i8]* %IsDef.i to i24*
-  %bf.load.i = load i24, i24* %1, align 1
-  %2 = and i24 %bf.load.i, 128
+  %IsDef.i = getelementptr inbounds %"class.llvm::MachineOperand", ptr %0, i64 undef, i32 1
+  %bf.load.i = load i24, ptr %IsDef.i, align 1
+  %1 = and i24 %bf.load.i, 128
   br i1 undef, label %for.cond.cleanup, label %for.body.lr.ph
 
 for.body.lr.ph:                                   ; preds = %_ZNK4llvm14MachineOperand6getRegEv.exit
-  %3 = zext i24 %2 to i32
+  %2 = zext i24 %1 to i32
   br i1 undef, label %cond.false.i134, label %_ZNK4llvm18MCRegAliasIteratordeEv.exit
 
 for.cond.cleanup:                                 ; preds = %_ZNK4llvm14MachineOperand6getRegEv.exit
@@ -61,7 +60,7 @@ cond.false.i257:                                  ; preds = %if.end55
   unreachable
 
 _ZNK4llvm14MachineOperand6isDeadEv.exit262:       ; preds = %if.end55
-  %bf.load.i259 = load i24, i24* %1, align 1
+  %bf.load.i259 = load i24, ptr %IsDef.i, align 1
   br i1 undef, label %if.then57, label %if.else59
 
 if.then57:                                        ; preds = %_ZNK4llvm14MachineOperand6isDeadEv.exit262

diff --git a/llvm/test/CodeGen/PowerPC/splitstore-check-volatile.ll b/llvm/test/CodeGen/PowerPC/splitstore-check-volatile.ll
index b93080e896fa..271e1e39c8df 100644
--- a/llvm/test/CodeGen/PowerPC/splitstore-check-volatile.ll
+++ b/llvm/test/CodeGen/PowerPC/splitstore-check-volatile.ll
@@ -2,9 +2,9 @@
 ;
 ; RUN: opt -S -mtriple=powerpc64le -codegenprepare -force-split-store < %s  | FileCheck %s
 
-define void @fun(i16* %Src, i16* %Dst) {
-; CHECK: store volatile i16 %8, i16* %Dst 
-  %1 = load i16, i16* %Src
+define void @fun(ptr %Src, ptr %Dst) {
+; CHECK: store volatile i16 %8, ptr %Dst 
+  %1 = load i16, ptr %Src
   %2 = trunc i16 %1 to i8
   %3 = lshr i16 %1, 8
   %4 = trunc i16 %3 to i8
@@ -12,6 +12,6 @@ define void @fun(i16* %Src, i16* %Dst) {
   %6 = zext i8 %4 to i16
   %7 = shl nuw i16 %6, 8
   %8 = or i16 %7, %5
-  store volatile i16 %8, i16* %Dst
+  store volatile i16 %8, ptr %Dst
   ret void
 }

diff --git a/llvm/test/CodeGen/PowerPC/stack-clash-dynamic-alloca.ll b/llvm/test/CodeGen/PowerPC/stack-clash-dynamic-alloca.ll
index dcd4dda985e4..5f605e93c93e 100644
--- a/llvm/test/CodeGen/PowerPC/stack-clash-dynamic-alloca.ll
+++ b/llvm/test/CodeGen/PowerPC/stack-clash-dynamic-alloca.ll
@@ -142,9 +142,9 @@ define i32 @foo(i32 %n) local_unnamed_addr #0 "stack-probe-size"="32768" nounwin
 ; CHECK-32-NEXT:    mr r31, r0
 ; CHECK-32-NEXT:    blr
   %a = alloca i32, i32 %n, align 16
-  %b = getelementptr inbounds i32, i32* %a, i64 1198
-  store volatile i32 1, i32* %b
-  %c = load volatile i32, i32* %a
+  %b = getelementptr inbounds i32, ptr %a, i64 1198
+  store volatile i32 1, ptr %b
+  %c = load volatile i32, ptr %a
   ret i32 %c
 }
 
@@ -288,9 +288,9 @@ define i32 @bar(i32 %n) local_unnamed_addr #0 nounwind {
 ; CHECK-32-NEXT:    blr
   %a = alloca i32, i32 %n, align 16
   %i = add i32 %n, 1024
-  %b = getelementptr inbounds i32, i32* %a, i32 %i
-  store volatile i32 1, i32* %b
-  %c = load volatile i32, i32* %a
+  %b = getelementptr inbounds i32, ptr %a, i32 %i
+  store volatile i32 1, ptr %b
+  %c = load volatile i32, ptr %a
   ret i32 %c
 }
 
@@ -428,9 +428,9 @@ define i32 @f(i32 %n) local_unnamed_addr #0 "stack-probe-size"="65536" nounwind
 ; CHECK-32-NEXT:    mr r31, r0
 ; CHECK-32-NEXT:    blr
   %a = alloca i32, i32 %n, align 16
-  %b = getelementptr inbounds i32, i32* %a, i64 1198
-  store volatile i32 1, i32* %b
-  %c = load volatile i32, i32* %a
+  %b = getelementptr inbounds i32, ptr %a, i64 1198
+  store volatile i32 1, ptr %b
+  %c = load volatile i32, ptr %a
   ret i32 %c
 }
 

diff --git a/llvm/test/CodeGen/PowerPC/stack-clash-prologue-nounwind.ll b/llvm/test/CodeGen/PowerPC/stack-clash-prologue-nounwind.ll
index 4a8de768d82a..369d6807263f 100644
--- a/llvm/test/CodeGen/PowerPC/stack-clash-prologue-nounwind.ll
+++ b/llvm/test/CodeGen/PowerPC/stack-clash-prologue-nounwind.ll
@@ -35,9 +35,9 @@ define i8 @f0() #0 nounwind {
 ; CHECK-32-NEXT:    blr
 entry:
   %a = alloca i8, i64 64
-  %b = getelementptr inbounds i8, i8* %a, i64 63
-  store volatile i8 3, i8* %a
-  %c = load volatile i8, i8* %a
+  %b = getelementptr inbounds i8, ptr %a, i64 63
+  store volatile i8 3, ptr %a
+  %c = load volatile i8, ptr %a
   ret i8 %c
 }
 
@@ -93,9 +93,9 @@ define i8 @f1() #0 "stack-probe-size"="0" nounwind {
 ; CHECK-32-NEXT:    blr
 entry:
   %a = alloca i8, i64 4096
-  %b = getelementptr inbounds i8, i8* %a, i64 63
-  store volatile i8 3, i8* %a
-  %c = load volatile i8, i8* %a
+  %b = getelementptr inbounds i8, ptr %a, i64 63
+  store volatile i8 3, ptr %a
+  %c = load volatile i8, ptr %a
   ret i8 %c
 }
 
@@ -157,9 +157,9 @@ define i8 @f2() #0 nounwind {
 ; CHECK-32-NEXT:    blr
 entry:
   %a = alloca i8, i64 65536
-  %b = getelementptr inbounds i8, i8* %a, i64 63
-  store volatile i8 3, i8* %a
-  %c = load volatile i8, i8* %a
+  %b = getelementptr inbounds i8, ptr %a, i64 63
+  store volatile i8 3, ptr %a
+  %c = load volatile i8, ptr %a
   ret i8 %c
 }
 
@@ -206,9 +206,9 @@ define i8 @f3() #0 "stack-probe-size"="32768" nounwind {
 ; CHECK-32-NEXT:    blr
 entry:
   %a = alloca i8, i64 65536
-  %b = getelementptr inbounds i8, i8* %a, i64 63
-  store volatile i8 3, i8* %a
-  %c = load volatile i8, i8* %a
+  %b = getelementptr inbounds i8, ptr %a, i64 63
+  store volatile i8 3, ptr %a
+  %c = load volatile i8, ptr %a
   ret i8 %c
 }
 
@@ -252,9 +252,9 @@ define i8 @f4() nounwind {
 ; CHECK-32-NEXT:    blr
 entry:
   %a = alloca i8, i64 65536
-  %b = getelementptr inbounds i8, i8* %a, i64 63
-  store volatile i8 3, i8* %a
-  %c = load volatile i8, i8* %a
+  %b = getelementptr inbounds i8, ptr %a, i64 63
+  store volatile i8 3, ptr %a
+  %c = load volatile i8, ptr %a
   ret i8 %c
 }
 
@@ -322,9 +322,9 @@ define i8 @f5() #0 "stack-probe-size"="65536" nounwind {
 ; CHECK-32-NEXT:    blr
 entry:
   %a = alloca i8, i64 1048576
-  %b = getelementptr inbounds i8, i8* %a, i64 63
-  store volatile i8 3, i8* %a
-  %c = load volatile i8, i8* %a
+  %b = getelementptr inbounds i8, ptr %a, i64 63
+  store volatile i8 3, ptr %a
+  %c = load volatile i8, ptr %a
   ret i8 %c
 }
 
@@ -389,9 +389,9 @@ define i8 @f6() #0 nounwind {
 ; CHECK-32-NEXT:    blr
 entry:
   %a = alloca i8, i64 1073741824
-  %b = getelementptr inbounds i8, i8* %a, i64 63
-  store volatile i8 3, i8* %a
-  %c = load volatile i8, i8* %a
+  %b = getelementptr inbounds i8, ptr %a, i64 63
+  store volatile i8 3, ptr %a
+  %c = load volatile i8, ptr %a
   ret i8 %c
 }
 
@@ -465,9 +465,9 @@ define i8 @f7() #0 "stack-probe-size"="65536" nounwind {
 ; CHECK-32-NEXT:    blr
 entry:
   %a = alloca i8, i64 1000000007
-  %b = getelementptr inbounds i8, i8* %a, i64 101
-  store volatile i8 3, i8* %a
-  %c = load volatile i8, i8* %a
+  %b = getelementptr inbounds i8, ptr %a, i64 101
+  store volatile i8 3, ptr %a
+  %c = load volatile i8, ptr %a
   ret i8 %c
 }
 

diff --git a/llvm/test/CodeGen/PowerPC/stack-clash-prologue.ll b/llvm/test/CodeGen/PowerPC/stack-clash-prologue.ll
index 7e4556c59737..58d7e7d40e7c 100644
--- a/llvm/test/CodeGen/PowerPC/stack-clash-prologue.ll
+++ b/llvm/test/CodeGen/PowerPC/stack-clash-prologue.ll
@@ -35,9 +35,9 @@ define i8 @f0() #0 nounwind {
 ; CHECK-32-NEXT:    blr
 entry:
   %a = alloca i8, i64 64
-  %b = getelementptr inbounds i8, i8* %a, i64 63
-  store volatile i8 3, i8* %a
-  %c = load volatile i8, i8* %a
+  %b = getelementptr inbounds i8, ptr %a, i64 63
+  store volatile i8 3, ptr %a
+  %c = load volatile i8, ptr %a
   ret i8 %c
 }
 
@@ -102,9 +102,9 @@ define i8 @f1() #0 "stack-probe-size"="0" {
 ; CHECK-32-NEXT:    blr
 entry:
   %a = alloca i8, i64 4096
-  %b = getelementptr inbounds i8, i8* %a, i64 63
-  store volatile i8 3, i8* %a
-  %c = load volatile i8, i8* %a
+  %b = getelementptr inbounds i8, ptr %a, i64 63
+  store volatile i8 3, ptr %a
+  %c = load volatile i8, ptr %a
   ret i8 %c
 }
 
@@ -175,9 +175,9 @@ define i8 @f2() #0 {
 ; CHECK-32-NEXT:    blr
 entry:
   %a = alloca i8, i64 65536
-  %b = getelementptr inbounds i8, i8* %a, i64 63
-  store volatile i8 3, i8* %a
-  %c = load volatile i8, i8* %a
+  %b = getelementptr inbounds i8, ptr %a, i64 63
+  store volatile i8 3, ptr %a
+  %c = load volatile i8, ptr %a
   ret i8 %c
 }
 
@@ -233,9 +233,9 @@ define i8 @f3() #0 "stack-probe-size"="32768" {
 ; CHECK-32-NEXT:    blr
 entry:
   %a = alloca i8, i64 65536
-  %b = getelementptr inbounds i8, i8* %a, i64 63
-  store volatile i8 3, i8* %a
-  %c = load volatile i8, i8* %a
+  %b = getelementptr inbounds i8, ptr %a, i64 63
+  store volatile i8 3, ptr %a
+  %c = load volatile i8, ptr %a
   ret i8 %c
 }
 
@@ -282,9 +282,9 @@ define i8 @f4() {
 ; CHECK-32-NEXT:    blr
 entry:
   %a = alloca i8, i64 65536
-  %b = getelementptr inbounds i8, i8* %a, i64 63
-  store volatile i8 3, i8* %a
-  %c = load volatile i8, i8* %a
+  %b = getelementptr inbounds i8, ptr %a, i64 63
+  store volatile i8 3, ptr %a
+  %c = load volatile i8, ptr %a
   ret i8 %c
 }
 
@@ -361,9 +361,9 @@ define i8 @f5() #0 "stack-probe-size"="65536" {
 ; CHECK-32-NEXT:    blr
 entry:
   %a = alloca i8, i64 1048576
-  %b = getelementptr inbounds i8, i8* %a, i64 63
-  store volatile i8 3, i8* %a
-  %c = load volatile i8, i8* %a
+  %b = getelementptr inbounds i8, ptr %a, i64 63
+  store volatile i8 3, ptr %a
+  %c = load volatile i8, ptr %a
   ret i8 %c
 }
 
@@ -437,9 +437,9 @@ define i8 @f6() #0 {
 ; CHECK-32-NEXT:    blr
 entry:
   %a = alloca i8, i64 1073741824
-  %b = getelementptr inbounds i8, i8* %a, i64 63
-  store volatile i8 3, i8* %a
-  %c = load volatile i8, i8* %a
+  %b = getelementptr inbounds i8, ptr %a, i64 63
+  store volatile i8 3, ptr %a
+  %c = load volatile i8, ptr %a
   ret i8 %c
 }
 
@@ -522,9 +522,9 @@ define i8 @f7() #0 "stack-probe-size"="65536" {
 ; CHECK-32-NEXT:    blr
 entry:
   %a = alloca i8, i64 1000000007
-  %b = getelementptr inbounds i8, i8* %a, i64 101
-  store volatile i8 3, i8* %a
-  %c = load volatile i8, i8* %a
+  %b = getelementptr inbounds i8, ptr %a, i64 101
+  store volatile i8 3, ptr %a
+  %c = load volatile i8, ptr %a
   ret i8 %c
 }
 
@@ -589,9 +589,9 @@ define i32 @f8(i64 %i) local_unnamed_addr #0 {
 ; CHECK-32-NEXT:    mr r31, r0
 ; CHECK-32-NEXT:    blr
   %a = alloca i32, i32 200, align 64
-  %b = getelementptr inbounds i32, i32* %a, i64 %i
-  store volatile i32 1, i32* %b
-  %c = load volatile i32, i32* %a
+  %b = getelementptr inbounds i32, ptr %a, i64 %i
+  store volatile i32 1, ptr %b
+  %c = load volatile i32, ptr %a
   ret i32 %c
 }
 
@@ -696,9 +696,9 @@ define i32 @f9(i64 %i) local_unnamed_addr #0 {
 ; CHECK-32-NEXT:    mr r31, r0
 ; CHECK-32-NEXT:    blr
   %a = alloca i32, i32 2000, align 2048
-  %b = getelementptr inbounds i32, i32* %a, i64 %i
-  store volatile i32 1, i32* %b
-  %c = load volatile i32, i32* %a
+  %b = getelementptr inbounds i32, ptr %a, i64 %i
+  store volatile i32 1, ptr %b
+  %c = load volatile i32, ptr %a
   ret i32 %c
 }
 
@@ -803,9 +803,9 @@ define i32 @f10(i64 %i) local_unnamed_addr #0 {
 ; CHECK-32-NEXT:    mr r31, r0
 ; CHECK-32-NEXT:    blr
   %a = alloca i32, i32 1000, align 1024
-  %b = getelementptr inbounds i32, i32* %a, i64 %i
-  store volatile i32 1, i32* %b
-  %c = load volatile i32, i32* %a
+  %b = getelementptr inbounds i32, ptr %a, i64 %i
+  store volatile i32 1, ptr %b
+  %c = load volatile i32, ptr %a
   ret i32 %c
 }
 
@@ -997,11 +997,11 @@ define void @f11(i32 %vla_size, i64 %i) #0 {
 ; CHECK-32-NEXT:    mr r31, r0
 ; CHECK-32-NEXT:    blr
   %a = alloca i32, i32 4096, align 32768
-  %b = getelementptr inbounds i32, i32* %a, i64 %i
-  store volatile i32 1, i32* %b
+  %b = getelementptr inbounds i32, ptr %a, i64 %i
+  store volatile i32 1, ptr %b
   %1 = zext i32 %vla_size to i64
   %vla = alloca i8, i64 %1, align 2048
-  %2 = load volatile i8, i8* %vla, align 2048
+  %2 = load volatile i8, ptr %vla, align 2048
   ret void
 }
 

diff --git a/llvm/test/CodeGen/PowerPC/stack-guard-oob.ll b/llvm/test/CodeGen/PowerPC/stack-guard-oob.ll
index 182d037988fa..f00f846a9f15 100644
--- a/llvm/test/CodeGen/PowerPC/stack-guard-oob.ll
+++ b/llvm/test/CodeGen/PowerPC/stack-guard-oob.ll
@@ -7,9 +7,8 @@
 ; AIX-NOT: __ssp_canary_word
 define i32 @in_bounds() #0 {
   %var = alloca i32, align 4
-  store i32 0, i32* %var, align 4
-  %gep = getelementptr inbounds i32, i32* %var, i32 0
-  %ret = load i32, i32* %gep, align 4
+  store i32 0, ptr %var, align 4
+  %ret = load i32, ptr %var, align 4
   ret i32 %ret
 }
 
@@ -18,9 +17,9 @@ define i32 @in_bounds() #0 {
 ; AIX: __ssp_canary_word
 define i32 @constant_out_of_bounds() #0 {
   %var = alloca i32, align 4
-  store i32 0, i32* %var, align 4
-  %gep = getelementptr inbounds i32, i32* %var, i32 1
-  %ret = load i32, i32* %gep, align 4
+  store i32 0, ptr %var, align 4
+  %gep = getelementptr inbounds i32, ptr %var, i32 1
+  %ret = load i32, ptr %gep, align 4
   ret i32 %ret
 }
 
@@ -29,9 +28,9 @@ define i32 @constant_out_of_bounds() #0 {
 ; AIX: __ssp_canary_word
 define i32 @nonconstant_out_of_bounds(i32 %n) #0 {
   %var = alloca i32, align 4
-  store i32 0, i32* %var, align 4
-  %gep = getelementptr inbounds i32, i32* %var, i32 %n
-  %ret = load i32, i32* %gep, align 4
+  store i32 0, ptr %var, align 4
+  %gep = getelementptr inbounds i32, ptr %var, i32 %n
+  %ret = load i32, ptr %gep, align 4
   ret i32 %ret
 }
 
@@ -42,8 +41,8 @@ define i32 @phi_before_gep_in_bounds(i32 %k) #0 {
 entry:
   %var1 = alloca i32, align 4
   %var2 = alloca i32, align 4
-  store i32 0, i32* %var1, align 4
-  store i32 0, i32* %var2, align 4
+  store i32 0, ptr %var1, align 4
+  store i32 0, ptr %var2, align 4
   %cmp = icmp ne i32 %k, 0
   br i1 %cmp, label %if, label %then
 
@@ -51,9 +50,8 @@ if:
   br label %then
 
 then:
-  %ptr = phi i32* [ %var1, %entry ], [ %var2, %if ]
-  %gep = getelementptr inbounds i32, i32* %ptr, i32 0
-  %ret = load i32, i32* %gep, align 4
+  %ptr = phi ptr [ %var1, %entry ], [ %var2, %if ]
+  %ret = load i32, ptr %ptr, align 4
   ret i32 %ret
 }
 
@@ -64,8 +62,8 @@ define i32 @phi_before_gep_constant_out_of_bounds(i32 %k) #0 {
 entry:
   %var1 = alloca i32, align 4
   %var2 = alloca i32, align 4
-  store i32 0, i32* %var1, align 4
-  store i32 0, i32* %var2, align 4
+  store i32 0, ptr %var1, align 4
+  store i32 0, ptr %var2, align 4
   %cmp = icmp ne i32 %k, 0
   br i1 %cmp, label %if, label %then
 
@@ -73,9 +71,9 @@ if:
   br label %then
 
 then:
-  %ptr = phi i32* [ %var1, %entry ], [ %var2, %if ]
-  %gep = getelementptr inbounds i32, i32* %ptr, i32 1
-  %ret = load i32, i32* %gep, align 4
+  %ptr = phi ptr [ %var1, %entry ], [ %var2, %if ]
+  %gep = getelementptr inbounds i32, ptr %ptr, i32 1
+  %ret = load i32, ptr %gep, align 4
   ret i32 %ret
 }
 
@@ -86,8 +84,8 @@ define i32 @phi_before_gep_nonconstant_out_of_bounds(i32 %k, i32 %n) #0 {
 entry:
   %var1 = alloca i32, align 4
   %var2 = alloca i32, align 4
-  store i32 0, i32* %var1, align 4
-  store i32 0, i32* %var2, align 4
+  store i32 0, ptr %var1, align 4
+  store i32 0, ptr %var2, align 4
   %cmp = icmp ne i32 %k, 0
   br i1 %cmp, label %if, label %then
 
@@ -95,9 +93,9 @@ if:
   br label %then
 
 then:
-  %ptr = phi i32* [ %var1, %entry ], [ %var2, %if ]
-  %gep = getelementptr inbounds i32, i32* %ptr, i32 %n
-  %ret = load i32, i32* %gep, align 4
+  %ptr = phi ptr [ %var1, %entry ], [ %var2, %if ]
+  %gep = getelementptr inbounds i32, ptr %ptr, i32 %n
+  %ret = load i32, ptr %gep, align 4
   ret i32 %ret
 }
 
@@ -108,22 +106,20 @@ define i32 @phi_after_gep_in_bounds(i32 %k) #0 {
 entry:
   %var1 = alloca i32, align 4
   %var2 = alloca i32, align 4
-  store i32 0, i32* %var1, align 4
-  store i32 0, i32* %var2, align 4
+  store i32 0, ptr %var1, align 4
+  store i32 0, ptr %var2, align 4
   %cmp = icmp ne i32 %k, 0
   br i1 %cmp, label %if, label %else
 
 if:
-  %gep1 = getelementptr inbounds i32, i32* %var1, i32 0
   br label %then
 
 else:
-  %gep2 = getelementptr inbounds i32, i32* %var2, i32 0
   br label %then
 
 then:
-  %ptr = phi i32* [ %gep1, %if ], [ %gep2, %else ]
-  %ret = load i32, i32* %ptr, align 4
+  %ptr = phi ptr [ %var1, %if ], [ %var2, %else ]
+  %ret = load i32, ptr %ptr, align 4
   ret i32 %ret
 }
 
@@ -134,22 +130,21 @@ define i32 @phi_after_gep_constant_out_of_bounds_a(i32 %k) #0 {
 entry:
   %var1 = alloca i32, align 4
   %var2 = alloca i32, align 4
-  store i32 0, i32* %var1, align 4
-  store i32 0, i32* %var2, align 4
+  store i32 0, ptr %var1, align 4
+  store i32 0, ptr %var2, align 4
   %cmp = icmp ne i32 %k, 0
   br i1 %cmp, label %if, label %else
 
 if:
-  %gep1 = getelementptr inbounds i32, i32* %var1, i32 0
   br label %then
 
 else:
-  %gep2 = getelementptr inbounds i32, i32* %var2, i32 1
+  %gep2 = getelementptr inbounds i32, ptr %var2, i32 1
   br label %then
 
 then:
-  %ptr = phi i32* [ %gep1, %if ], [ %gep2, %else ]
-  %ret = load i32, i32* %ptr, align 4
+  %ptr = phi ptr [ %var1, %if ], [ %gep2, %else ]
+  %ret = load i32, ptr %ptr, align 4
   ret i32 %ret
 }
 
@@ -160,22 +155,21 @@ define i32 @phi_after_gep_constant_out_of_bounds_b(i32 %k) #0 {
 entry:
   %var1 = alloca i32, align 4
   %var2 = alloca i32, align 4
-  store i32 0, i32* %var1, align 4
-  store i32 0, i32* %var2, align 4
+  store i32 0, ptr %var1, align 4
+  store i32 0, ptr %var2, align 4
   %cmp = icmp ne i32 %k, 0
   br i1 %cmp, label %if, label %else
 
 if:
-  %gep1 = getelementptr inbounds i32, i32* %var1, i32 1
+  %gep1 = getelementptr inbounds i32, ptr %var1, i32 1
   br label %then
 
 else:
-  %gep2 = getelementptr inbounds i32, i32* %var2, i32 0
   br label %then
 
 then:
-  %ptr = phi i32* [ %gep1, %if ], [ %gep2, %else ]
-  %ret = load i32, i32* %ptr, align 4
+  %ptr = phi ptr [ %gep1, %if ], [ %var2, %else ]
+  %ret = load i32, ptr %ptr, align 4
   ret i32 %ret
 }
 
@@ -186,18 +180,17 @@ define i64 @phi_different_types_a(i32 %k) #0 {
 entry:
   %var1 = alloca i64, align 4
   %var2 = alloca i32, align 4
-  store i64 0, i64* %var1, align 4
-  store i32 0, i32* %var2, align 4
+  store i64 0, ptr %var1, align 4
+  store i32 0, ptr %var2, align 4
   %cmp = icmp ne i32 %k, 0
   br i1 %cmp, label %if, label %then
 
 if:
-  %bitcast = bitcast i32* %var2 to i64*
   br label %then
 
 then:
-  %ptr = phi i64* [ %var1, %entry ], [ %bitcast, %if ]
-  %ret = load i64, i64* %ptr, align 4
+  %ptr = phi ptr [ %var1, %entry ], [ %var2, %if ]
+  %ret = load i64, ptr %ptr, align 4
   ret i64 %ret
 }
 
@@ -208,18 +201,17 @@ define i64 @phi_different_types_b(i32 %k) #0 {
 entry:
   %var1 = alloca i32, align 4
   %var2 = alloca i64, align 4
-  store i32 0, i32* %var1, align 4
-  store i64 0, i64* %var2, align 4
+  store i32 0, ptr %var1, align 4
+  store i64 0, ptr %var2, align 4
   %cmp = icmp ne i32 %k, 0
   br i1 %cmp, label %if, label %then
 
 if:
-  %bitcast = bitcast i32* %var1 to i64*
   br label %then
 
 then:
-  %ptr = phi i64* [ %var2, %entry ], [ %bitcast, %if ]
-  %ret = load i64, i64* %ptr, align 4
+  %ptr = phi ptr [ %var2, %entry ], [ %var1, %if ]
+  %ret = load i64, ptr %ptr, align 4
   ret i64 %ret
 }
 
@@ -230,22 +222,21 @@ define i32 @phi_after_gep_nonconstant_out_of_bounds_a(i32 %k, i32 %n) #0 {
 entry:
   %var1 = alloca i32, align 4
   %var2 = alloca i32, align 4
-  store i32 0, i32* %var1, align 4
-  store i32 0, i32* %var2, align 4
+  store i32 0, ptr %var1, align 4
+  store i32 0, ptr %var2, align 4
   %cmp = icmp ne i32 %k, 0
   br i1 %cmp, label %if, label %else
 
 if:
-  %gep1 = getelementptr inbounds i32, i32* %var1, i32 0
   br label %then
 
 else:
-  %gep2 = getelementptr inbounds i32, i32* %var2, i32 %n
+  %gep2 = getelementptr inbounds i32, ptr %var2, i32 %n
   br label %then
 
 then:
-  %ptr = phi i32* [ %gep1, %if ], [ %gep2, %else ]
-  %ret = load i32, i32* %ptr, align 4
+  %ptr = phi ptr [ %var1, %if ], [ %gep2, %else ]
+  %ret = load i32, ptr %ptr, align 4
   ret i32 %ret
 }
 
@@ -256,22 +247,21 @@ define i32 @phi_after_gep_nonconstant_out_of_bounds_b(i32 %k, i32 %n) #0 {
 entry:
   %var1 = alloca i32, align 4
   %var2 = alloca i32, align 4
-  store i32 0, i32* %var1, align 4
-  store i32 0, i32* %var2, align 4
+  store i32 0, ptr %var1, align 4
+  store i32 0, ptr %var2, align 4
   %cmp = icmp ne i32 %k, 0
   br i1 %cmp, label %if, label %else
 
 if:
-  %gep1 = getelementptr inbounds i32, i32* %var1, i32 %n
+  %gep1 = getelementptr inbounds i32, ptr %var1, i32 %n
   br label %then
 
 else:
-  %gep2 = getelementptr inbounds i32, i32* %var2, i32 0
   br label %then
 
 then:
-  %ptr = phi i32* [ %gep1, %if ], [ %gep2, %else ]
-  %ret = load i32, i32* %ptr, align 4
+  %ptr = phi ptr [ %gep1, %if ], [ %var2, %else ]
+  %ret = load i32, ptr %ptr, align 4
   ret i32 %ret
 }
 
@@ -283,9 +273,9 @@ then:
 ; AIX-NOT: __ssp_canary_word
 define void @struct_in_bounds() #0 {
   %var = alloca %struct.outer, align 4
-  %outergep = getelementptr inbounds %struct.outer, %struct.outer* %var, i32 0, i32 1
-  %innergep = getelementptr inbounds %struct.inner, %struct.inner* %outergep, i32 0, i32 1
-  store i32 0, i32* %innergep, align 4
+  %outergep = getelementptr inbounds %struct.outer, ptr %var, i32 0, i32 1
+  %innergep = getelementptr inbounds %struct.inner, ptr %outergep, i32 0, i32 1
+  store i32 0, ptr %innergep, align 4
   ret void
 }
 
@@ -294,9 +284,8 @@ define void @struct_in_bounds() #0 {
 ; AIX: __ssp_canary_word
 define void @struct_constant_out_of_bounds_a() #0 {
   %var = alloca %struct.outer, align 4
-  %outergep = getelementptr inbounds %struct.outer, %struct.outer* %var, i32 1, i32 0
-  %innergep = getelementptr inbounds %struct.inner, %struct.inner* %outergep, i32 0, i32 0
-  store i32 0, i32* %innergep, align 4
+  %outergep = getelementptr inbounds %struct.outer, ptr %var, i32 1, i32 0
+  store i32 0, ptr %outergep, align 4
   ret void
 }
 
@@ -307,9 +296,8 @@ define void @struct_constant_out_of_bounds_a() #0 {
 ; AIX-NOT: __ssp_canary_word
 define void @struct_constant_out_of_bounds_b() #0 {
   %var = alloca %struct.outer, align 4
-  %outergep = getelementptr inbounds %struct.outer, %struct.outer* %var, i32 0, i32 0
-  %innergep = getelementptr inbounds %struct.inner, %struct.inner* %outergep, i32 1, i32 0
-  store i32 0, i32* %innergep, align 4
+  %innergep = getelementptr inbounds %struct.inner, ptr %var, i32 1, i32 0
+  store i32 0, ptr %innergep, align 4
   ret void
 }
 
@@ -319,9 +307,9 @@ define void @struct_constant_out_of_bounds_b() #0 {
 ; AIX: __ssp_canary_word
 define void @struct_constant_out_of_bounds_c() #0 {
   %var = alloca %struct.outer, align 4
-  %outergep = getelementptr inbounds %struct.outer, %struct.outer* %var, i32 0, i32 1
-  %innergep = getelementptr inbounds %struct.inner, %struct.inner* %outergep, i32 1, i32 0
-  store i32 0, i32* %innergep, align 4
+  %outergep = getelementptr inbounds %struct.outer, ptr %var, i32 0, i32 1
+  %innergep = getelementptr inbounds %struct.inner, ptr %outergep, i32 1, i32 0
+  store i32 0, ptr %innergep, align 4
   ret void
 }
 
@@ -330,9 +318,8 @@ define void @struct_constant_out_of_bounds_c() #0 {
 ; AIX: __ssp_canary_word
 define void @struct_nonconstant_out_of_bounds_a(i32 %n) #0 {
   %var = alloca %struct.outer, align 4
-  %outergep = getelementptr inbounds %struct.outer, %struct.outer* %var, i32 %n, i32 0
-  %innergep = getelementptr inbounds %struct.inner, %struct.inner* %outergep, i32 0, i32 0
-  store i32 0, i32* %innergep, align 4
+  %outergep = getelementptr inbounds %struct.outer, ptr %var, i32 %n, i32 0
+  store i32 0, ptr %outergep, align 4
   ret void
 }
 
@@ -341,9 +328,8 @@ define void @struct_nonconstant_out_of_bounds_a(i32 %n) #0 {
 ; AIX: __ssp_canary_word
 define void @struct_nonconstant_out_of_bounds_b(i32 %n) #0 {
   %var = alloca %struct.outer, align 4
-  %outergep = getelementptr inbounds %struct.outer, %struct.outer* %var, i32 0, i32 0
-  %innergep = getelementptr inbounds %struct.inner, %struct.inner* %outergep, i32 %n, i32 0
-  store i32 0, i32* %innergep, align 4
+  %innergep = getelementptr inbounds %struct.inner, ptr %var, i32 %n, i32 0
+  store i32 0, ptr %innergep, align 4
   ret void
 }
 
@@ -352,9 +338,8 @@ define void @struct_nonconstant_out_of_bounds_b(i32 %n) #0 {
 ; AIX-NOT: __ssp_canary_word
 define i32 @bitcast_smaller_load() #0 {
   %var = alloca i64, align 4
-  store i64 0, i64* %var, align 4
-  %bitcast = bitcast i64* %var to i32*
-  %ret = load i32, i32* %bitcast, align 4
+  store i64 0, ptr %var, align 4
+  %ret = load i32, ptr %var, align 4
   ret i32 %ret
 }
 
@@ -363,10 +348,9 @@ define i32 @bitcast_smaller_load() #0 {
 ; AIX-NOT: __ssp_canary_word
 define i32 @bitcast_same_size_load() #0 {
   %var = alloca i64, align 4
-  store i64 0, i64* %var, align 4
-  %bitcast = bitcast i64* %var to %struct.inner*
-  %gep = getelementptr inbounds %struct.inner, %struct.inner* %bitcast, i32 0, i32 1
-  %ret = load i32, i32* %gep, align 4
+  store i64 0, ptr %var, align 4
+  %gep = getelementptr inbounds %struct.inner, ptr %var, i32 0, i32 1
+  %ret = load i32, ptr %gep, align 4
   ret i32 %ret
 }
 
@@ -375,9 +359,8 @@ define i32 @bitcast_same_size_load() #0 {
 ; AIX: __ssp_canary_word
 define i64 @bitcast_larger_load() #0 {
   %var = alloca i32, align 4
-  store i32 0, i32* %var, align 4
-  %bitcast = bitcast i32* %var to i64*
-  %ret = load i64, i64* %bitcast, align 4
+  store i32 0, ptr %var, align 4
+  %ret = load i64, ptr %var, align 4
   ret i64 %ret
 }
 
@@ -386,9 +369,8 @@ define i64 @bitcast_larger_load() #0 {
 ; AIX: __ssp_canary_word
 define i32 @bitcast_larger_store() #0 {
   %var = alloca i32, align 4
-  %bitcast = bitcast i32* %var to i64*
-  store i64 0, i64* %bitcast, align 4
-  %ret = load i32, i32* %var, align 4
+  store i64 0, ptr %var, align 4
+  %ret = load i32, ptr %var, align 4
   ret i32 %ret
 }
 
@@ -397,8 +379,7 @@ define i32 @bitcast_larger_store() #0 {
 ; AIX: __ssp_canary_word
 define i64 @bitcast_larger_cmpxchg(i64 %desired, i64 %new) #0 {
   %var = alloca i32, align 4
-  %bitcast = bitcast i32* %var to i64*
-  %pair = cmpxchg i64* %bitcast, i64 %desired, i64 %new seq_cst monotonic
+  %pair = cmpxchg ptr %var, i64 %desired, i64 %new seq_cst monotonic
   %ret = extractvalue { i64, i1 } %pair, 0
   ret i64 %ret
 }
@@ -408,8 +389,7 @@ define i64 @bitcast_larger_cmpxchg(i64 %desired, i64 %new) #0 {
 ; AIX: __ssp_canary_word
 define i64 @bitcast_larger_atomic_rmw() #0 {
   %var = alloca i32, align 4
-  %bitcast = bitcast i32* %var to i64*
-  %ret = atomicrmw add i64* %bitcast, i64 1 monotonic
+  %ret = atomicrmw add ptr %var, i64 1 monotonic
   ret i64 %ret
 }
 
@@ -420,9 +400,8 @@ define i64 @bitcast_larger_atomic_rmw() #0 {
 ; AIX: __ssp_canary_word
 define i32 @bitcast_overlap() #0 {
   %var = alloca i32, align 4
-  %bitcast = bitcast i32* %var to %struct.packed*
-  %gep = getelementptr inbounds %struct.packed, %struct.packed* %bitcast, i32 0, i32 1
-  %ret = load i32, i32* %gep, align 2
+  %gep = getelementptr inbounds %struct.packed, ptr %var, i32 0, i32 1
+  %ret = load i32, ptr %gep, align 2
   ret i32 %ret
 }
 
@@ -433,10 +412,9 @@ define i32 @bitcast_overlap() #0 {
 ; AIX: __ssp_canary_word
 define i32 @multi_dimensional_array() #0 {
   %var = alloca %struct.multi_dimensional, align 4
-  %gep1 = getelementptr inbounds %struct.multi_dimensional, %struct.multi_dimensional* %var, i32 0, i32 0
-  %gep2 = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* %gep1, i32 0, i32 10
-  %gep3 = getelementptr inbounds [10 x i32], [10 x i32]* %gep2, i32 0, i32 5
-  %ret = load i32, i32* %gep3, align 4
+  %gep2 = getelementptr inbounds [10 x [10 x i32]], ptr %var, i32 0, i32 10
+  %gep3 = getelementptr inbounds [10 x i32], ptr %gep2, i32 0, i32 5
+  %ret = load i32, ptr %gep3, align 4
   ret i32 %ret
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/stack-no-redzone.ll b/llvm/test/CodeGen/PowerPC/stack-no-redzone.ll
index 66ef91b4ce46..780836e99973 100644
--- a/llvm/test/CodeGen/PowerPC/stack-no-redzone.ll
+++ b/llvm/test/CodeGen/PowerPC/stack-no-redzone.ll
@@ -27,7 +27,7 @@
 ; CHECK-NOT: lwz {{[0-9]+}}, -{{[0-9]+}}(1)
 define i32 @test_n() local_unnamed_addr #0 {
 entry:
-  %t0 = tail call i32 bitcast (i32 (...)* @bar0 to i32 ()*)() #0
+  %t0 = tail call i32 @bar0() #0
   ret i32 %t0
 }
 
@@ -42,7 +42,7 @@ entry:
 define i32 @test_a() local_unnamed_addr #0 {
 entry:
   %t0 = alloca i32, align 128
-  %t1 = tail call i32 bitcast (i32 (...)* @bar1 to i32 (i32*)*)(i32* %t0) #0
+  %t1 = tail call i32 @bar1(ptr %t0) #0
   ret i32 %t1
 }
 
@@ -55,7 +55,7 @@ entry:
 ; CHECK-NOT: lwz {{[0-9]+}}, -{{[0-9]+}}(1)
 define i32 @test_d(i32 %p0) local_unnamed_addr #0 {
   %t0 = alloca i32, i32 %p0, align 4
-  %t1 = tail call i32 bitcast (i32 (...)* @bar1 to i32 (i32*)*)(i32* %t0) #0
+  %t1 = tail call i32 @bar1(ptr %t0) #0
   ret i32 %t1
 }
 
@@ -68,8 +68,7 @@ define i32 @test_d(i32 %p0) local_unnamed_addr #0 {
 define i32 @test_s(i32 %p0) local_unnamed_addr #0 {
 entry:
   %t0 = alloca [16384 x i32]
-  %t1 = getelementptr [16384 x i32], [16384 x i32]* %t0, i32 0, i32 0
-  %t2 = tail call i32 bitcast (i32 (...)* @bar1 to i32 (i32*)*)(i32* %t1) #0
+  %t2 = tail call i32 @bar1(ptr %t0) #0
   ret i32 %t2
 }
 
@@ -83,8 +82,8 @@ entry:
 define i32 @test_ad(i32 %p0) local_unnamed_addr #0 {
   %t0 = alloca i32, align 128
   %t1 = alloca i32, i32 %p0, align 4
-  %t2 = tail call i32 bitcast (i32 (...)* @bar1 to i32 (i32*)*)(i32* %t0) #0
-  %t3 = tail call i32 bitcast (i32 (...)* @bar1 to i32 (i32*)*)(i32* %t1) #0
+  %t2 = tail call i32 @bar1(ptr %t0) #0
+  %t3 = tail call i32 @bar1(ptr %t1) #0
   %t4 = add i32 %t2, %t3
   ret i32 %t4
 }
@@ -97,9 +96,8 @@ define i32 @test_ad(i32 %p0) local_unnamed_addr #0 {
 define i32 @test_as() local_unnamed_addr #0 {
   %t0 = alloca i32, align 128
   %t1 = alloca [16384 x i32]
-  %t2 = tail call i32 bitcast (i32 (...)* @bar1 to i32 (i32*)*)(i32* %t0) #0
-  %t3 = getelementptr [16384 x i32], [16384 x i32]* %t1, i32 0, i32 0
-  %t4 = tail call i32 bitcast (i32 (...)* @bar1 to i32 (i32*)*)(i32* %t3) #0
+  %t2 = tail call i32 @bar1(ptr %t0) #0
+  %t4 = tail call i32 @bar1(ptr %t1) #0
   %t5 = add i32 %t2, %t4
   ret i32 %t5
 }
@@ -112,9 +110,8 @@ define i32 @test_as() local_unnamed_addr #0 {
 define i32 @test_ds(i32 %p0) local_unnamed_addr #0 {
   %t0 = alloca i32, i32 %p0, align 4
   %t1 = alloca [16384 x i32]
-  %t2 = tail call i32 bitcast (i32 (...)* @bar1 to i32 (i32*)*)(i32* %t0) #0
-  %t3 = getelementptr [16384 x i32], [16384 x i32]* %t1, i32 0, i32 0
-  %t4 = tail call i32 bitcast (i32 (...)* @bar1 to i32 (i32*)*)(i32* %t3) #0
+  %t2 = tail call i32 @bar1(ptr %t0) #0
+  %t4 = tail call i32 @bar1(ptr %t1) #0
   %t5 = add i32 %t2, %t4
   ret i32 %t5
 }
@@ -129,12 +126,11 @@ define i32 @test_ads(i32 %p0) local_unnamed_addr #0 {
   %t1 = alloca i32, i32 %p0, align 4
   %t2 = alloca [16384 x i32]
 
-  %t3 = tail call i32 bitcast (i32 (...)* @bar1 to i32 (i32*)*)(i32* %t0) #0
-  %t4 = tail call i32 bitcast (i32 (...)* @bar1 to i32 (i32*)*)(i32* %t1) #0
+  %t3 = tail call i32 @bar1(ptr %t0) #0
+  %t4 = tail call i32 @bar1(ptr %t1) #0
   %t5 = add i32 %t3, %t4
 
-  %t6 = getelementptr [16384 x i32], [16384 x i32]* %t2, i32 0, i32 0
-  %t7 = tail call i32 bitcast (i32 (...)* @bar1 to i32 (i32*)*)(i32* %t6) #0
+  %t7 = tail call i32 @bar1(ptr %t2) #0
   %t8 = add i32 %t5, %t7
   ret i32 %t7
 }

diff  --git a/llvm/test/CodeGen/PowerPC/stack-protector.ll b/llvm/test/CodeGen/PowerPC/stack-protector.ll
index 0c59bb7e0a27..dc20d94b6e3e 100644
--- a/llvm/test/CodeGen/PowerPC/stack-protector.ll
+++ b/llvm/test/CodeGen/PowerPC/stack-protector.ll
@@ -22,25 +22,23 @@
 ; FREEBSD32: bl __stack_chk_fail
 ; FREEBSD64: bl __stack_chk_fail
 
-@"\01LC" = internal constant [11 x i8] c"buf == %s\0A\00"		; <[11 x i8]*> [#uses=1]
+@"\01LC" = internal constant [11 x i8] c"buf == %s\0A\00"		; <ptr> [#uses=1]
 
-define void @test(i8* %a) nounwind ssp {
+define void @test(ptr %a) nounwind ssp {
 entry:
-	%a_addr = alloca i8*		; <i8**> [#uses=2]
-	%buf = alloca [8 x i8]		; <[8 x i8]*> [#uses=2]
+	%a_addr = alloca ptr		; <ptr> [#uses=2]
+	%buf = alloca [8 x i8]		; <ptr> [#uses=2]
   %"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
-	store i8* %a, i8** %a_addr
-	%buf1 = bitcast [8 x i8]* %buf to i8*		; <i8*> [#uses=1]
-	%0 = load i8*, i8** %a_addr, align 4		; <i8*> [#uses=1]
-	%1 = call i8* @strcpy(i8* %buf1, i8* %0) nounwind		; <i8*> [#uses=0]
-  %buf2 = bitcast [8 x i8]* %buf to i8*		; <i8*> [#uses=1]
-	%2 = call i32 (i8*, ...) @printf(i8* getelementptr ([11 x i8], [11 x i8]* @"\01LC", i32 0, i32 0), i8* %buf2) nounwind		; <i32> [#uses=0]
+	store ptr %a, ptr %a_addr
+	%0 = load ptr, ptr %a_addr, align 4		; <ptr> [#uses=1]
+	%1 = call ptr @strcpy(ptr %buf, ptr %0) nounwind		; <ptr> [#uses=0]
+	%2 = call i32 (ptr, ...) @printf(ptr @"\01LC", ptr %buf) nounwind		; <i32> [#uses=0]
 	br label %return
 
 return:		; preds = %entry
 	ret void
 }
 
-declare i8* @strcpy(i8*, i8*) nounwind
+declare ptr @strcpy(ptr, ptr) nounwind
 
-declare i32 @printf(i8*, ...) nounwind
+declare i32 @printf(ptr, ...) nounwind

diff  --git a/llvm/test/CodeGen/PowerPC/stack-realign.ll b/llvm/test/CodeGen/PowerPC/stack-realign.ll
index 8638592ab8b8..bccdc12cb27d 100644
--- a/llvm/test/CodeGen/PowerPC/stack-realign.ll
+++ b/llvm/test/CodeGen/PowerPC/stack-realign.ll
@@ -7,23 +7,21 @@ target triple = "powerpc64-unknown-linux-gnu"
 
 %struct.s = type { i32, i32 }
 
-declare void @bar(i32*)
+declare void @bar(ptr)
 
 @barbaz = external global i32
 
-define void @goo(%struct.s* byval(%struct.s) nocapture readonly %a) {
+define void @goo(ptr byval(%struct.s) nocapture readonly %a) {
 entry:
   %x = alloca [2 x i32], align 32
-  %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
-  %0 = load i32, i32* %a1, align 4
-  %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
-  store i32 %0, i32* %arrayidx, align 32
-  %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
-  %1 = load i32, i32* %b, align 4
-  %2 = load i32, i32* @barbaz, align 4
-  %arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
-  store i32 %2, i32* %arrayidx2, align 4
-  call void @bar(i32* %arrayidx)
+  %0 = load i32, ptr %a, align 4
+  store i32 %0, ptr %x, align 32
+  %b = getelementptr inbounds %struct.s, ptr %a, i64 0, i32 1
+  %1 = load i32, ptr %b, align 4
+  %2 = load i32, ptr @barbaz, align 4
+  %arrayidx2 = getelementptr inbounds [2 x i32], ptr %x, i64 0, i64 1
+  store i32 %2, ptr %arrayidx2, align 4
+  call void @bar(ptr %x)
   ret void
 }
 
@@ -105,18 +103,16 @@ entry:
 ; CHECK-32-PIC:     addic 29, 0, 12
 
 ; The large-frame-size case.
-define void @hoo(%struct.s* byval(%struct.s) nocapture readonly %a) {
+define void @hoo(ptr byval(%struct.s) nocapture readonly %a) {
 entry:
   %x = alloca [200000 x i32], align 32
-  %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
-  %0 = load i32, i32* %a1, align 4
-  %arrayidx = getelementptr inbounds [200000 x i32], [200000 x i32]* %x, i64 0, i64 0
-  store i32 %0, i32* %arrayidx, align 32
-  %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
-  %1 = load i32, i32* %b, align 4
-  %arrayidx2 = getelementptr inbounds [200000 x i32], [200000 x i32]* %x, i64 0, i64 1
-  store i32 %1, i32* %arrayidx2, align 4
-  call void @bar(i32* %arrayidx)
+  %0 = load i32, ptr %a, align 4
+  store i32 %0, ptr %x, align 32
+  %b = getelementptr inbounds %struct.s, ptr %a, i64 0, i32 1
+  %1 = load i32, ptr %b, align 4
+  %arrayidx2 = getelementptr inbounds [200000 x i32], ptr %x, i64 0, i64 1
+  store i32 %1, ptr %arrayidx2, align 4
+  call void @bar(ptr %x)
   ret void
 }
 
@@ -174,18 +170,16 @@ entry:
 
 ; Make sure that the FP save area is still allocated correctly relative to
 ; where r30 is saved.
-define void @loo(%struct.s* byval(%struct.s) nocapture readonly %a) {
+define void @loo(ptr byval(%struct.s) nocapture readonly %a) {
 entry:
   %x = alloca [2 x i32], align 32
-  %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
-  %0 = load i32, i32* %a1, align 4
-  %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
-  store i32 %0, i32* %arrayidx, align 32
-  %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
-  %1 = load i32, i32* %b, align 4
-  %arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
-  store i32 %1, i32* %arrayidx2, align 4
-  call void @bar(i32* %arrayidx)
+  %0 = load i32, ptr %a, align 4
+  store i32 %0, ptr %x, align 32
+  %b = getelementptr inbounds %struct.s, ptr %a, i64 0, i32 1
+  %1 = load i32, ptr %b, align 4
+  %arrayidx2 = getelementptr inbounds [2 x i32], ptr %x, i64 0, i64 1
+  store i32 %1, ptr %arrayidx2, align 4
+  call void @bar(ptr %x)
   call void asm sideeffect "", "~{f30}"() nounwind
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/stack-restore-with-setjmp.ll b/llvm/test/CodeGen/PowerPC/stack-restore-with-setjmp.ll
index 50dcffc235dd..30adfdf024be 100644
--- a/llvm/test/CodeGen/PowerPC/stack-restore-with-setjmp.ll
+++ b/llvm/test/CodeGen/PowerPC/stack-restore-with-setjmp.ll
@@ -8,7 +8,7 @@
 @.str = private unnamed_addr constant [33 x i8] c"Successfully returned from main\0A\00", align 1
 
 ; Function Attrs: nounwind
-define dso_local signext i32 @main(i32 signext %argc, i8** nocapture readnone %argv) local_unnamed_addr #0 {
+define dso_local signext i32 @main(i32 signext %argc, ptr nocapture readnone %argv) local_unnamed_addr #0 {
 ; CHECK-LABEL: main:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mfocrf 12, 32
@@ -111,25 +111,22 @@ entry:
   br i1 %cmp, label %return, label %if.end
 
 if.end:                                           ; preds = %entry
-  %0 = bitcast [1 x %struct.__jmp_buf_tag]* %env_buffer to i8*
-  call void @llvm.lifetime.start.p0i8(i64 656, i8* nonnull %0) #5
-  %arraydecay = getelementptr inbounds [1 x %struct.__jmp_buf_tag], [1 x %struct.__jmp_buf_tag]* %env_buffer, i64 0, i64 0
-  %call = call signext i32 @_setjmp(%struct.__jmp_buf_tag* nonnull %arraydecay) #6
+  call void @llvm.lifetime.start.p0(i64 656, ptr nonnull %env_buffer) #5
+  %call = call signext i32 @_setjmp(ptr nonnull %env_buffer) #6
   %cmp1 = icmp ne i32 %argc, 2
   %cmp2 = icmp eq i32 %call, 0
   %or.cond = and i1 %cmp1, %cmp2
   br i1 %or.cond, label %if.then3, label %if.end5
 
 if.then3:                                         ; preds = %if.end
-  %1 = alloca [8 x i8], align 16
-  %.sub = getelementptr inbounds [8 x i8], [8 x i8]* %1, i64 0, i64 0
-  store i8 -1, i8* %.sub, align 16
-  call void @test(i8* nonnull %.sub, %struct.__jmp_buf_tag* nonnull %arraydecay) #7
+  %0 = alloca [8 x i8], align 16
+  store i8 -1, ptr %0, align 16
+  call void @test(ptr nonnull %0, ptr nonnull %env_buffer) #7
   unreachable
 
 if.end5:                                          ; preds = %if.end
-  %call6 = call signext i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([33 x i8], [33 x i8]* @.str, i64 0, i64 0))
-  call void @llvm.lifetime.end.p0i8(i64 656, i8* nonnull %0) #5
+  %call6 = call signext i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str)
+  call void @llvm.lifetime.end.p0(i64 656, ptr nonnull %env_buffer) #5
   br label %return
 
 return:                                           ; preds = %entry, %if.end5
@@ -138,19 +135,19 @@ return:                                           ; preds = %entry, %if.end5
 }
 
 ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
 
 ; Function Attrs: nounwind returns_twice
-declare signext i32 @_setjmp(%struct.__jmp_buf_tag*) local_unnamed_addr
+declare signext i32 @_setjmp(ptr) local_unnamed_addr
 
 ; Function Attrs: noreturn
-declare void @test(i8*, %struct.__jmp_buf_tag*) local_unnamed_addr
+declare void @test(ptr, ptr) local_unnamed_addr
 
 ; Function Attrs: nofree nounwind
-declare noundef signext i32 @printf(i8* nocapture noundef readonly, ...) local_unnamed_addr
+declare noundef signext i32 @printf(ptr nocapture noundef readonly, ...) local_unnamed_addr
 
 ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
 attributes #0 = { nounwind }
 attributes #6 = { nounwind returns_twice }

diff  --git a/llvm/test/CodeGen/PowerPC/stackmap-frame-setup.ll b/llvm/test/CodeGen/PowerPC/stackmap-frame-setup.ll
index b677b8be2966..bc294dc5a7c4 100644
--- a/llvm/test/CodeGen/PowerPC/stackmap-frame-setup.ll
+++ b/llvm/test/CodeGen/PowerPC/stackmap-frame-setup.ll
@@ -4,13 +4,13 @@
 define void @caller_meta_leaf() {
 entry:
   %metadata = alloca i64, i32 3, align 8
-  store i64 11, i64* %metadata
-  store i64 12, i64* %metadata
-  store i64 13, i64* %metadata
+  store i64 11, ptr %metadata
+  store i64 12, ptr %metadata
+  store i64 13, ptr %metadata
 ; ISEL:      ADJCALLSTACKDOWN 0, 0, implicit-def
 ; ISEL-NEXT: STACKMAP
 ; ISEL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def
-  call void (i64, i32, ...) @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata)
+  call void (i64, i32, ...) @llvm.experimental.stackmap(i64 4, i32 0, ptr %metadata)
 ; FAST-ISEL:      ADJCALLSTACKDOWN 0, 0, implicit-def
 ; FAST-ISEL-NEXT: STACKMAP
 ; FAST-ISEL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def

diff  --git a/llvm/test/CodeGen/PowerPC/std-unal-fi.ll b/llvm/test/CodeGen/PowerPC/std-unal-fi.ll
index f68bdbb26200..b488ddc12354 100644
--- a/llvm/test/CodeGen/PowerPC/std-unal-fi.ll
+++ b/llvm/test/CodeGen/PowerPC/std-unal-fi.ll
@@ -9,28 +9,27 @@ BB:
   br label %CF
 
 CF:                                               ; preds = %CF80, %CF, %BB
-  %L5 = load i64, i64* undef
-  store i8 %0, i8* %A4
+  %L5 = load i64, ptr undef
+  store i8 %0, ptr %A4
   %Shuff7 = shufflevector <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> %Shuff, <16 x i32> <i32 28, i32 30, i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 undef, i32 20, i32 22, i32 24, i32 26>
-  %PC10 = bitcast i8* %A4 to ppc_fp128*
   br i1 undef, label %CF, label %CF77
 
 CF77:                                             ; preds = %CF81, %CF83, %CF77, %CF
   br i1 undef, label %CF77, label %CF82
 
 CF82:                                             ; preds = %CF82, %CF77
-  %L19 = load i64, i64* undef
-  store <1 x ppc_fp128> zeroinitializer, <1 x ppc_fp128>* %A
-  store i8 -65, i8* %A4
+  %L19 = load i64, ptr undef
+  store <1 x ppc_fp128> zeroinitializer, ptr %A
+  store i8 -65, ptr %A4
   br i1 undef, label %CF82, label %CF83
 
 CF83:                                             ; preds = %CF82
-  %L34 = load i64, i64* undef
+  %L34 = load i64, ptr undef
   br i1 undef, label %CF77, label %CF81
 
 CF81:                                             ; preds = %CF83
   %Shuff43 = shufflevector <16 x i32> %Shuff7, <16 x i32> undef, <16 x i32> <i32 15, i32 17, i32 19, i32 21, i32 23, i32 undef, i32 undef, i32 29, i32 31, i32 undef, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13>
-  store ppc_fp128 0xM00000000000000000000000000000000, ppc_fp128* %PC10
+  store ppc_fp128 0xM00000000000000000000000000000000, ptr %A4
   br i1 undef, label %CF77, label %CF78
 
 CF78:                                             ; preds = %CF78, %CF81
@@ -40,38 +39,36 @@ CF79:                                             ; preds = %CF79, %CF78
   br i1 undef, label %CF79, label %CF80
 
 CF80:                                             ; preds = %CF79
-  store i64 %L19, i64* undef
+  store i64 %L19, ptr undef
   %Cmp75 = icmp uge i32 206779, undef
   br i1 %Cmp75, label %CF, label %CF76
 
 CF76:                                             ; preds = %CF80
-  store i64 %L5, i64* undef
-  store i64 %L34, i64* undef
+  store i64 %L5, ptr undef
+  store i64 %L34, ptr undef
   ret void
 }
 
-define void @autogen_SD88042(i8*, i32*, i8) {
+define void @autogen_SD88042(ptr, ptr, i8) {
 BB:
   %A4 = alloca <2 x i1>
   %A = alloca <16 x float>
-  %L = load i8, i8* %0
-  %Sl = select i1 false, <16 x float>* %A, <16 x float>* %A
-  %PC = bitcast <2 x i1>* %A4 to i64*
+  %L = load i8, ptr %0
+  %Sl = select i1 false, ptr %A, ptr %A
   %Sl27 = select i1 false, i8 undef, i8 %L
   br label %CF
 
 CF:                                               ; preds = %CF78, %CF, %BB
-  %PC33 = bitcast i32* %1 to i32*
   br i1 undef, label %CF, label %CF77
 
 CF77:                                             ; preds = %CF80, %CF77, %CF
-  store <16 x float> zeroinitializer, <16 x float>* %Sl
-  %L58 = load i32, i32* %PC33
-  store i8 0, i8* %0
+  store <16 x float> zeroinitializer, ptr %Sl
+  %L58 = load i32, ptr %1
+  store i8 0, ptr %0
   br i1 undef, label %CF77, label %CF80
 
 CF80:                                             ; preds = %CF77
-  store i64 0, i64* %PC
+  store i64 0, ptr %A4
   %E67 = extractelement <8 x i1> zeroinitializer, i32 1
   br i1 %E67, label %CF77, label %CF78
 
@@ -80,40 +77,39 @@ CF78:                                             ; preds = %CF80
   br i1 %Cmp73, label %CF, label %CF76
 
 CF76:                                             ; preds = %CF78
-  store i8 %2, i8* %0
-  store i8 %Sl27, i8* %0
+  store i8 %2, ptr %0
+  store i8 %Sl27, ptr %0
   ret void
 }
 
-define void @autogen_SD37497(i8*, i32*, i64*) {
+define void @autogen_SD37497(ptr, ptr, ptr) {
 BB:
   %A1 = alloca i1
   %I8 = insertelement <1 x i32> <i32 -1>, i32 454855, i32 0
   %Cmp = icmp ult <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, undef
-  %L10 = load i64, i64* %2
+  %L10 = load i64, ptr %2
   %E11 = extractelement <4 x i1> %Cmp, i32 2
   br label %CF72
 
 CF72:                                             ; preds = %CF74, %CF72, %BB
-  store double 0xB47BB29A53790718, double* undef
+  store double 0xB47BB29A53790718, ptr undef
   %E18 = extractelement <1 x i32> <i32 -1>, i32 0
   %FC22 = sitofp <1 x i32> %I8 to <1 x float>
   br i1 undef, label %CF72, label %CF74
 
 CF74:                                             ; preds = %CF72
-  store i8 0, i8* %0
-  %PC = bitcast i1* %A1 to i64*
-  %L31 = load i64, i64* %PC
-  store i64 477323, i64* %PC
-  %Sl37 = select i1 false, i32* undef, i32* %1
+  store i8 0, ptr %0
+  %L31 = load i64, ptr %A1
+  store i64 477323, ptr %A1
+  %Sl37 = select i1 false, ptr undef, ptr %1
   %Cmp38 = icmp ugt i1 undef, undef
   br i1 %Cmp38, label %CF72, label %CF73
 
 CF73:                                             ; preds = %CF74
-  store i64 %L31, i64* %PC
+  store i64 %L31, ptr %A1
   %B55 = fdiv <1 x float> undef, %FC22
-  %Sl63 = select i1 %E11, i32* undef, i32* %Sl37
-  store i32 %E18, i32* %Sl63
-  store i64 %L10, i64* %PC
+  %Sl63 = select i1 %E11, ptr undef, ptr %Sl37
+  store i32 %E18, ptr %Sl63
+  store i64 %L10, ptr %A1
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/stdux-constuse.ll b/llvm/test/CodeGen/PowerPC/stdux-constuse.ll
index e207e5e14e64..3af218900d62 100644
--- a/llvm/test/CodeGen/PowerPC/stdux-constuse.ll
+++ b/llvm/test/CodeGen/PowerPC/stdux-constuse.ll
@@ -2,9 +2,9 @@
 target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
-define i32 @test1(i64 %add, i64* %ptr) nounwind {
+define i32 @test1(i64 %add, ptr %ptr) nounwind {
 entry:
-  %p1 = getelementptr i64, i64* %ptr, i64 144115188075855
+  %p1 = getelementptr i64, ptr %ptr, i64 144115188075855
   br label %for.cond2.preheader
 
 for.cond2.preheader:
@@ -13,15 +13,15 @@ for.cond2.preheader:
 
 for.body4:
   %lsr.iv = phi i32 [ %lsr.iv.next, %for.body4 ], [ 16000, %for.cond2.preheader ]
-  %i0 = phi i64* [ %p1, %for.cond2.preheader ], [ %i6, %for.body4 ]
-  %i6 = getelementptr i64, i64* %i0, i64 400000
-  %i7 = getelementptr i64, i64* %i6, i64 300000
-  %i8 = getelementptr i64, i64* %i6, i64 200000
-  %i9 = getelementptr i64, i64* %i6, i64 100000
-  store i64 %add, i64* %i6, align 32
-  store i64 %add, i64* %i7, align 32
-  store i64 %add, i64* %i8, align 32
-  store i64 %add, i64* %i9, align 32
+  %i0 = phi ptr [ %p1, %for.cond2.preheader ], [ %i6, %for.body4 ]
+  %i6 = getelementptr i64, ptr %i0, i64 400000
+  %i7 = getelementptr i64, ptr %i6, i64 300000
+  %i8 = getelementptr i64, ptr %i6, i64 200000
+  %i9 = getelementptr i64, ptr %i6, i64 100000
+  store i64 %add, ptr %i6, align 32
+  store i64 %add, ptr %i7, align 32
+  store i64 %add, ptr %i8, align 32
+  store i64 %add, ptr %i9, align 32
   %lsr.iv.next = add i32 %lsr.iv, -16
   %exitcond.15 = icmp eq i32 %lsr.iv.next, 0
   br i1 %exitcond.15, label %for.end, label %for.body4

diff  --git a/llvm/test/CodeGen/PowerPC/stfiwx-2.ll b/llvm/test/CodeGen/PowerPC/stfiwx-2.ll
index e7f8362744d0..d82313985fcc 100644
--- a/llvm/test/CodeGen/PowerPC/stfiwx-2.ll
+++ b/llvm/test/CodeGen/PowerPC/stfiwx-2.ll
@@ -1,9 +1,9 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu -mcpu=g5 | FileCheck %s
 
-define void @test(float %F, i8* %P) {
+define void @test(float %F, ptr %P) {
 	%I = fptosi float %F to i32
 	%X = trunc i32 %I to i8
-	store i8 %X, i8* %P
+	store i8 %X, ptr %P
 	ret void
 ; CHECK: fctiwz 0, 1
 ; CHECK: stfiwx 0, 0, 4

diff  --git a/llvm/test/CodeGen/PowerPC/stfiwx.ll b/llvm/test/CodeGen/PowerPC/stfiwx.ll
index f9f1afc5d19a..c77897467508 100644
--- a/llvm/test/CodeGen/PowerPC/stfiwx.ll
+++ b/llvm/test/CodeGen/PowerPC/stfiwx.ll
@@ -1,11 +1,11 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu -mattr=stfiwx | FileCheck %s
 ; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu -mattr=-stfiwx | FileCheck -check-prefix=CHECK-LS %s
 
-define void @test1(float %a, i32* %b) nounwind {
+define void @test1(float %a, ptr %b) nounwind {
 ; CHECK-LABEL: @test1
 ; CHECK-LS-LABEL: @test1
         %tmp.2 = fptosi float %a to i32         ; <i32> [#uses=1]
-        store i32 %tmp.2, i32* %b
+        store i32 %tmp.2, ptr %b
         ret void
 
 ; CHECK: stwu
@@ -20,15 +20,15 @@ define void @test1(float %a, i32* %b) nounwind {
 ; CHECK-LS: blr
 }
 
-define void @test2(float %a, i32* %b, i32 %i) nounwind {
+define void @test2(float %a, ptr %b, i32 %i) nounwind {
 ; CHECK-LABEL: @test2
 ; CHECK-LS-LABEL: @test2
-        %tmp.2 = getelementptr i32, i32* %b, i32 1           ; <i32*> [#uses=1]
-        %tmp.5 = getelementptr i32, i32* %b, i32 %i          ; <i32*> [#uses=1]
+        %tmp.2 = getelementptr i32, ptr %b, i32 1           ; <ptr> [#uses=1]
+        %tmp.5 = getelementptr i32, ptr %b, i32 %i          ; <ptr> [#uses=1]
         %tmp.7 = fptosi float %a to i32         ; <i32> [#uses=3]
-        store i32 %tmp.7, i32* %tmp.5
-        store i32 %tmp.7, i32* %tmp.2
-        store i32 %tmp.7, i32* %b
+        store i32 %tmp.7, ptr %tmp.5
+        store i32 %tmp.7, ptr %tmp.2
+        store i32 %tmp.7, ptr %b
         ret void
 
 ; CHECK: stwu

diff  --git a/llvm/test/CodeGen/PowerPC/store-combine.ll b/llvm/test/CodeGen/PowerPC/store-combine.ll
index 7024a6d07b60..9063335bf661 100644
--- a/llvm/test/CodeGen/PowerPC/store-combine.ll
+++ b/llvm/test/CodeGen/PowerPC/store-combine.ll
@@ -1,13 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr9 -verify-machineinstrs < %s | FileCheck %s -check-prefix=CHECK-PPC64LE
 ; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -verify-machineinstrs < %s | FileCheck %s -check-prefix=CHECK-PPC64
-; i8* p;
+; ptr p;
 ; i32 m;
 ; p[0] = (m >> 0) & 0xFF;
 ; p[1] = (m >> 8) & 0xFF;
 ; p[2] = (m >> 16) & 0xFF;
 ; p[3] = (m >> 24) & 0xFF;
-define void @store_i32_by_i8(i32 signext %m, i8* %p) {
+define void @store_i32_by_i8(i32 signext %m, ptr %p) {
 ; CHECK-PPC64LE-LABEL: store_i32_by_i8:
 ; CHECK-PPC64LE:       # %bb.0: # %entry
 ; CHECK-PPC64LE-NEXT:    stw 3, 0(4)
@@ -19,28 +19,28 @@ define void @store_i32_by_i8(i32 signext %m, i8* %p) {
 ; CHECK-PPC64-NEXT:    blr
 entry:
   %conv = trunc i32 %m to i8
-  store i8 %conv, i8* %p, align 1
+  store i8 %conv, ptr %p, align 1
   %0 = lshr i32 %m, 8
   %conv3 = trunc i32 %0 to i8
-  %arrayidx4 = getelementptr inbounds i8, i8* %p, i64 1
-  store i8 %conv3, i8* %arrayidx4, align 1
+  %arrayidx4 = getelementptr inbounds i8, ptr %p, i64 1
+  store i8 %conv3, ptr %arrayidx4, align 1
   %1 = lshr i32 %m, 16
   %conv7 = trunc i32 %1 to i8
-  %arrayidx8 = getelementptr inbounds i8, i8* %p, i64 2
-  store i8 %conv7, i8* %arrayidx8, align 1
+  %arrayidx8 = getelementptr inbounds i8, ptr %p, i64 2
+  store i8 %conv7, ptr %arrayidx8, align 1
   %2 = lshr i32 %m, 24
   %conv11 = trunc i32 %2 to i8
-  %arrayidx12 = getelementptr inbounds i8, i8* %p, i64 3
-  store i8 %conv11, i8* %arrayidx12, align 1
+  %arrayidx12 = getelementptr inbounds i8, ptr %p, i64 3
+  store i8 %conv11, ptr %arrayidx12, align 1
   ret void
 }
-; i8* p;
+; ptr p;
 ; i32 m;
 ; p[0] = (m >> 24) & 0xFF;
 ; p[1] = (m >> 16) & 0xFF;
 ; p[2] = (m >> 8) & 0xFF;
 ; p[3] = (m >> 0) & 0xFF;
-define void @store_i32_by_i8_bswap(i32 signext %m, i8* %p)  {
+define void @store_i32_by_i8_bswap(i32 signext %m, ptr %p)  {
 ; CHECK-PPC64LE-LABEL: store_i32_by_i8_bswap:
 ; CHECK-PPC64LE:       # %bb.0: # %entry
 ; CHECK-PPC64LE-NEXT:    stwbrx 3, 0, 4
@@ -53,21 +53,21 @@ define void @store_i32_by_i8_bswap(i32 signext %m, i8* %p)  {
 entry:
   %0 = lshr i32 %m, 24
   %conv = trunc i32 %0 to i8
-  store i8 %conv, i8* %p, align 1
+  store i8 %conv, ptr %p, align 1
   %1 = lshr i32 %m, 16
   %conv3 = trunc i32 %1 to i8
-  %arrayidx4 = getelementptr inbounds i8, i8* %p, i64 1
-  store i8 %conv3, i8* %arrayidx4, align 1
+  %arrayidx4 = getelementptr inbounds i8, ptr %p, i64 1
+  store i8 %conv3, ptr %arrayidx4, align 1
   %2 = lshr i32 %m, 8
   %conv7 = trunc i32 %2 to i8
-  %arrayidx8 = getelementptr inbounds i8, i8* %p, i64 2
-  store i8 %conv7, i8* %arrayidx8, align 1
+  %arrayidx8 = getelementptr inbounds i8, ptr %p, i64 2
+  store i8 %conv7, ptr %arrayidx8, align 1
   %conv11 = trunc i32 %m to i8
-  %arrayidx12 = getelementptr inbounds i8, i8* %p, i64 3
-  store i8 %conv11, i8* %arrayidx12, align 1
+  %arrayidx12 = getelementptr inbounds i8, ptr %p, i64 3
+  store i8 %conv11, ptr %arrayidx12, align 1
   ret void
 }
-; i8 *p;
+; ptr p;
 ; i64 m;
 ; p[0] = (m >> 0) & 0xFF;
 ; p[1] = (m >> 8) & 0xFF;
@@ -77,7 +77,7 @@ entry:
 ; p[5] = (m >> 40) & 0xFF;
 ; p[6] = (m >> 48) & 0xFF;
 ; p[7] = (m >> 56) & 0xFF;
-define void @store_i64_by_i8(i64 %m, i8* %p)  {
+define void @store_i64_by_i8(i64 %m, ptr %p)  {
 ; CHECK-PPC64LE-LABEL: store_i64_by_i8:
 ; CHECK-PPC64LE:       # %bb.0: # %entry
 ; CHECK-PPC64LE-NEXT:    std 3, 0(4)
@@ -89,38 +89,38 @@ define void @store_i64_by_i8(i64 %m, i8* %p)  {
 ; CHECK-PPC64-NEXT:    blr
 entry:
   %conv = trunc i64 %m to i8
-  store i8 %conv, i8* %p, align 1
+  store i8 %conv, ptr %p, align 1
   %0 = lshr i64 %m, 8
   %conv3 = trunc i64 %0 to i8
-  %arrayidx4 = getelementptr inbounds i8, i8* %p, i64 1
-  store i8 %conv3, i8* %arrayidx4, align 1
+  %arrayidx4 = getelementptr inbounds i8, ptr %p, i64 1
+  store i8 %conv3, ptr %arrayidx4, align 1
   %1 = lshr i64 %m, 16
   %conv7 = trunc i64 %1 to i8
-  %arrayidx8 = getelementptr inbounds i8, i8* %p, i64 2
-  store i8 %conv7, i8* %arrayidx8, align 1
+  %arrayidx8 = getelementptr inbounds i8, ptr %p, i64 2
+  store i8 %conv7, ptr %arrayidx8, align 1
   %2 = lshr i64 %m, 24
   %conv11 = trunc i64 %2 to i8
-  %arrayidx12 = getelementptr inbounds i8, i8* %p, i64 3
-  store i8 %conv11, i8* %arrayidx12, align 1
+  %arrayidx12 = getelementptr inbounds i8, ptr %p, i64 3
+  store i8 %conv11, ptr %arrayidx12, align 1
   %3 = lshr i64 %m, 32
   %conv15 = trunc i64 %3 to i8
-  %arrayidx16 = getelementptr inbounds i8, i8* %p, i64 4
-  store i8 %conv15, i8* %arrayidx16, align 1
+  %arrayidx16 = getelementptr inbounds i8, ptr %p, i64 4
+  store i8 %conv15, ptr %arrayidx16, align 1
   %4 = lshr i64 %m, 40
   %conv19 = trunc i64 %4 to i8
-  %arrayidx20 = getelementptr inbounds i8, i8* %p, i64 5
-  store i8 %conv19, i8* %arrayidx20, align 1
+  %arrayidx20 = getelementptr inbounds i8, ptr %p, i64 5
+  store i8 %conv19, ptr %arrayidx20, align 1
   %5 = lshr i64 %m, 48
   %conv23 = trunc i64 %5 to i8
-  %arrayidx24 = getelementptr inbounds i8, i8* %p, i64 6
-  store i8 %conv23, i8* %arrayidx24, align 1
+  %arrayidx24 = getelementptr inbounds i8, ptr %p, i64 6
+  store i8 %conv23, ptr %arrayidx24, align 1
   %6 = lshr i64 %m, 56
   %conv27 = trunc i64 %6 to i8
-  %arrayidx28 = getelementptr inbounds i8, i8* %p, i64 7
-  store i8 %conv27, i8* %arrayidx28, align 1
+  %arrayidx28 = getelementptr inbounds i8, ptr %p, i64 7
+  store i8 %conv27, ptr %arrayidx28, align 1
   ret void
 }
-; i8 *p;
+; ptr p;
 ; i64 m;
 ; p[7] = (m >> 0) & 0xFF;
 ; p[6] = (m >> 8) & 0xFF;
@@ -130,7 +130,7 @@ entry:
 ; p[2] = (m >> 40) & 0xFF;
 ; p[1] = (m >> 48) & 0xFF;
 ; p[0] = (m >> 56) & 0xFF;
-define void @store_i64_by_i8_bswap(i64 %m, i8* %p)  {
+define void @store_i64_by_i8_bswap(i64 %m, ptr %p)  {
 ; CHECK-PPC64LE-LABEL: store_i64_by_i8_bswap:
 ; CHECK-PPC64LE:       # %bb.0: # %entry
 ; CHECK-PPC64LE-NEXT:    stdbrx 3, 0, 4
@@ -142,39 +142,39 @@ define void @store_i64_by_i8_bswap(i64 %m, i8* %p)  {
 ; CHECK-PPC64-NEXT:    blr
 entry:
   %conv = trunc i64 %m to i8
-  %arrayidx = getelementptr inbounds i8, i8* %p, i64 7
-  store i8 %conv, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i64 7
+  store i8 %conv, ptr %arrayidx, align 1
   %0 = lshr i64 %m, 8
   %conv3 = trunc i64 %0 to i8
-  %arrayidx4 = getelementptr inbounds i8, i8* %p, i64 6
-  store i8 %conv3, i8* %arrayidx4, align 1
+  %arrayidx4 = getelementptr inbounds i8, ptr %p, i64 6
+  store i8 %conv3, ptr %arrayidx4, align 1
   %1 = lshr i64 %m, 16
   %conv7 = trunc i64 %1 to i8
-  %arrayidx8 = getelementptr inbounds i8, i8* %p, i64 5
-  store i8 %conv7, i8* %arrayidx8, align 1
+  %arrayidx8 = getelementptr inbounds i8, ptr %p, i64 5
+  store i8 %conv7, ptr %arrayidx8, align 1
   %2 = lshr i64 %m, 24
   %conv11 = trunc i64 %2 to i8
-  %arrayidx12 = getelementptr inbounds i8, i8* %p, i64 4
-  store i8 %conv11, i8* %arrayidx12, align 1
+  %arrayidx12 = getelementptr inbounds i8, ptr %p, i64 4
+  store i8 %conv11, ptr %arrayidx12, align 1
   %3 = lshr i64 %m, 32
   %conv15 = trunc i64 %3 to i8
-  %arrayidx16 = getelementptr inbounds i8, i8* %p, i64 3
-  store i8 %conv15, i8* %arrayidx16, align 1
+  %arrayidx16 = getelementptr inbounds i8, ptr %p, i64 3
+  store i8 %conv15, ptr %arrayidx16, align 1
   %4 = lshr i64 %m, 40
   %conv19 = trunc i64 %4 to i8
-  %arrayidx20 = getelementptr inbounds i8, i8* %p, i64 2
-  store i8 %conv19, i8* %arrayidx20, align 1
+  %arrayidx20 = getelementptr inbounds i8, ptr %p, i64 2
+  store i8 %conv19, ptr %arrayidx20, align 1
   %5 = lshr i64 %m, 48
   %conv23 = trunc i64 %5 to i8
-  %arrayidx24 = getelementptr inbounds i8, i8* %p, i64 1
-  store i8 %conv23, i8* %arrayidx24, align 1
+  %arrayidx24 = getelementptr inbounds i8, ptr %p, i64 1
+  store i8 %conv23, ptr %arrayidx24, align 1
   %6 = lshr i64 %m, 56
   %conv27 = trunc i64 %6 to i8
-  store i8 %conv27, i8* %p, align 1
+  store i8 %conv27, ptr %p, align 1
   ret void
 }
 
-; i32 t; i8 *p;
+; i32 t; ptr p;
 ; i64 m = t * 7;
 ; p[7] = (m >> 0) & 0xFF;
 ; p[6] = (m >> 8) & 0xFF;
@@ -184,7 +184,7 @@ entry:
 ; p[2] = (m >> 40) & 0xFF;
 ; p[1] = (m >> 48) & 0xFF;
 ; p[0] = (m >> 56) & 0xFF;
-define void @store_i64_by_i8_bswap_uses(i32 signext %t, i8* %p) {
+define void @store_i64_by_i8_bswap_uses(i32 signext %t, ptr %p) {
 ; CHECK-PPC64LE-LABEL: store_i64_by_i8_bswap_uses:
 ; CHECK-PPC64LE:       # %bb.0: # %entry
 ; CHECK-PPC64LE-NEXT:    slwi 5, 3, 3
@@ -204,46 +204,46 @@ entry:
   %mul = mul nsw i32 %t, 7
   %conv = sext i32 %mul to i64
   %conv1 = trunc i32 %mul to i8
-  %arrayidx = getelementptr inbounds i8, i8* %p, i64 7
-  store i8 %conv1, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i64 7
+  store i8 %conv1, ptr %arrayidx, align 1
   %0 = lshr i64 %conv, 8
   %conv4 = trunc i64 %0 to i8
-  %arrayidx5 = getelementptr inbounds i8, i8* %p, i64 6
-  store i8 %conv4, i8* %arrayidx5, align 1
+  %arrayidx5 = getelementptr inbounds i8, ptr %p, i64 6
+  store i8 %conv4, ptr %arrayidx5, align 1
   %1 = lshr i64 %conv, 16
   %conv8 = trunc i64 %1 to i8
-  %arrayidx9 = getelementptr inbounds i8, i8* %p, i64 5
-  store i8 %conv8, i8* %arrayidx9, align 1
+  %arrayidx9 = getelementptr inbounds i8, ptr %p, i64 5
+  store i8 %conv8, ptr %arrayidx9, align 1
   %2 = lshr i64 %conv, 24
   %conv12 = trunc i64 %2 to i8
-  %arrayidx13 = getelementptr inbounds i8, i8* %p, i64 4
-  store i8 %conv12, i8* %arrayidx13, align 1
+  %arrayidx13 = getelementptr inbounds i8, ptr %p, i64 4
+  store i8 %conv12, ptr %arrayidx13, align 1
   %shr14 = ashr i64 %conv, 32
   %conv16 = trunc i64 %shr14 to i8
-  %arrayidx17 = getelementptr inbounds i8, i8* %p, i64 3
-  store i8 %conv16, i8* %arrayidx17, align 1
+  %arrayidx17 = getelementptr inbounds i8, ptr %p, i64 3
+  store i8 %conv16, ptr %arrayidx17, align 1
   %shr18 = ashr i64 %conv, 40
   %conv20 = trunc i64 %shr18 to i8
-  %arrayidx21 = getelementptr inbounds i8, i8* %p, i64 2
-  store i8 %conv20, i8* %arrayidx21, align 1
+  %arrayidx21 = getelementptr inbounds i8, ptr %p, i64 2
+  store i8 %conv20, ptr %arrayidx21, align 1
   %shr22 = ashr i64 %conv, 48
   %conv24 = trunc i64 %shr22 to i8
-  %arrayidx25 = getelementptr inbounds i8, i8* %p, i64 1
-  store i8 %conv24, i8* %arrayidx25, align 1
+  %arrayidx25 = getelementptr inbounds i8, ptr %p, i64 1
+  store i8 %conv24, ptr %arrayidx25, align 1
   %shr26 = ashr i64 %conv, 56
   %conv28 = trunc i64 %shr26 to i8
-  store i8 %conv28, i8* %p, align 1
+  store i8 %conv28, ptr %p, align 1
   ret void
 }
 
 ; One of the stores is volatile
-; i8 *p;
+; ptr p;
 ; p0 = volatile *p;
 ; p[3] = (m >> 0) & 0xFF;
 ; p[2] = (m >> 8) & 0xFF;
 ; p[1] = (m >> 16) & 0xFF;
 ; *p0 = (m >> 24) & 0xFF;
-define void @store_i32_by_i8_bswap_volatile(i32 signext %m, i8* %p) {
+define void @store_i32_by_i8_bswap_volatile(i32 signext %m, ptr %p) {
 ; CHECK-PPC64LE-LABEL: store_i32_by_i8_bswap_volatile:
 ; CHECK-PPC64LE:       # %bb.0: # %entry
 ; CHECK-PPC64LE-NEXT:    li 5, 2
@@ -264,30 +264,30 @@ define void @store_i32_by_i8_bswap_volatile(i32 signext %m, i8* %p) {
 ; CHECK-PPC64-NEXT:    blr
 entry:
   %conv = trunc i32 %m to i8
-  %arrayidx = getelementptr inbounds i8, i8* %p, i64 3
-  store i8 %conv, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i64 3
+  store i8 %conv, ptr %arrayidx, align 1
   %0 = lshr i32 %m, 8
   %conv3 = trunc i32 %0 to i8
-  %arrayidx4 = getelementptr inbounds i8, i8* %p, i64 2
-  store i8 %conv3, i8* %arrayidx4, align 1
+  %arrayidx4 = getelementptr inbounds i8, ptr %p, i64 2
+  store i8 %conv3, ptr %arrayidx4, align 1
   %1 = lshr i32 %m, 16
   %conv7 = trunc i32 %1 to i8
-  %arrayidx8 = getelementptr inbounds i8, i8* %p, i64 1
-  store i8 %conv7, i8* %arrayidx8, align 1
+  %arrayidx8 = getelementptr inbounds i8, ptr %p, i64 1
+  store i8 %conv7, ptr %arrayidx8, align 1
   %2 = lshr i32 %m, 24
   %conv11 = trunc i32 %2 to i8
-  store volatile i8 %conv11, i8* %p, align 1
+  store volatile i8 %conv11, ptr %p, align 1
   ret void
 }
 
 ; There is a store in between individual stores
-; i8* p, q;
+; ptr p, q;
 ; p[3] = (m >> 0) & 0xFF;
 ; p[2] = (m >> 8) & 0xFF;
 ; *q = 3;
 ; p[1] = (m >> 16) & 0xFF;
 ; p[0] = (m >> 24) & 0xFF;
-define void @store_i32_by_i8_bswap_store_in_between(i32 signext %m, i8* %p, i8* %q) {
+define void @store_i32_by_i8_bswap_store_in_between(i32 signext %m, ptr %p, ptr %q) {
 ; CHECK-PPC64LE-LABEL: store_i32_by_i8_bswap_store_in_between:
 ; CHECK-PPC64LE:       # %bb.0: # %entry
 ; CHECK-PPC64LE-NEXT:    li 6, 2
@@ -312,24 +312,24 @@ define void @store_i32_by_i8_bswap_store_in_between(i32 signext %m, i8* %p, i8*
 ; CHECK-PPC64-NEXT:    blr
 entry:
   %conv = trunc i32 %m to i8
-  %arrayidx = getelementptr inbounds i8, i8* %p, i64 3
-  store i8 %conv, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i64 3
+  store i8 %conv, ptr %arrayidx, align 1
   %0 = lshr i32 %m, 8
   %conv3 = trunc i32 %0 to i8
-  %arrayidx4 = getelementptr inbounds i8, i8* %p, i64 2
-  store i8 %conv3, i8* %arrayidx4, align 1
-  store i8 3, i8* %q, align 1
+  %arrayidx4 = getelementptr inbounds i8, ptr %p, i64 2
+  store i8 %conv3, ptr %arrayidx4, align 1
+  store i8 3, ptr %q, align 1
   %1 = lshr i32 %m, 16
   %conv7 = trunc i32 %1 to i8
-  %arrayidx8 = getelementptr inbounds i8, i8* %p, i64 1
-  store i8 %conv7, i8* %arrayidx8, align 1
+  %arrayidx8 = getelementptr inbounds i8, ptr %p, i64 1
+  store i8 %conv7, ptr %arrayidx8, align 1
   %2 = lshr i32 %m, 24
   %conv11 = trunc i32 %2 to i8
-  store i8 %conv11, i8* %p, align 1
+  store i8 %conv11, ptr %p, align 1
   ret void
 }
 
-define void @store_i32_by_i8_bswap_unrelated_store(i32 signext %m, i8* %p, i8* %q) {
+define void @store_i32_by_i8_bswap_unrelated_store(i32 signext %m, ptr %p, ptr %q) {
 ; CHECK-PPC64LE-LABEL: store_i32_by_i8_bswap_unrelated_store:
 ; CHECK-PPC64LE:       # %bb.0: # %entry
 ; CHECK-PPC64LE-NEXT:    srwi 6, 3, 8
@@ -353,28 +353,28 @@ define void @store_i32_by_i8_bswap_unrelated_store(i32 signext %m, i8* %p, i8* %
 ; CHECK-PPC64-NEXT:    blr
 entry:
   %conv = trunc i32 %m to i8
-  %arrayidx = getelementptr inbounds i8, i8* %p, i64 3
-  store i8 %conv, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i64 3
+  store i8 %conv, ptr %arrayidx, align 1
   %0 = lshr i32 %m, 8
   %conv3 = trunc i32 %0 to i8
-  %arrayidx4 = getelementptr inbounds i8, i8* %q, i64 2
-  store i8 %conv3, i8* %arrayidx4, align 1
+  %arrayidx4 = getelementptr inbounds i8, ptr %q, i64 2
+  store i8 %conv3, ptr %arrayidx4, align 1
   %1 = lshr i32 %m, 16
   %conv7 = trunc i32 %1 to i8
-  %arrayidx8 = getelementptr inbounds i8, i8* %p, i64 1
-  store i8 %conv7, i8* %arrayidx8, align 1
+  %arrayidx8 = getelementptr inbounds i8, ptr %p, i64 1
+  store i8 %conv7, ptr %arrayidx8, align 1
   %2 = lshr i32 %m, 24
   %conv11 = trunc i32 %2 to i8
-  store i8 %conv11, i8* %p, align 1
+  store i8 %conv11, ptr %p, align 1
   ret void
 }
 ; i32 m;
-; i8* p;
+; ptr p;
 ; p[3] = (m >> 8) & 0xFF;
 ; p[4] = (m >> 0) & 0xFF;
 ; p[2] = (m >> 16) & 0xFF;
 ; p[1] = (m >> 24) & 0xFF;
-define void @store_i32_by_i8_bswap_nonzero_offset(i32 signext %m, i8* %p) {
+define void @store_i32_by_i8_bswap_nonzero_offset(i32 signext %m, ptr %p) {
 ; CHECK-PPC64LE-LABEL: store_i32_by_i8_bswap_nonzero_offset:
 ; CHECK-PPC64LE:       # %bb.0: # %entry
 ; CHECK-PPC64LE-NEXT:    addi 4, 4, 1
@@ -388,28 +388,28 @@ define void @store_i32_by_i8_bswap_nonzero_offset(i32 signext %m, i8* %p) {
 entry:
   %0 = lshr i32 %m, 8
   %conv = trunc i32 %0 to i8
-  %arrayidx = getelementptr inbounds i8, i8* %p, i64 3
-  store i8 %conv, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i64 3
+  store i8 %conv, ptr %arrayidx, align 1
   %conv3 = trunc i32 %m to i8
-  %arrayidx4 = getelementptr inbounds i8, i8* %p, i64 4
-  store i8 %conv3, i8* %arrayidx4, align 1
+  %arrayidx4 = getelementptr inbounds i8, ptr %p, i64 4
+  store i8 %conv3, ptr %arrayidx4, align 1
   %1 = lshr i32 %m, 16
   %conv7 = trunc i32 %1 to i8
-  %arrayidx8 = getelementptr inbounds i8, i8* %p, i64 2
-  store i8 %conv7, i8* %arrayidx8, align 1
+  %arrayidx8 = getelementptr inbounds i8, ptr %p, i64 2
+  store i8 %conv7, ptr %arrayidx8, align 1
   %2 = lshr i32 %m, 24
   %conv11 = trunc i32 %2 to i8
-  %arrayidx12 = getelementptr inbounds i8, i8* %p, i64 1
-  store i8 %conv11, i8* %arrayidx12, align 1
+  %arrayidx12 = getelementptr inbounds i8, ptr %p, i64 1
+  store i8 %conv11, ptr %arrayidx12, align 1
   ret void
 }
 ; i32 m;
-; i8* p;
+; ptr p;
 ; p[-3] = (m >> 8) & 0xFF;
 ; p[-4] = (m >> 0) & 0xFF;
 ; p[-2] = (m >> 16) & 0xFF;
 ; p[-1] = (m >> 24) & 0xFF;
-define void @store_i32_by_i8_neg_offset(i32 signext %m, i8* %p) {
+define void @store_i32_by_i8_neg_offset(i32 signext %m, ptr %p) {
 ; CHECK-PPC64LE-LABEL: store_i32_by_i8_neg_offset:
 ; CHECK-PPC64LE:       # %bb.0: # %entry
 ; CHECK-PPC64LE-NEXT:    stw 3, -4(4)
@@ -423,28 +423,28 @@ define void @store_i32_by_i8_neg_offset(i32 signext %m, i8* %p) {
 entry:
   %0 = lshr i32 %m, 8
   %conv = trunc i32 %0 to i8
-  %arrayidx = getelementptr inbounds i8, i8* %p, i64 -3
-  store i8 %conv, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i64 -3
+  store i8 %conv, ptr %arrayidx, align 1
   %conv3 = trunc i32 %m to i8
-  %arrayidx4 = getelementptr inbounds i8, i8* %p, i64 -4
-  store i8 %conv3, i8* %arrayidx4, align 1
+  %arrayidx4 = getelementptr inbounds i8, ptr %p, i64 -4
+  store i8 %conv3, ptr %arrayidx4, align 1
   %1 = lshr i32 %m, 16
   %conv7 = trunc i32 %1 to i8
-  %arrayidx8 = getelementptr inbounds i8, i8* %p, i64 -2
-  store i8 %conv7, i8* %arrayidx8, align 1
+  %arrayidx8 = getelementptr inbounds i8, ptr %p, i64 -2
+  store i8 %conv7, ptr %arrayidx8, align 1
   %2 = lshr i32 %m, 24
   %conv11 = trunc i32 %2 to i8
-  %arrayidx12 = getelementptr inbounds i8, i8* %p, i64 -1
-  store i8 %conv11, i8* %arrayidx12, align 1
+  %arrayidx12 = getelementptr inbounds i8, ptr %p, i64 -1
+  store i8 %conv11, ptr %arrayidx12, align 1
   ret void
 }
 ; i32 m;
-; i8* p;
+; ptr p;
 ; p[-3] = (m >> 16) & 0xFF;
 ; p[-4] = (m >> 24) & 0xFF;
 ; p[-2] = (m >> 8) & 0xFF;
 ; p[-1] = (m >> 0) & 0xFF;
-define void @store_i32_by_i8_bswap_neg_offset(i32 signext %m, i8* %p) {
+define void @store_i32_by_i8_bswap_neg_offset(i32 signext %m, ptr %p) {
 ; CHECK-PPC64LE-LABEL: store_i32_by_i8_bswap_neg_offset:
 ; CHECK-PPC64LE:       # %bb.0: # %entry
 ; CHECK-PPC64LE-NEXT:    addi 4, 4, -4
@@ -458,28 +458,28 @@ define void @store_i32_by_i8_bswap_neg_offset(i32 signext %m, i8* %p) {
 entry:
   %0 = lshr i32 %m, 16
   %conv = trunc i32 %0 to i8
-  %arrayidx = getelementptr inbounds i8, i8* %p, i64 -3
-  store i8 %conv, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i64 -3
+  store i8 %conv, ptr %arrayidx, align 1
   %1 = lshr i32 %m, 24
   %conv3 = trunc i32 %1 to i8
-  %arrayidx4 = getelementptr inbounds i8, i8* %p, i64 -4
-  store i8 %conv3, i8* %arrayidx4, align 1
+  %arrayidx4 = getelementptr inbounds i8, ptr %p, i64 -4
+  store i8 %conv3, ptr %arrayidx4, align 1
   %2 = lshr i32 %m, 8
   %conv7 = trunc i32 %2 to i8
-  %arrayidx8 = getelementptr inbounds i8, i8* %p, i64 -2
-  store i8 %conv7, i8* %arrayidx8, align 1
+  %arrayidx8 = getelementptr inbounds i8, ptr %p, i64 -2
+  store i8 %conv7, ptr %arrayidx8, align 1
   %conv11 = trunc i32 %m to i8
-  %arrayidx12 = getelementptr inbounds i8, i8* %p, i64 -1
-  store i8 %conv11, i8* %arrayidx12, align 1
+  %arrayidx12 = getelementptr inbounds i8, ptr %p, i64 -1
+  store i8 %conv11, ptr %arrayidx12, align 1
   ret void
 }
 ; i32 m, i;
-; i8* p;
+; ptr p;
 ; p[i-3] = (m >> 16) & 0xFF;
 ; p[i-4] = (m >> 24) & 0xFF;
 ; p[i-2] = (m >> 8) & 0xFF;
 ; p[i-1] = (m >> 0) & 0xFF;
-define void @store_i32_by_i8_bswap_base_index_offset(i32 %m, i32 %i, i8* %p) {
+define void @store_i32_by_i8_bswap_base_index_offset(i32 %m, i32 %i, ptr %p) {
 ; CHECK-PPC64LE-LABEL: store_i32_by_i8_bswap_base_index_offset:
 ; CHECK-PPC64LE:       # %bb.0: # %entry
 ; CHECK-PPC64LE-NEXT:    extsw 4, 4
@@ -499,39 +499,39 @@ entry:
   %conv = trunc i32 %0 to i8
   %sub = add nsw i32 %i, -3
   %idxprom = sext i32 %sub to i64
-  %arrayidx = getelementptr inbounds i8, i8* %p, i64 %idxprom
-  store i8 %conv, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i64 %idxprom
+  store i8 %conv, ptr %arrayidx, align 1
   %1 = lshr i32 %m, 24
   %conv3 = trunc i32 %1 to i8
   %sub4 = add nsw i32 %i, -4
   %idxprom5 = sext i32 %sub4 to i64
-  %arrayidx6 = getelementptr inbounds i8, i8* %p, i64 %idxprom5
-  store i8 %conv3, i8* %arrayidx6, align 1
+  %arrayidx6 = getelementptr inbounds i8, ptr %p, i64 %idxprom5
+  store i8 %conv3, ptr %arrayidx6, align 1
   %2 = lshr i32 %m, 8
   %conv9 = trunc i32 %2 to i8
   %sub10 = add nsw i32 %i, -2
   %idxprom11 = sext i32 %sub10 to i64
-  %arrayidx12 = getelementptr inbounds i8, i8* %p, i64 %idxprom11
-  store i8 %conv9, i8* %arrayidx12, align 1
+  %arrayidx12 = getelementptr inbounds i8, ptr %p, i64 %idxprom11
+  store i8 %conv9, ptr %arrayidx12, align 1
   %conv15 = trunc i32 %m to i8
   %sub16 = add nsw i32 %i, -1
   %idxprom17 = sext i32 %sub16 to i64
-  %arrayidx18 = getelementptr inbounds i8, i8* %p, i64 %idxprom17
-  store i8 %conv15, i8* %arrayidx18, align 1
+  %arrayidx18 = getelementptr inbounds i8, ptr %p, i64 %idxprom17
+  store i8 %conv15, ptr %arrayidx18, align 1
   ret void
 }
 
-; i8* p;
+; ptr p;
 ; i32 i, m;
-; i8* p0 = p + i;
-; i8* p1 = p + i + 1;
-; i8* p2 = p + i + 2;
-; i8 *p3 = p + i + 3;
+; ptr p0 = p + i;
+; ptr p1 = p + i + 1;
+; ptr p2 = p + i + 2;
+; ptr p3 = p + i + 3;
 ; p0[3] = (m >> 24) & 0xFF;
 ; p1[3] = (m >> 16) & 0xFF;
 ; p2[3] = (m >> 8) & 0xFF;
 ; p3[3] = (m >> 0) & 0xFF;
-define void @store_i32_by_i8_bswap_complicated(i32 %m, i32 %i, i8* %p) {
+define void @store_i32_by_i8_bswap_complicated(i32 %m, i32 %i, ptr %p) {
 ; CHECK-PPC64LE-LABEL: store_i32_by_i8_bswap_complicated:
 ; CHECK-PPC64LE:       # %bb.0: # %entry
 ; CHECK-PPC64LE-NEXT:    extsw 4, 4
@@ -548,30 +548,30 @@ define void @store_i32_by_i8_bswap_complicated(i32 %m, i32 %i, i8* %p) {
 ; CHECK-PPC64-NEXT:    blr
 entry:
   %idx.ext = sext i32 %i to i64
-  %add.ptr = getelementptr inbounds i8, i8* %p, i64 %idx.ext
-  %add.ptr3 = getelementptr inbounds i8, i8* %add.ptr, i64 1
-  %add.ptr6 = getelementptr inbounds i8, i8* %add.ptr, i64 2
-  %add.ptr9 = getelementptr inbounds i8, i8* %add.ptr, i64 3
+  %add.ptr = getelementptr inbounds i8, ptr %p, i64 %idx.ext
+  %add.ptr3 = getelementptr inbounds i8, ptr %add.ptr, i64 1
+  %add.ptr6 = getelementptr inbounds i8, ptr %add.ptr, i64 2
+  %add.ptr9 = getelementptr inbounds i8, ptr %add.ptr, i64 3
   %0 = lshr i32 %m, 24
   %conv = trunc i32 %0 to i8
-  store i8 %conv, i8* %add.ptr9, align 1
+  store i8 %conv, ptr %add.ptr9, align 1
   %1 = lshr i32 %m, 16
   %conv12 = trunc i32 %1 to i8
-  %arrayidx13 = getelementptr inbounds i8, i8* %add.ptr3, i64 3
-  store i8 %conv12, i8* %arrayidx13, align 1
+  %arrayidx13 = getelementptr inbounds i8, ptr %add.ptr3, i64 3
+  store i8 %conv12, ptr %arrayidx13, align 1
   %2 = lshr i32 %m, 8
   %conv16 = trunc i32 %2 to i8
-  %arrayidx17 = getelementptr inbounds i8, i8* %add.ptr6, i64 3
-  store i8 %conv16, i8* %arrayidx17, align 1
+  %arrayidx17 = getelementptr inbounds i8, ptr %add.ptr6, i64 3
+  store i8 %conv16, ptr %arrayidx17, align 1
   %conv20 = trunc i32 %m to i8
-  %arrayidx21 = getelementptr inbounds i8, i8* %add.ptr9, i64 3
-  store i8 %conv20, i8* %arrayidx21, align 1
+  %arrayidx21 = getelementptr inbounds i8, ptr %add.ptr9, i64 3
+  store i8 %conv20, ptr %arrayidx21, align 1
   ret void
 }
-; i8* p; i32 m;
+; ptr p; i32 m;
 ; p[0] = (m >> 8) & 0xFF;
 ; p[1] = (m >> 0) & 0xFF;
-define void @store_i16_by_i8_bswap(i16 %m, i8* %p) {
+define void @store_i16_by_i8_bswap(i16 %m, ptr %p) {
 ; CHECK-PPC64LE-LABEL: store_i16_by_i8_bswap:
 ; CHECK-PPC64LE:       # %bb.0: # %entry
 ; CHECK-PPC64LE-NEXT:    sthbrx 3, 0, 4
@@ -584,16 +584,16 @@ define void @store_i16_by_i8_bswap(i16 %m, i8* %p) {
 entry:
   %0 = lshr i16 %m, 8
   %conv1 = trunc i16 %0 to i8
-  store i8 %conv1, i8* %p, align 1
+  store i8 %conv1, ptr %p, align 1
   %conv5 = trunc i16 %m to i8
-  %arrayidx6 = getelementptr inbounds i8, i8* %p, i64 1
-  store i8 %conv5, i8* %arrayidx6, align 1
+  %arrayidx6 = getelementptr inbounds i8, ptr %p, i64 1
+  store i8 %conv5, ptr %arrayidx6, align 1
   ret void
 }
-; i8* p; i32 m;
+; ptr p; i32 m;
 ; p[0] = (m >> 0) & 0xFF;
 ; p[1] = (m >> 8) & 0xFF;
-define void @store_16_by_i8(i16 %m, i8* %p) {
+define void @store_16_by_i8(i16 %m, ptr %p) {
 ; CHECK-PPC64LE-LABEL: store_16_by_i8:
 ; CHECK-PPC64LE:       # %bb.0: # %entry
 ; CHECK-PPC64LE-NEXT:    sth 3, 0(4)
@@ -605,18 +605,18 @@ define void @store_16_by_i8(i16 %m, i8* %p) {
 ; CHECK-PPC64-NEXT:    blr
 entry:
   %conv1 = trunc i16 %m to i8
-  store i8 %conv1, i8* %p, align 1
+  store i8 %conv1, ptr %p, align 1
   %0 = lshr i16 %m, 8
   %conv5 = trunc i16 %0 to i8
-  %arrayidx6 = getelementptr inbounds i8, i8* %p, i64 1
-  store i8 %conv5, i8* %arrayidx6, align 1
+  %arrayidx6 = getelementptr inbounds i8, ptr %p, i64 1
+  store i8 %conv5, ptr %arrayidx6, align 1
   ret void
 }
 ; This was found when testing the hexxagon in testsuite
-; i8* p; i8 v;
+; ptr p; i8 v;
 ; p[0] = v;
 ; p[1] = v;
-define void @store_same_value_to_consecutive_mem(i8* %p, i8 zeroext %v) {
+define void @store_same_value_to_consecutive_mem(ptr %p, i8 zeroext %v) {
 ; CHECK-PPC64LE-LABEL: store_same_value_to_consecutive_mem:
 ; CHECK-PPC64LE:       # %bb.0: # %entry
 ; CHECK-PPC64LE-NEXT:    stb 4, 0(3)
@@ -629,8 +629,8 @@ define void @store_same_value_to_consecutive_mem(i8* %p, i8 zeroext %v) {
 ; CHECK-PPC64-NEXT:    stb 4, 1(3)
 ; CHECK-PPC64-NEXT:    blr
 entry:
-  store i8 %v, i8* %p, align 1
-  %arrayidx1 = getelementptr inbounds i8, i8* %p, i64 1
-  store i8 %v, i8* %arrayidx1, align 1
+  store i8 %v, ptr %p, align 1
+  %arrayidx1 = getelementptr inbounds i8, ptr %p, i64 1
+  store i8 %v, ptr %arrayidx1, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/store-constant.ll b/llvm/test/CodeGen/PowerPC/store-constant.ll
index 1b7dd168479c..3c72cf58d467 100644
--- a/llvm/test/CodeGen/PowerPC/store-constant.ll
+++ b/llvm/test/CodeGen/PowerPC/store-constant.ll
@@ -6,14 +6,14 @@
 @IVal = external local_unnamed_addr global i32, align 4
 @LVal = external local_unnamed_addr global i64, align 8
 @USVal = external local_unnamed_addr global i16, align 2
- at arr = external local_unnamed_addr global i64*, align 8
- at arri = external local_unnamed_addr global i32*, align 8
+ at arr = external local_unnamed_addr global ptr, align 8
+ at arri = external local_unnamed_addr global ptr, align 8
 
 ; Test the same constant can be used by different stores.
 
 %struct.S = type { i64, i8, i16, i32 }
 
-define void @foo(%struct.S* %p) {
+define void @foo(ptr %p) {
 ; CHECK-LABEL: foo:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li 4, 0
@@ -22,19 +22,18 @@ define void @foo(%struct.S* %p) {
 ; CHECK-NEXT:    sth 4, 10(3)
 ; CHECK-NEXT:    stw 4, 12(3)
 ; CHECK-NEXT:    blr
-  %l4 = bitcast %struct.S* %p to i64*
-  store i64 0, i64* %l4, align 8
-  %c = getelementptr %struct.S, %struct.S* %p, i64 0, i32 1
-  store i8 0, i8* %c, align 8
-  %s = getelementptr %struct.S, %struct.S* %p, i64 0, i32 2
-  store i16 0, i16* %s, align 2
-  %i = getelementptr %struct.S, %struct.S* %p, i64 0, i32 3
-  store i32 0, i32* %i, align 4
+  store i64 0, ptr %p, align 8
+  %c = getelementptr %struct.S, ptr %p, i64 0, i32 1
+  store i8 0, ptr %c, align 8
+  %s = getelementptr %struct.S, ptr %p, i64 0, i32 2
+  store i16 0, ptr %s, align 2
+  %i = getelementptr %struct.S, ptr %p, i64 0, i32 3
+  store i32 0, ptr %i, align 4
   ret void
 
 }
 
-define void @bar(%struct.S* %p) {
+define void @bar(ptr %p) {
 ; CHECK-LABEL: bar:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li 4, 2
@@ -43,14 +42,13 @@ define void @bar(%struct.S* %p) {
 ; CHECK-NEXT:    std 4, 0(3)
 ; CHECK-NEXT:    stb 4, 8(3)
 ; CHECK-NEXT:    blr
-  %i = getelementptr %struct.S, %struct.S* %p, i64 0, i32 3
-  store i32 2, i32* %i, align 4
-  %s = getelementptr %struct.S, %struct.S* %p, i64 0, i32 2
-  store i16 2, i16* %s, align 2
-  %c = getelementptr %struct.S, %struct.S* %p, i64 0, i32 1
-  store i8 2, i8* %c, align 8
-  %l4 = bitcast %struct.S* %p to i64*
-  store i64 2, i64* %l4, align 8
+  %i = getelementptr %struct.S, ptr %p, i64 0, i32 3
+  store i32 2, ptr %i, align 4
+  %s = getelementptr %struct.S, ptr %p, i64 0, i32 2
+  store i16 2, ptr %s, align 2
+  %c = getelementptr %struct.S, ptr %p, i64 0, i32 1
+  store i8 2, ptr %c, align 8
+  store i64 2, ptr %p, align 8
   ret void
 
 }
@@ -74,10 +72,10 @@ define void @setSmallNeg() {
 ; CHECK-NEXT:    stw 7, 0(5)
 ; CHECK-NEXT:    blr
 entry:
-  store i8 -7, i8* @CVal, align 1
-  store i16 -7, i16* @SVal, align 2
-  store i32 -7, i32* @IVal, align 4
-  store i64 -7, i64* @LVal, align 8
+  store i8 -7, ptr @CVal, align 1
+  store i16 -7, ptr @SVal, align 2
+  store i32 -7, ptr @IVal, align 4
+  store i64 -7, ptr @LVal, align 8
   ret void
 }
 
@@ -100,10 +98,10 @@ define void @setSmallPos() {
 ; CHECK-NEXT:    stw 7, 0(5)
 ; CHECK-NEXT:    blr
 entry:
-  store i8 8, i8* @CVal, align 1
-  store i16 8, i16* @SVal, align 2
-  store i32 8, i32* @IVal, align 4
-  store i64 8, i64* @LVal, align 8
+  store i8 8, ptr @CVal, align 1
+  store i16 8, ptr @SVal, align 2
+  store i32 8, ptr @IVal, align 4
+  store i64 8, ptr @LVal, align 8
   ret void
 }
 
@@ -123,9 +121,9 @@ define void @setMaxNeg() {
 ; CHECK-NEXT:    std 6, 0(5)
 ; CHECK-NEXT:    blr
 entry:
-  store i16 -32768, i16* @SVal, align 2
-  store i32 -32768, i32* @IVal, align 4
-  store i64 -32768, i64* @LVal, align 8
+  store i16 -32768, ptr @SVal, align 2
+  store i32 -32768, ptr @IVal, align 4
+  store i64 -32768, ptr @LVal, align 8
   ret void
 }
 
@@ -145,9 +143,9 @@ define void @setMaxPos() {
 ; CHECK-NEXT:    std 6, 0(5)
 ; CHECK-NEXT:    blr
 entry:
-  store i16 32767, i16* @SVal, align 2
-  store i32 32767, i32* @IVal, align 4
-  store i64 32767, i64* @LVal, align 8
+  store i16 32767, ptr @SVal, align 2
+  store i32 32767, ptr @IVal, align 4
+  store i64 32767, ptr @LVal, align 8
   ret void
 }
 
@@ -165,8 +163,8 @@ define void @setExcessiveNeg() {
 ; CHECK-NEXT:    std 5, 0(4)
 ; CHECK-NEXT:    blr
 entry:
-  store i32 -32769, i32* @IVal, align 4
-  store i64 -32769, i64* @LVal, align 8
+  store i32 -32769, ptr @IVal, align 4
+  store i64 -32769, ptr @LVal, align 8
   ret void
 }
 
@@ -187,9 +185,9 @@ define void @setExcessivePos() {
 ; CHECK-NEXT:    std 6, 0(5)
 ; CHECK-NEXT:    blr
 entry:
-  store i16 -32768, i16* @USVal, align 2
-  store i32 32768, i32* @IVal, align 4
-  store i64 32768, i64* @LVal, align 8
+  store i16 -32768, ptr @USVal, align 2
+  store i32 32768, ptr @IVal, align 4
+  store i64 32768, ptr @LVal, align 8
   ret void
 }
 
@@ -223,8 +221,8 @@ entry:
   br i1 %cmp7, label %for.body.lr.ph, label %for.cond.cleanup
 
 for.body.lr.ph:                                   ; preds = %entry
-  %0 = load i64*, i64** @arr, align 8
-  %1 = load i32*, i32** @arri, align 8
+  %0 = load ptr, ptr @arr, align 8
+  %1 = load ptr, ptr @arri, align 8
   %wide.trip.count = zext i32 %Len to i64
   br label %for.body
 
@@ -233,10 +231,10 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
 
 for.body:                                         ; preds = %for.body, %for.body.lr.ph
   %indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
-  %arrayidx = getelementptr inbounds i64, i64* %0, i64 %indvars.iv
-  store i64 -7, i64* %arrayidx, align 8
-  %arrayidx2 = getelementptr inbounds i32, i32* %1, i64 %indvars.iv
-  store i32 -7, i32* %arrayidx2, align 4
+  %arrayidx = getelementptr inbounds i64, ptr %0, i64 %indvars.iv
+  store i64 -7, ptr %arrayidx, align 8
+  %arrayidx2 = getelementptr inbounds i32, ptr %1, i64 %indvars.iv
+  store i32 -7, ptr %arrayidx2, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
@@ -254,8 +252,8 @@ define void @setSameValDiffSizeCI() {
 ; CHECK-NEXT:    stb 5, 0(4)
 ; CHECK-NEXT:    blr
 entry:
-  store i32 255, i32* @IVal, align 4
-  store i8 -1, i8* @CVal, align 1
+  store i32 255, ptr @IVal, align 4
+  store i8 -1, ptr @CVal, align 1
   ret void
 }
 
@@ -272,7 +270,7 @@ define void @setSameValDiffSizeSI() {
 ; CHECK-NEXT:    sth 5, 0(4)
 ; CHECK-NEXT:    blr
 entry:
-  store i32 65535, i32* @IVal, align 4
-  store i16 -1, i16* @SVal, align 2
+  store i32 65535, ptr @IVal, align 4
+  store i16 -1, ptr @SVal, align 2
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/store-forward-be32.ll b/llvm/test/CodeGen/PowerPC/store-forward-be32.ll
index 25a241529dfc..9349c9c79113 100644
--- a/llvm/test/CodeGen/PowerPC/store-forward-be32.ll
+++ b/llvm/test/CodeGen/PowerPC/store-forward-be32.ll
@@ -12,7 +12,7 @@ target triple = "powerpc-ibm-aix7.2.0.0"
 %struct.UST = type { i32, i32 }
 
 ; Function Attrs: nounwind
-define i32 @ustc1(%struct.USST* noundef byval(%struct.USST) align 4 %s) {
+define i32 @ustc1(ptr noundef byval(%struct.USST) align 4 %s) {
 ; CHECK-LABEL: ustc1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 4, 3
@@ -20,15 +20,14 @@ define i32 @ustc1(%struct.USST* noundef byval(%struct.USST) align 4 %s) {
 ; CHECK-NEXT:    stw 4, 24(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.USST, %struct.USST* %s, i32 0, i32 0
-  %0 = load i16, i16* %a, align 4
+  %0 = load i16, ptr %s, align 4
   %conv = zext i16 %0 to i32
   %shr = ashr i32 %conv, 8
   ret i32 %shr
 }
 
 ; Function Attrs: nounwind
-define i32 @ustc2(%struct.USST* noundef byval(%struct.USST) align 4 %s) {
+define i32 @ustc2(ptr noundef byval(%struct.USST) align 4 %s) {
 ; CHECK-LABEL: ustc2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 4, 3
@@ -36,14 +35,13 @@ define i32 @ustc2(%struct.USST* noundef byval(%struct.USST) align 4 %s) {
 ; CHECK-NEXT:    stw 4, 24(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.USST, %struct.USST* %s, i32 0, i32 0
-  %0 = load i16, i16* %a, align 4
+  %0 = load i16, ptr %s, align 4
   %conv = zext i16 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: nounwind
-define i32 @stc1(%struct.SST* noundef byval(%struct.SST) align 4 %s) {
+define i32 @stc1(ptr noundef byval(%struct.SST) align 4 %s) {
 ; CHECK-LABEL: stc1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 4, 3
@@ -51,15 +49,14 @@ define i32 @stc1(%struct.SST* noundef byval(%struct.SST) align 4 %s) {
 ; CHECK-NEXT:    stw 4, 24(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.SST, %struct.SST* %s, i32 0, i32 0
-  %0 = load i16, i16* %a, align 4
+  %0 = load i16, ptr %s, align 4
   %conv = sext i16 %0 to i32
   %shr = ashr i32 %conv, 8
   ret i32 %shr
 }
 
 ; Function Attrs: nounwind
-define i32 @stc2(%struct.SST* noundef byval(%struct.SST) align 4 %s) {
+define i32 @stc2(ptr noundef byval(%struct.SST) align 4 %s) {
 ; CHECK-LABEL: stc2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 4, 3
@@ -67,14 +64,13 @@ define i32 @stc2(%struct.SST* noundef byval(%struct.SST) align 4 %s) {
 ; CHECK-NEXT:    stw 4, 24(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.SST, %struct.SST* %s, i32 0, i32 0
-  %0 = load i16, i16* %a, align 4
+  %0 = load i16, ptr %s, align 4
   %conv = sext i16 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: nounwind
-define i32 @ctc(%struct.CST* noundef byval(%struct.CST) align 4 %s) {
+define i32 @ctc(ptr noundef byval(%struct.CST) align 4 %s) {
 ; CHECK-LABEL: ctc:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 4, 3
@@ -82,14 +78,13 @@ define i32 @ctc(%struct.CST* noundef byval(%struct.CST) align 4 %s) {
 ; CHECK-NEXT:    stw 4, 24(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.CST, %struct.CST* %s, i32 0, i32 0
-  %0 = load i8, i8* %a, align 4
+  %0 = load i8, ptr %s, align 4
   %conv = zext i8 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: nounwind
-define i32 @sctc(%struct.SCST* noundef byval(%struct.SCST) align 4 %s) {
+define i32 @sctc(ptr noundef byval(%struct.SCST) align 4 %s) {
 ; CHECK-LABEL: sctc:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 4, 3
@@ -97,27 +92,25 @@ define i32 @sctc(%struct.SCST* noundef byval(%struct.SCST) align 4 %s) {
 ; CHECK-NEXT:    stw 4, 24(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.SCST, %struct.SCST* %s, i32 0, i32 0
-  %0 = load i8, i8* %a, align 4
+  %0 = load i8, ptr %s, align 4
   %conv = sext i8 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: nounwind
-define i32 @tc44(%struct.ST* noundef byval(%struct.ST) align 4 %s) {
+define i32 @tc44(ptr noundef byval(%struct.ST) align 4 %s) {
 ; CHECK-LABEL: tc44:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stw 3, 24(1)
 ; CHECK-NEXT:    stw 4, 28(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.ST, %struct.ST* %s, i32 0, i32 0
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %s, align 4
   ret i32 %0
 }
 
 ; Function Attrs: nounwind
-define i32 @tc41(%struct.ST* noundef byval(%struct.ST) align 4 %s) {
+define i32 @tc41(ptr noundef byval(%struct.ST) align 4 %s) {
 ; CHECK-LABEL: tc41:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stw 3, 24(1)
@@ -125,14 +118,13 @@ define i32 @tc41(%struct.ST* noundef byval(%struct.ST) align 4 %s) {
 ; CHECK-NEXT:    stw 4, 28(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.ST, %struct.ST* %s, i32 0, i32 0
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %s, align 4
   %shr = ashr i32 %0, 24
   ret i32 %shr
 }
 
 ; Function Attrs: nounwind
-define i32 @tc42(%struct.ST* noundef byval(%struct.ST) align 4 %s) {
+define i32 @tc42(ptr noundef byval(%struct.ST) align 4 %s) {
 ; CHECK-LABEL: tc42:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stw 3, 24(1)
@@ -140,14 +132,13 @@ define i32 @tc42(%struct.ST* noundef byval(%struct.ST) align 4 %s) {
 ; CHECK-NEXT:    stw 4, 28(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.ST, %struct.ST* %s, i32 0, i32 0
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %s, align 4
   %shr = ashr i32 %0, 16
   ret i32 %shr
 }
 
 ; Function Attrs: nounwind
-define i32 @tc43(%struct.ST* noundef byval(%struct.ST) align 4 %s) {
+define i32 @tc43(ptr noundef byval(%struct.ST) align 4 %s) {
 ; CHECK-LABEL: tc43:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stw 3, 24(1)
@@ -155,27 +146,25 @@ define i32 @tc43(%struct.ST* noundef byval(%struct.ST) align 4 %s) {
 ; CHECK-NEXT:    stw 4, 28(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.ST, %struct.ST* %s, i32 0, i32 0
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %s, align 4
   %shr = ashr i32 %0, 8
   ret i32 %shr
 }
 
 ; Function Attrs: nounwind
-define i32 @utc44(%struct.UST* noundef byval(%struct.UST) align 4 %s) {
+define i32 @utc44(ptr noundef byval(%struct.UST) align 4 %s) {
 ; CHECK-LABEL: utc44:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stw 3, 24(1)
 ; CHECK-NEXT:    stw 4, 28(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.UST, %struct.UST* %s, i32 0, i32 0
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %s, align 4
   ret i32 %0
 }
 
 ; Function Attrs: nounwind
-define i32 @utc41(%struct.UST* noundef byval(%struct.UST) align 4 %s) {
+define i32 @utc41(ptr noundef byval(%struct.UST) align 4 %s) {
 ; CHECK-LABEL: utc41:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stw 3, 24(1)
@@ -183,14 +172,13 @@ define i32 @utc41(%struct.UST* noundef byval(%struct.UST) align 4 %s) {
 ; CHECK-NEXT:    stw 4, 28(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.UST, %struct.UST* %s, i32 0, i32 0
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %s, align 4
   %shr = lshr i32 %0, 24
   ret i32 %shr
 }
 
 ; Function Attrs: nounwind
-define i32 @utc42(%struct.UST* noundef byval(%struct.UST) align 4 %s) {
+define i32 @utc42(ptr noundef byval(%struct.UST) align 4 %s) {
 ; CHECK-LABEL: utc42:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stw 3, 24(1)
@@ -198,14 +186,13 @@ define i32 @utc42(%struct.UST* noundef byval(%struct.UST) align 4 %s) {
 ; CHECK-NEXT:    stw 4, 28(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.UST, %struct.UST* %s, i32 0, i32 0
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %s, align 4
   %shr = lshr i32 %0, 16
   ret i32 %shr
 }
 
 ; Function Attrs: nounwind
-define i32 @utc43(%struct.UST* noundef byval(%struct.UST) align 4 %s) {
+define i32 @utc43(ptr noundef byval(%struct.UST) align 4 %s) {
 ; CHECK-LABEL: utc43:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stw 3, 24(1)
@@ -213,8 +200,7 @@ define i32 @utc43(%struct.UST* noundef byval(%struct.UST) align 4 %s) {
 ; CHECK-NEXT:    stw 4, 28(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.UST, %struct.UST* %s, i32 0, i32 0
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %s, align 4
   %shr = lshr i32 %0, 8
   ret i32 %shr
 }

diff  --git a/llvm/test/CodeGen/PowerPC/store-forward-be64.ll b/llvm/test/CodeGen/PowerPC/store-forward-be64.ll
index 332dd6fd22cf..32e67c7ce127 100644
--- a/llvm/test/CodeGen/PowerPC/store-forward-be64.ll
+++ b/llvm/test/CodeGen/PowerPC/store-forward-be64.ll
@@ -14,7 +14,7 @@ target triple = "powerpc64-ibm-aix7.2.0.0"
 %struct.ULST = type { i64, i64 }
 
 ; Function Attrs: nounwind
-define zeroext i32 @ustc1(%struct.USST* noundef byval(%struct.USST) align 8 %s) {
+define zeroext i32 @ustc1(ptr noundef byval(%struct.USST) align 8 %s) {
 ; CHECK-LABEL: ustc1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 4, 3
@@ -22,15 +22,14 @@ define zeroext i32 @ustc1(%struct.USST* noundef byval(%struct.USST) align 8 %s)
 ; CHECK-NEXT:    std 4, 48(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.USST, %struct.USST* %s, i32 0, i32 0
-  %0 = load i16, i16* %a, align 8
+  %0 = load i16, ptr %s, align 8
   %conv = zext i16 %0 to i32
   %shr = ashr i32 %conv, 8
   ret i32 %shr
 }
 
 ; Function Attrs: nounwind
-define zeroext i32 @ustc2(%struct.USST* noundef byval(%struct.USST) align 8 %s) {
+define zeroext i32 @ustc2(ptr noundef byval(%struct.USST) align 8 %s) {
 ; CHECK-LABEL: ustc2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 4, 3
@@ -38,14 +37,13 @@ define zeroext i32 @ustc2(%struct.USST* noundef byval(%struct.USST) align 8 %s)
 ; CHECK-NEXT:    std 4, 48(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.USST, %struct.USST* %s, i32 0, i32 0
-  %0 = load i16, i16* %a, align 8
+  %0 = load i16, ptr %s, align 8
   %conv = zext i16 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: nounwind
-define signext i32 @stc1(%struct.SST* noundef byval(%struct.SST) align 8 %s) {
+define signext i32 @stc1(ptr noundef byval(%struct.SST) align 8 %s) {
 ; CHECK-LABEL: stc1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 4, 3
@@ -55,15 +53,14 @@ define signext i32 @stc1(%struct.SST* noundef byval(%struct.SST) align 8 %s) {
 ; CHECK-NEXT:    srawi 3, 3, 8
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.SST, %struct.SST* %s, i32 0, i32 0
-  %0 = load i16, i16* %a, align 8
+  %0 = load i16, ptr %s, align 8
   %conv = sext i16 %0 to i32
   %shr = ashr i32 %conv, 8
   ret i32 %shr
 }
 
 ; Function Attrs: nounwind
-define signext i32 @stc2(%struct.SST* noundef byval(%struct.SST) align 8 %s) {
+define signext i32 @stc2(ptr noundef byval(%struct.SST) align 8 %s) {
 ; CHECK-LABEL: stc2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 4, 3
@@ -71,14 +68,13 @@ define signext i32 @stc2(%struct.SST* noundef byval(%struct.SST) align 8 %s) {
 ; CHECK-NEXT:    std 4, 48(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.SST, %struct.SST* %s, i32 0, i32 0
-  %0 = load i16, i16* %a, align 8
+  %0 = load i16, ptr %s, align 8
   %conv = sext i16 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: nounwind
-define signext i32 @ctc(%struct.CST* noundef byval(%struct.CST) align 8 %s) {
+define signext i32 @ctc(ptr noundef byval(%struct.CST) align 8 %s) {
 ; CHECK-LABEL: ctc:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 4, 3
@@ -86,14 +82,13 @@ define signext i32 @ctc(%struct.CST* noundef byval(%struct.CST) align 8 %s) {
 ; CHECK-NEXT:    std 4, 48(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.CST, %struct.CST* %s, i32 0, i32 0
-  %0 = load i8, i8* %a, align 8
+  %0 = load i8, ptr %s, align 8
   %conv = zext i8 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: nounwind
-define signext i32 @sctc(%struct.SCST* noundef byval(%struct.SCST) align 8 %s) {
+define signext i32 @sctc(ptr noundef byval(%struct.SCST) align 8 %s) {
 ; CHECK-LABEL: sctc:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 4, 3
@@ -101,14 +96,13 @@ define signext i32 @sctc(%struct.SCST* noundef byval(%struct.SCST) align 8 %s) {
 ; CHECK-NEXT:    std 4, 48(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.SCST, %struct.SCST* %s, i32 0, i32 0
-  %0 = load i8, i8* %a, align 8
+  %0 = load i8, ptr %s, align 8
   %conv = sext i8 %0 to i32
   ret i32 %conv
 }
 
 ; Function Attrs: nounwind
-define signext i32 @tc44(%struct.ST* noundef byval(%struct.ST) align 8 %s) {
+define signext i32 @tc44(ptr noundef byval(%struct.ST) align 8 %s) {
 ; CHECK-LABEL: tc44:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 4, 3
@@ -116,13 +110,12 @@ define signext i32 @tc44(%struct.ST* noundef byval(%struct.ST) align 8 %s) {
 ; CHECK-NEXT:    std 4, 48(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.ST, %struct.ST* %s, i32 0, i32 0
-  %0 = load i32, i32* %a, align 8
+  %0 = load i32, ptr %s, align 8
   ret i32 %0
 }
 
 ; Function Attrs: nounwind
-define signext i32 @tc41(%struct.ST* noundef byval(%struct.ST) align 8 %s) {
+define signext i32 @tc41(ptr noundef byval(%struct.ST) align 8 %s) {
 ; CHECK-LABEL: tc41:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 4, 3
@@ -130,14 +123,13 @@ define signext i32 @tc41(%struct.ST* noundef byval(%struct.ST) align 8 %s) {
 ; CHECK-NEXT:    std 4, 48(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.ST, %struct.ST* %s, i32 0, i32 0
-  %0 = load i32, i32* %a, align 8
+  %0 = load i32, ptr %s, align 8
   %shr = ashr i32 %0, 24
   ret i32 %shr
 }
 
 ; Function Attrs: nounwind
-define signext i32 @tc42(%struct.ST* noundef byval(%struct.ST) align 8 %s) {
+define signext i32 @tc42(ptr noundef byval(%struct.ST) align 8 %s) {
 ; CHECK-LABEL: tc42:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 4, 3
@@ -145,14 +137,13 @@ define signext i32 @tc42(%struct.ST* noundef byval(%struct.ST) align 8 %s) {
 ; CHECK-NEXT:    std 4, 48(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.ST, %struct.ST* %s, i32 0, i32 0
-  %0 = load i32, i32* %a, align 8
+  %0 = load i32, ptr %s, align 8
   %shr = ashr i32 %0, 16
   ret i32 %shr
 }
 
 ; Function Attrs: nounwind
-define signext i32 @tc43(%struct.ST* noundef byval(%struct.ST) align 8 %s) {
+define signext i32 @tc43(ptr noundef byval(%struct.ST) align 8 %s) {
 ; CHECK-LABEL: tc43:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 4, 3
@@ -160,14 +151,13 @@ define signext i32 @tc43(%struct.ST* noundef byval(%struct.ST) align 8 %s) {
 ; CHECK-NEXT:    std 4, 48(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.ST, %struct.ST* %s, i32 0, i32 0
-  %0 = load i32, i32* %a, align 8
+  %0 = load i32, ptr %s, align 8
   %shr = ashr i32 %0, 8
   ret i32 %shr
 }
 
 ; Function Attrs: nounwind
-define zeroext i32 @utc44(%struct.UST* noundef byval(%struct.UST) align 8 %s) #0 {
+define zeroext i32 @utc44(ptr noundef byval(%struct.UST) align 8 %s) #0 {
 ; CHECK-LABEL: utc44:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 4, 3
@@ -175,13 +165,12 @@ define zeroext i32 @utc44(%struct.UST* noundef byval(%struct.UST) align 8 %s) #0
 ; CHECK-NEXT:    std 4, 48(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.UST, %struct.UST* %s, i32 0, i32 0
-  %0 = load i32, i32* %a, align 8
+  %0 = load i32, ptr %s, align 8
   ret i32 %0
 }
 
 ; Function Attrs: nounwind
-define zeroext i32 @utc41(%struct.UST* noundef byval(%struct.UST) align 8 %s) {
+define zeroext i32 @utc41(ptr noundef byval(%struct.UST) align 8 %s) {
 ; CHECK-LABEL: utc41:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 4, 3
@@ -189,14 +178,13 @@ define zeroext i32 @utc41(%struct.UST* noundef byval(%struct.UST) align 8 %s) {
 ; CHECK-NEXT:    std 4, 48(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.UST, %struct.UST* %s, i32 0, i32 0
-  %0 = load i32, i32* %a, align 8
+  %0 = load i32, ptr %s, align 8
   %shr = lshr i32 %0, 24
   ret i32 %shr
 }
 
 ; Function Attrs: nounwind
-define zeroext i32 @utc42(%struct.UST* noundef byval(%struct.UST) align 8 %s) {
+define zeroext i32 @utc42(ptr noundef byval(%struct.UST) align 8 %s) {
 ; CHECK-LABEL: utc42:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 4, 3
@@ -204,14 +192,13 @@ define zeroext i32 @utc42(%struct.UST* noundef byval(%struct.UST) align 8 %s) {
 ; CHECK-NEXT:    std 4, 48(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.UST, %struct.UST* %s, i32 0, i32 0
-  %0 = load i32, i32* %a, align 8
+  %0 = load i32, ptr %s, align 8
   %shr = lshr i32 %0, 16
   ret i32 %shr
 }
 
 ; Function Attrs: nounwind
-define zeroext i32 @utc43(%struct.UST* noundef byval(%struct.UST) align 8 %s) {
+define zeroext i32 @utc43(ptr noundef byval(%struct.UST) align 8 %s) {
 ; CHECK-LABEL: utc43:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 4, 3
@@ -219,14 +206,13 @@ define zeroext i32 @utc43(%struct.UST* noundef byval(%struct.UST) align 8 %s) {
 ; CHECK-NEXT:    std 4, 48(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.UST, %struct.UST* %s, i32 0, i32 0
-  %0 = load i32, i32* %a, align 8
+  %0 = load i32, ptr %s, align 8
   %shr = lshr i32 %0, 8
   ret i32 %shr
 }
 
 ; Function Attrs: nounwind
-define i64 @ltc88(%struct.LST* noundef byval(%struct.LST) align 8 %s) {
+define i64 @ltc88(ptr noundef byval(%struct.LST) align 8 %s) {
 ; CHECK-LABEL: ltc88:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 5, 3
@@ -235,14 +221,13 @@ define i64 @ltc88(%struct.LST* noundef byval(%struct.LST) align 8 %s) {
 ; CHECK-NEXT:    std 4, 56(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.LST, %struct.LST* %s, i32 0, i32 0
-  %0 = load i64, i64* %a, align 8
+  %0 = load i64, ptr %s, align 8
   %shr = ashr i64 %0, 8
   ret i64 %shr
 }
 
 ; Function Attrs: nounwind
-define i64 @ltc86(%struct.LST* noundef byval(%struct.LST) align 8 %s) {
+define i64 @ltc86(ptr noundef byval(%struct.LST) align 8 %s) {
 ; CHECK-LABEL: ltc86:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 5, 3
@@ -251,14 +236,13 @@ define i64 @ltc86(%struct.LST* noundef byval(%struct.LST) align 8 %s) {
 ; CHECK-NEXT:    std 4, 56(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.LST, %struct.LST* %s, i32 0, i32 0
-  %0 = load i64, i64* %a, align 8
+  %0 = load i64, ptr %s, align 8
   %shr = ashr i64 %0, 16
   ret i64 %shr
 }
 
 ; Function Attrs: nounwind
-define i64 @ltc85(%struct.LST* noundef byval(%struct.LST) align 8 %s) {
+define i64 @ltc85(ptr noundef byval(%struct.LST) align 8 %s) {
 ; CHECK-LABEL: ltc85:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 5, 3
@@ -267,14 +251,13 @@ define i64 @ltc85(%struct.LST* noundef byval(%struct.LST) align 8 %s) {
 ; CHECK-NEXT:    std 4, 56(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.LST, %struct.LST* %s, i32 0, i32 0
-  %0 = load i64, i64* %a, align 8
+  %0 = load i64, ptr %s, align 8
   %shr = ashr i64 %0, 24
   ret i64 %shr
 }
 
 ; Function Attrs: nounwind
-define i64 @ltc84(%struct.LST* noundef byval(%struct.LST) align 8 %s) {
+define i64 @ltc84(ptr noundef byval(%struct.LST) align 8 %s) {
 ; CHECK-LABEL: ltc84:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 5, 3
@@ -283,14 +266,13 @@ define i64 @ltc84(%struct.LST* noundef byval(%struct.LST) align 8 %s) {
 ; CHECK-NEXT:    std 4, 56(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.LST, %struct.LST* %s, i32 0, i32 0
-  %0 = load i64, i64* %a, align 8
+  %0 = load i64, ptr %s, align 8
   %shr = ashr i64 %0, 32
   ret i64 %shr
 }
 
 ; Function Attrs: nounwind
-define i64 @ltc83(%struct.LST* noundef byval(%struct.LST) align 8 %s) {
+define i64 @ltc83(ptr noundef byval(%struct.LST) align 8 %s) {
 ; CHECK-LABEL: ltc83:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 5, 3
@@ -299,14 +281,13 @@ define i64 @ltc83(%struct.LST* noundef byval(%struct.LST) align 8 %s) {
 ; CHECK-NEXT:    std 4, 56(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.LST, %struct.LST* %s, i32 0, i32 0
-  %0 = load i64, i64* %a, align 8
+  %0 = load i64, ptr %s, align 8
   %shr = ashr i64 %0, 40
   ret i64 %shr
 }
 
 ; Function Attrs: nounwind
-define i64 @ltc82(%struct.LST* noundef byval(%struct.LST) align 8 %s) {
+define i64 @ltc82(ptr noundef byval(%struct.LST) align 8 %s) {
 ; CHECK-LABEL: ltc82:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 5, 3
@@ -315,14 +296,13 @@ define i64 @ltc82(%struct.LST* noundef byval(%struct.LST) align 8 %s) {
 ; CHECK-NEXT:    std 4, 56(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.LST, %struct.LST* %s, i32 0, i32 0
-  %0 = load i64, i64* %a, align 8
+  %0 = load i64, ptr %s, align 8
   %shr = ashr i64 %0, 48
   ret i64 %shr
 }
 
 ; Function Attrs: nounwind
-define i64 @ltc81(%struct.LST* noundef byval(%struct.LST) align 8 %s) {
+define i64 @ltc81(ptr noundef byval(%struct.LST) align 8 %s) {
 ; CHECK-LABEL: ltc81:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 5, 3
@@ -331,27 +311,25 @@ define i64 @ltc81(%struct.LST* noundef byval(%struct.LST) align 8 %s) {
 ; CHECK-NEXT:    std 4, 56(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.LST, %struct.LST* %s, i32 0, i32 0
-  %0 = load i64, i64* %a, align 8
+  %0 = load i64, ptr %s, align 8
   %shr = ashr i64 %0, 56
   ret i64 %shr
 }
 
 ; Function Attrs: nounwind
-define i64 @ultc88(%struct.ULST* noundef byval(%struct.ULST) align 8 %s) {
+define i64 @ultc88(ptr noundef byval(%struct.ULST) align 8 %s) {
 ; CHECK-LABEL: ultc88:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    std 3, 48(1)
 ; CHECK-NEXT:    std 4, 56(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.ULST, %struct.ULST* %s, i32 0, i32 0
-  %0 = load i64, i64* %a, align 8
+  %0 = load i64, ptr %s, align 8
   ret i64 %0
 }
 
 ; Function Attrs: nounwind
-define i64 @ultc87(%struct.ULST* noundef byval(%struct.ULST) align 8 %s) {
+define i64 @ultc87(ptr noundef byval(%struct.ULST) align 8 %s) {
 ; CHECK-LABEL: ultc87:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 5, 3
@@ -360,14 +338,13 @@ define i64 @ultc87(%struct.ULST* noundef byval(%struct.ULST) align 8 %s) {
 ; CHECK-NEXT:    std 4, 56(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.ULST, %struct.ULST* %s, i32 0, i32 0
-  %0 = load i64, i64* %a, align 8
+  %0 = load i64, ptr %s, align 8
   %shr = lshr i64 %0, 8
   ret i64 %shr
 }
 
 ; Function Attrs: nounwind
-define i64 @ultc86(%struct.ULST* noundef byval(%struct.ULST) align 8 %s) {
+define i64 @ultc86(ptr noundef byval(%struct.ULST) align 8 %s) {
 ; CHECK-LABEL: ultc86:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 5, 3
@@ -376,14 +353,13 @@ define i64 @ultc86(%struct.ULST* noundef byval(%struct.ULST) align 8 %s) {
 ; CHECK-NEXT:    std 4, 56(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.ULST, %struct.ULST* %s, i32 0, i32 0
-  %0 = load i64, i64* %a, align 8
+  %0 = load i64, ptr %s, align 8
   %shr = lshr i64 %0, 16
   ret i64 %shr
 }
 
 ; Function Attrs: nounwind
-define i64 @ultc85(%struct.ULST* noundef byval(%struct.ULST) align 8 %s) {
+define i64 @ultc85(ptr noundef byval(%struct.ULST) align 8 %s) {
 ; CHECK-LABEL: ultc85:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 5, 3
@@ -392,14 +368,13 @@ define i64 @ultc85(%struct.ULST* noundef byval(%struct.ULST) align 8 %s) {
 ; CHECK-NEXT:    std 4, 56(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.ULST, %struct.ULST* %s, i32 0, i32 0
-  %0 = load i64, i64* %a, align 8
+  %0 = load i64, ptr %s, align 8
   %shr = lshr i64 %0, 24
   ret i64 %shr
 }
 
 ; Function Attrs: nounwind
-define i64 @ultc84(%struct.ULST* noundef byval(%struct.ULST) align 8 %s) {
+define i64 @ultc84(ptr noundef byval(%struct.ULST) align 8 %s) {
 ; CHECK-LABEL: ultc84:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 5, 3
@@ -408,14 +383,13 @@ define i64 @ultc84(%struct.ULST* noundef byval(%struct.ULST) align 8 %s) {
 ; CHECK-NEXT:    std 4, 56(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.ULST, %struct.ULST* %s, i32 0, i32 0
-  %0 = load i64, i64* %a, align 8
+  %0 = load i64, ptr %s, align 8
   %shr = lshr i64 %0, 32
   ret i64 %shr
 }
 
 ; Function Attrs: nounwind
-define i64 @ultc83(%struct.ULST* noundef byval(%struct.ULST) align 8 %s) {
+define i64 @ultc83(ptr noundef byval(%struct.ULST) align 8 %s) {
 ; CHECK-LABEL: ultc83:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 5, 3
@@ -424,14 +398,13 @@ define i64 @ultc83(%struct.ULST* noundef byval(%struct.ULST) align 8 %s) {
 ; CHECK-NEXT:    std 4, 56(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.ULST, %struct.ULST* %s, i32 0, i32 0
-  %0 = load i64, i64* %a, align 8
+  %0 = load i64, ptr %s, align 8
   %shr = lshr i64 %0, 40
   ret i64 %shr
 }
 
 ; Function Attrs: nounwind
-define i64 @ultc82(%struct.ULST* noundef byval(%struct.ULST) align 8 %s) {
+define i64 @ultc82(ptr noundef byval(%struct.ULST) align 8 %s) {
 ; CHECK-LABEL: ultc82:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 5, 3
@@ -440,14 +413,13 @@ define i64 @ultc82(%struct.ULST* noundef byval(%struct.ULST) align 8 %s) {
 ; CHECK-NEXT:    std 4, 56(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.ULST, %struct.ULST* %s, i32 0, i32 0
-  %0 = load i64, i64* %a, align 8
+  %0 = load i64, ptr %s, align 8
   %shr = lshr i64 %0, 48
   ret i64 %shr
 }
 
 ; Function Attrs: nounwind
-define i64 @ultc81(%struct.ULST* noundef byval(%struct.ULST) align 8 %s) {
+define i64 @ultc81(ptr noundef byval(%struct.ULST) align 8 %s) {
 ; CHECK-LABEL: ultc81:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 5, 3
@@ -456,8 +428,7 @@ define i64 @ultc81(%struct.ULST* noundef byval(%struct.ULST) align 8 %s) {
 ; CHECK-NEXT:    std 4, 56(1)
 ; CHECK-NEXT:    blr
 entry:
-  %a = getelementptr inbounds %struct.ULST, %struct.ULST* %s, i32 0, i32 0
-  %0 = load i64, i64* %a, align 8
+  %0 = load i64, ptr %s, align 8
   %shr = lshr i64 %0, 56
   ret i64 %shr
 }

diff  --git a/llvm/test/CodeGen/PowerPC/store-load-fwd.ll b/llvm/test/CodeGen/PowerPC/store-load-fwd.ll
index 3d0b8096dc4d..9bb78a7a51b2 100644
--- a/llvm/test/CodeGen/PowerPC/store-load-fwd.ll
+++ b/llvm/test/CodeGen/PowerPC/store-load-fwd.ll
@@ -1,8 +1,8 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- | not grep lwz
 
-define i32 @test(i32* %P) {
-        store i32 1, i32* %P
-        %V = load i32, i32* %P               ; <i32> [#uses=1]
+define i32 @test(ptr %P) {
+        store i32 1, ptr %P
+        %V = load i32, ptr %P               ; <i32> [#uses=1]
         ret i32 %V
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/store-rightmost-vector-elt.ll b/llvm/test/CodeGen/PowerPC/store-rightmost-vector-elt.ll
index 5fbcafecfb3d..04689d85ff7f 100644
--- a/llvm/test/CodeGen/PowerPC/store-rightmost-vector-elt.ll
+++ b/llvm/test/CodeGen/PowerPC/store-rightmost-vector-elt.ll
@@ -7,7 +7,7 @@
 ; RUN:     -mcpu=pwr10 -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names \
 ; RUN:     < %s | FileCheck %s --check-prefix=CHECK-BE
 
-define void @test1(<4 x i32> %A, i32* %a) {
+define void @test1(<4 x i32> %A, ptr %a) {
 ; CHECK-LE-LABEL: test1:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    stxvrwx v2, 0, r5
@@ -20,11 +20,11 @@ define void @test1(<4 x i32> %A, i32* %a) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %vecext = extractelement <4 x i32> %A, i32 0
-  store i32 %vecext, i32* %a, align 4
+  store i32 %vecext, ptr %a, align 4
   ret void
 }
 
-define void @test2(<4 x float> %A, float* %a) {
+define void @test2(<4 x float> %A, ptr %a) {
 ; CHECK-LE-LABEL: test2:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    stxvrwx v2, 0, r5
@@ -37,11 +37,11 @@ define void @test2(<4 x float> %A, float* %a) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %vecext = extractelement <4 x float> %A, i32 0
-  store float %vecext, float* %a, align 4
+  store float %vecext, ptr %a, align 4
   ret void
 }
 
-define void @test3(<2 x double> %A, double* %a) {
+define void @test3(<2 x double> %A, ptr %a) {
 ; CHECK-LE-LABEL: test3:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    stxvrdx v2, 0, r5
@@ -53,11 +53,11 @@ define void @test3(<2 x double> %A, double* %a) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %vecext = extractelement <2 x double> %A, i32 0
-  store double %vecext, double* %a, align 8
+  store double %vecext, ptr %a, align 8
   ret void
 }
 
-define void @test4(<2 x i64> %A, i64* %a) {
+define void @test4(<2 x i64> %A, ptr %a) {
 ; CHECK-LE-LABEL: test4:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    stxvrdx v2, 0, r5
@@ -69,11 +69,11 @@ define void @test4(<2 x i64> %A, i64* %a) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %vecext = extractelement <2 x i64> %A, i32 0
-  store i64 %vecext, i64* %a, align 8
+  store i64 %vecext, ptr %a, align 8
   ret void
 }
 
-define void @test5(<8 x i16> %A, i16* %a) {
+define void @test5(<8 x i16> %A, ptr %a) {
 ; CHECK-LE-LABEL: test5:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    stxvrhx v2, 0, r5
@@ -86,11 +86,11 @@ define void @test5(<8 x i16> %A, i16* %a) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %vecext = extractelement <8 x i16> %A, i32 0
-  store i16 %vecext, i16* %a, align 2
+  store i16 %vecext, ptr %a, align 2
   ret void
 }
 
-define void @test6(<16 x i8> %A, i8* %a) {
+define void @test6(<16 x i8> %A, ptr %a) {
 ; CHECK-LE-LABEL: test6:
 ; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    stxvrbx v2, 0, r5
@@ -103,7 +103,7 @@ define void @test6(<16 x i8> %A, i8* %a) {
 ; CHECK-BE-NEXT:    blr
 entry:
   %vecext = extractelement <16 x i8> %A, i32 0
-  store i8 %vecext, i8* %a, align 1
+  store i8 %vecext, ptr %a, align 1
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/store-update.ll b/llvm/test/CodeGen/PowerPC/store-update.ll
index 80a234e94269..882eb2a37645 100644
--- a/llvm/test/CodeGen/PowerPC/store-update.ll
+++ b/llvm/test/CodeGen/PowerPC/store-update.ll
@@ -3,44 +3,44 @@
 target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
-define i8* @test_stbu(i8* %base, i8 zeroext %val) nounwind {
+define ptr @test_stbu(ptr %base, i8 zeroext %val) nounwind {
 entry:
-  %arrayidx = getelementptr inbounds i8, i8* %base, i64 16
-  store i8 %val, i8* %arrayidx, align 1
-  ret i8* %arrayidx
+  %arrayidx = getelementptr inbounds i8, ptr %base, i64 16
+  store i8 %val, ptr %arrayidx, align 1
+  ret ptr %arrayidx
 }
 ; CHECK: @test_stbu
 ; CHECK: %entry
 ; CHECK-NEXT: stbu
 ; CHECK-NEXT: blr
 
-define i8* @test_stbux(i8* %base, i8 zeroext %val, i64 %offset) nounwind {
+define ptr @test_stbux(ptr %base, i8 zeroext %val, i64 %offset) nounwind {
 entry:
-  %arrayidx = getelementptr inbounds i8, i8* %base, i64 %offset
-  store i8 %val, i8* %arrayidx, align 1
-  ret i8* %arrayidx
+  %arrayidx = getelementptr inbounds i8, ptr %base, i64 %offset
+  store i8 %val, ptr %arrayidx, align 1
+  ret ptr %arrayidx
 }
 ; CHECK: @test_stbux
 ; CHECK: %entry
 ; CHECK-NEXT: stbux
 ; CHECK-NEXT: blr
 
-define i16* @test_sthu(i16* %base, i16 zeroext %val) nounwind {
+define ptr @test_sthu(ptr %base, i16 zeroext %val) nounwind {
 entry:
-  %arrayidx = getelementptr inbounds i16, i16* %base, i64 16
-  store i16 %val, i16* %arrayidx, align 2
-  ret i16* %arrayidx
+  %arrayidx = getelementptr inbounds i16, ptr %base, i64 16
+  store i16 %val, ptr %arrayidx, align 2
+  ret ptr %arrayidx
 }
 ; CHECK: @test_sthu
 ; CHECK: %entry
 ; CHECK-NEXT: sthu
 ; CHECK-NEXT: blr
 
-define i16* @test_sthux(i16* %base, i16 zeroext %val, i64 %offset) nounwind {
+define ptr @test_sthux(ptr %base, i16 zeroext %val, i64 %offset) nounwind {
 entry:
-  %arrayidx = getelementptr inbounds i16, i16* %base, i64 %offset
-  store i16 %val, i16* %arrayidx, align 2
-  ret i16* %arrayidx
+  %arrayidx = getelementptr inbounds i16, ptr %base, i64 %offset
+  store i16 %val, ptr %arrayidx, align 2
+  ret ptr %arrayidx
 }
 ; CHECK: @test_sthux
 ; CHECK: %entry
@@ -48,22 +48,22 @@ entry:
 ; CHECK-NEXT: sthux
 ; CHECK-NEXT: blr
 
-define i32* @test_stwu(i32* %base, i32 zeroext %val) nounwind {
+define ptr @test_stwu(ptr %base, i32 zeroext %val) nounwind {
 entry:
-  %arrayidx = getelementptr inbounds i32, i32* %base, i64 16
-  store i32 %val, i32* %arrayidx, align 4
-  ret i32* %arrayidx
+  %arrayidx = getelementptr inbounds i32, ptr %base, i64 16
+  store i32 %val, ptr %arrayidx, align 4
+  ret ptr %arrayidx
 }
 ; CHECK: @test_stwu
 ; CHECK: %entry
 ; CHECK-NEXT: stwu
 ; CHECK-NEXT: blr
 
-define i32* @test_stwux(i32* %base, i32 zeroext %val, i64 %offset) nounwind {
+define ptr @test_stwux(ptr %base, i32 zeroext %val, i64 %offset) nounwind {
 entry:
-  %arrayidx = getelementptr inbounds i32, i32* %base, i64 %offset
-  store i32 %val, i32* %arrayidx, align 4
-  ret i32* %arrayidx
+  %arrayidx = getelementptr inbounds i32, ptr %base, i64 %offset
+  store i32 %val, ptr %arrayidx, align 4
+  ret ptr %arrayidx
 }
 ; CHECK: @test_stwux
 ; CHECK: %entry
@@ -71,48 +71,48 @@ entry:
 ; CHECK-NEXT: stwux
 ; CHECK-NEXT: blr
 
-define i8* @test_stbu8(i8* %base, i64 %val) nounwind {
+define ptr @test_stbu8(ptr %base, i64 %val) nounwind {
 entry:
   %conv = trunc i64 %val to i8
-  %arrayidx = getelementptr inbounds i8, i8* %base, i64 16
-  store i8 %conv, i8* %arrayidx, align 1
-  ret i8* %arrayidx
+  %arrayidx = getelementptr inbounds i8, ptr %base, i64 16
+  store i8 %conv, ptr %arrayidx, align 1
+  ret ptr %arrayidx
 }
 ; CHECK: @test_stbu8
 ; CHECK: %entry
 ; CHECK-NEXT: stbu
 ; CHECK-NEXT: blr
 
-define i8* @test_stbux8(i8* %base, i64 %val, i64 %offset) nounwind {
+define ptr @test_stbux8(ptr %base, i64 %val, i64 %offset) nounwind {
 entry:
   %conv = trunc i64 %val to i8
-  %arrayidx = getelementptr inbounds i8, i8* %base, i64 %offset
-  store i8 %conv, i8* %arrayidx, align 1
-  ret i8* %arrayidx
+  %arrayidx = getelementptr inbounds i8, ptr %base, i64 %offset
+  store i8 %conv, ptr %arrayidx, align 1
+  ret ptr %arrayidx
 }
 ; CHECK: @test_stbux8
 ; CHECK: %entry
 ; CHECK-NEXT: stbux
 ; CHECK-NEXT: blr
 
-define i16* @test_sthu8(i16* %base, i64 %val) nounwind {
+define ptr @test_sthu8(ptr %base, i64 %val) nounwind {
 entry:
   %conv = trunc i64 %val to i16
-  %arrayidx = getelementptr inbounds i16, i16* %base, i64 16
-  store i16 %conv, i16* %arrayidx, align 2
-  ret i16* %arrayidx
+  %arrayidx = getelementptr inbounds i16, ptr %base, i64 16
+  store i16 %conv, ptr %arrayidx, align 2
+  ret ptr %arrayidx
 }
 ; CHECK: @test_sthu
 ; CHECK: %entry
 ; CHECK-NEXT: sthu
 ; CHECK-NEXT: blr
 
-define i16* @test_sthux8(i16* %base, i64 %val, i64 %offset) nounwind {
+define ptr @test_sthux8(ptr %base, i64 %val, i64 %offset) nounwind {
 entry:
   %conv = trunc i64 %val to i16
-  %arrayidx = getelementptr inbounds i16, i16* %base, i64 %offset
-  store i16 %conv, i16* %arrayidx, align 2
-  ret i16* %arrayidx
+  %arrayidx = getelementptr inbounds i16, ptr %base, i64 %offset
+  store i16 %conv, ptr %arrayidx, align 2
+  ret ptr %arrayidx
 }
 ; CHECK: @test_sthux
 ; CHECK: %entry
@@ -120,24 +120,24 @@ entry:
 ; CHECK-NEXT: sthux
 ; CHECK-NEXT: blr
 
-define i32* @test_stwu8(i32* %base, i64 %val) nounwind {
+define ptr @test_stwu8(ptr %base, i64 %val) nounwind {
 entry:
   %conv = trunc i64 %val to i32
-  %arrayidx = getelementptr inbounds i32, i32* %base, i64 16
-  store i32 %conv, i32* %arrayidx, align 4
-  ret i32* %arrayidx
+  %arrayidx = getelementptr inbounds i32, ptr %base, i64 16
+  store i32 %conv, ptr %arrayidx, align 4
+  ret ptr %arrayidx
 }
 ; CHECK: @test_stwu
 ; CHECK: %entry
 ; CHECK-NEXT: stwu
 ; CHECK-NEXT: blr
 
-define i32* @test_stwux8(i32* %base, i64 %val, i64 %offset) nounwind {
+define ptr @test_stwux8(ptr %base, i64 %val, i64 %offset) nounwind {
 entry:
   %conv = trunc i64 %val to i32
-  %arrayidx = getelementptr inbounds i32, i32* %base, i64 %offset
-  store i32 %conv, i32* %arrayidx, align 4
-  ret i32* %arrayidx
+  %arrayidx = getelementptr inbounds i32, ptr %base, i64 %offset
+  store i32 %conv, ptr %arrayidx, align 4
+  ret ptr %arrayidx
 }
 ; CHECK: @test_stwux
 ; CHECK: %entry
@@ -145,22 +145,22 @@ entry:
 ; CHECK-NEXT: stwux
 ; CHECK-NEXT: blr
 
-define i64* @test_stdu(i64* %base, i64 %val) nounwind {
+define ptr @test_stdu(ptr %base, i64 %val) nounwind {
 entry:
-  %arrayidx = getelementptr inbounds i64, i64* %base, i64 16
-  store i64 %val, i64* %arrayidx, align 8
-  ret i64* %arrayidx
+  %arrayidx = getelementptr inbounds i64, ptr %base, i64 16
+  store i64 %val, ptr %arrayidx, align 8
+  ret ptr %arrayidx
 }
 ; CHECK: @test_stdu
 ; CHECK: %entry
 ; CHECK-NEXT: stdu
 ; CHECK-NEXT: blr
 
-define i64* @test_stdux(i64* %base, i64 %val, i64 %offset) nounwind {
+define ptr @test_stdux(ptr %base, i64 %val, i64 %offset) nounwind {
 entry:
-  %arrayidx = getelementptr inbounds i64, i64* %base, i64 %offset
-  store i64 %val, i64* %arrayidx, align 8
-  ret i64* %arrayidx
+  %arrayidx = getelementptr inbounds i64, ptr %base, i64 %offset
+  store i64 %val, ptr %arrayidx, align 8
+  ret ptr %arrayidx
 }
 ; CHECK: @test_stdux
 ; CHECK: %entry

diff  --git a/llvm/test/CodeGen/PowerPC/store_fptoi.ll b/llvm/test/CodeGen/PowerPC/store_fptoi.ll
index eda44437bb1f..df7d318c3384 100644
--- a/llvm/test/CodeGen/PowerPC/store_fptoi.ll
+++ b/llvm/test/CodeGen/PowerPC/store_fptoi.ll
@@ -9,7 +9,7 @@
 ; ==========================================
 
 ; Function Attrs: norecurse nounwind
-define void @qpConv2sdw(fp128* nocapture readonly %a, i64* nocapture %b) {
+define void @qpConv2sdw(ptr nocapture readonly %a, ptr nocapture %b) {
 ; CHECK-LABEL: qpConv2sdw:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxv 2, 0(3)
@@ -38,16 +38,16 @@ define void @qpConv2sdw(fp128* nocapture readonly %a, i64* nocapture %b) {
 ; CHECK-PWR8-NEXT:    mtlr 0
 ; CHECK-PWR8-NEXT:    blr
 entry:
-  %0 = load fp128, fp128* %a, align 16
+  %0 = load fp128, ptr %a, align 16
   %conv = fptosi fp128 %0 to i64
-  store i64 %conv, i64* %b, align 8
+  store i64 %conv, ptr %b, align 8
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @qpConv2sw(fp128* nocapture readonly %a, i32* nocapture %b) {
+define void @qpConv2sw(ptr nocapture readonly %a, ptr nocapture %b) {
 ; CHECK-LABEL: qpConv2sw:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxv 2, 0(3)
@@ -76,16 +76,16 @@ define void @qpConv2sw(fp128* nocapture readonly %a, i32* nocapture %b) {
 ; CHECK-PWR8-NEXT:    mtlr 0
 ; CHECK-PWR8-NEXT:    blr
 entry:
-  %0 = load fp128, fp128* %a, align 16
+  %0 = load fp128, ptr %a, align 16
   %conv = fptosi fp128 %0 to i32
-  store i32 %conv, i32* %b, align 4
+  store i32 %conv, ptr %b, align 4
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @qpConv2udw(fp128* nocapture readonly %a, i64* nocapture %b) {
+define void @qpConv2udw(ptr nocapture readonly %a, ptr nocapture %b) {
 ; CHECK-LABEL: qpConv2udw:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxv 2, 0(3)
@@ -114,16 +114,16 @@ define void @qpConv2udw(fp128* nocapture readonly %a, i64* nocapture %b) {
 ; CHECK-PWR8-NEXT:    mtlr 0
 ; CHECK-PWR8-NEXT:    blr
 entry:
-  %0 = load fp128, fp128* %a, align 16
+  %0 = load fp128, ptr %a, align 16
   %conv = fptoui fp128 %0 to i64
-  store i64 %conv, i64* %b, align 8
+  store i64 %conv, ptr %b, align 8
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @qpConv2uw(fp128* nocapture readonly %a, i32* nocapture %b) {
+define void @qpConv2uw(ptr nocapture readonly %a, ptr nocapture %b) {
 ; CHECK-LABEL: qpConv2uw:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxv 2, 0(3)
@@ -152,16 +152,16 @@ define void @qpConv2uw(fp128* nocapture readonly %a, i32* nocapture %b) {
 ; CHECK-PWR8-NEXT:    mtlr 0
 ; CHECK-PWR8-NEXT:    blr
 entry:
-  %0 = load fp128, fp128* %a, align 16
+  %0 = load fp128, ptr %a, align 16
   %conv = fptoui fp128 %0 to i32
-  store i32 %conv, i32* %b, align 4
+  store i32 %conv, ptr %b, align 4
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @dpConv2sdw(double* nocapture readonly %a, i64* nocapture %b) {
+define void @dpConv2sdw(ptr nocapture readonly %a, ptr nocapture %b) {
 ; CHECK-LABEL: dpConv2sdw:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd 0, 0(3)
@@ -176,16 +176,16 @@ define void @dpConv2sdw(double* nocapture readonly %a, i64* nocapture %b) {
 ; CHECK-PWR8-NEXT:    stxsdx 0, 0, 4
 ; CHECK-PWR8-NEXT:    blr
 entry:
-  %0 = load double, double* %a, align 8
+  %0 = load double, ptr %a, align 8
   %conv = fptosi double %0 to i64
-  store i64 %conv, i64* %b, align 8
+  store i64 %conv, ptr %b, align 8
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @dpConv2sw(double* nocapture readonly %a, i32* nocapture %b) {
+define void @dpConv2sw(ptr nocapture readonly %a, ptr nocapture %b) {
 ; CHECK-LABEL: dpConv2sw:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd 0, 0(3)
@@ -200,16 +200,16 @@ define void @dpConv2sw(double* nocapture readonly %a, i32* nocapture %b) {
 ; CHECK-PWR8-NEXT:    stfiwx 0, 0, 4
 ; CHECK-PWR8-NEXT:    blr
 entry:
-  %0 = load double, double* %a, align 8
+  %0 = load double, ptr %a, align 8
   %conv = fptosi double %0 to i32
-  store i32 %conv, i32* %b, align 4
+  store i32 %conv, ptr %b, align 4
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @dpConv2shw(double* nocapture readonly %a, i16* nocapture %b) {
+define void @dpConv2shw(ptr nocapture readonly %a, ptr nocapture %b) {
 ; CHECK-LABEL: dpConv2shw:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd 0, 0(3)
@@ -225,16 +225,16 @@ define void @dpConv2shw(double* nocapture readonly %a, i16* nocapture %b) {
 ; CHECK-PWR8-NEXT:    sth 3, 0(4)
 ; CHECK-PWR8-NEXT:    blr
 entry:
-  %0 = load double, double* %a, align 8
+  %0 = load double, ptr %a, align 8
   %conv = fptosi double %0 to i16
-  store i16 %conv, i16* %b, align 2
+  store i16 %conv, ptr %b, align 2
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @dpConv2sb(double* nocapture readonly %a, i8* nocapture %b) {
+define void @dpConv2sb(ptr nocapture readonly %a, ptr nocapture %b) {
 ; CHECK-LABEL: dpConv2sb:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd 0, 0(3)
@@ -250,16 +250,16 @@ define void @dpConv2sb(double* nocapture readonly %a, i8* nocapture %b) {
 ; CHECK-PWR8-NEXT:    stb 3, 0(4)
 ; CHECK-PWR8-NEXT:    blr
 entry:
-  %0 = load double, double* %a, align 8
+  %0 = load double, ptr %a, align 8
   %conv = fptosi double %0 to i8
-  store i8 %conv, i8* %b, align 1
+  store i8 %conv, ptr %b, align 1
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @spConv2sdw(float* nocapture readonly %a, i64* nocapture %b) {
+define void @spConv2sdw(ptr nocapture readonly %a, ptr nocapture %b) {
 ; CHECK-LABEL: spConv2sdw:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs 0, 0(3)
@@ -274,16 +274,16 @@ define void @spConv2sdw(float* nocapture readonly %a, i64* nocapture %b) {
 ; CHECK-PWR8-NEXT:    stxsdx 0, 0, 4
 ; CHECK-PWR8-NEXT:    blr
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %conv = fptosi float %0 to i64
-  store i64 %conv, i64* %b, align 8
+  store i64 %conv, ptr %b, align 8
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @spConv2sw(float* nocapture readonly %a, i32* nocapture %b) {
+define void @spConv2sw(ptr nocapture readonly %a, ptr nocapture %b) {
 ; CHECK-LABEL: spConv2sw:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs 0, 0(3)
@@ -298,16 +298,16 @@ define void @spConv2sw(float* nocapture readonly %a, i32* nocapture %b) {
 ; CHECK-PWR8-NEXT:    stfiwx 0, 0, 4
 ; CHECK-PWR8-NEXT:    blr
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %conv = fptosi float %0 to i32
-  store i32 %conv, i32* %b, align 4
+  store i32 %conv, ptr %b, align 4
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @spConv2shw(float* nocapture readonly %a, i16* nocapture %b) {
+define void @spConv2shw(ptr nocapture readonly %a, ptr nocapture %b) {
 ; CHECK-LABEL: spConv2shw:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs 0, 0(3)
@@ -323,16 +323,16 @@ define void @spConv2shw(float* nocapture readonly %a, i16* nocapture %b) {
 ; CHECK-PWR8-NEXT:    sth 3, 0(4)
 ; CHECK-PWR8-NEXT:    blr
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %conv = fptosi float %0 to i16
-  store i16 %conv, i16* %b, align 2
+  store i16 %conv, ptr %b, align 2
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @spConv2sb(float* nocapture readonly %a, i8* nocapture %b) {
+define void @spConv2sb(ptr nocapture readonly %a, ptr nocapture %b) {
 ; CHECK-LABEL: spConv2sb:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs 0, 0(3)
@@ -348,16 +348,16 @@ define void @spConv2sb(float* nocapture readonly %a, i8* nocapture %b) {
 ; CHECK-PWR8-NEXT:    stb 3, 0(4)
 ; CHECK-PWR8-NEXT:    blr
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %conv = fptosi float %0 to i8
-  store i8 %conv, i8* %b, align 1
+  store i8 %conv, ptr %b, align 1
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @dpConv2sdw_x(double* nocapture readonly %a, i64* nocapture %b,
+define void @dpConv2sdw_x(ptr nocapture readonly %a, ptr nocapture %b,
 ; CHECK-LABEL: dpConv2sdw_x:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd 0, 0(3)
@@ -375,18 +375,18 @@ define void @dpConv2sdw_x(double* nocapture readonly %a, i64* nocapture %b,
 ; CHECK-PWR8-NEXT:    blr
                           i32 signext %idx) {
 entry:
-  %0 = load double, double* %a, align 8
+  %0 = load double, ptr %a, align 8
   %conv = fptosi double %0 to i64
   %idxprom = sext i32 %idx to i64
-  %arrayidx = getelementptr inbounds i64, i64* %b, i64 %idxprom
-  store i64 %conv, i64* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds i64, ptr %b, i64 %idxprom
+  store i64 %conv, ptr %arrayidx, align 8
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @dpConv2sw_x(double* nocapture readonly %a, i32* nocapture %b,
+define void @dpConv2sw_x(ptr nocapture readonly %a, ptr nocapture %b,
 ; CHECK-LABEL: dpConv2sw_x:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd 0, 0(3)
@@ -404,18 +404,18 @@ define void @dpConv2sw_x(double* nocapture readonly %a, i32* nocapture %b,
 ; CHECK-PWR8-NEXT:    blr
                           i32 signext %idx) {
 entry:
-  %0 = load double, double* %a, align 8
+  %0 = load double, ptr %a, align 8
   %conv = fptosi double %0 to i32
   %idxprom = sext i32 %idx to i64
-  %arrayidx = getelementptr inbounds i32, i32* %b, i64 %idxprom
-  store i32 %conv, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %b, i64 %idxprom
+  store i32 %conv, ptr %arrayidx, align 4
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @dpConv2shw_x(double* nocapture readonly %a, i16* nocapture %b,
+define void @dpConv2shw_x(ptr nocapture readonly %a, ptr nocapture %b,
 ; CHECK-LABEL: dpConv2shw_x:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd 0, 0(3)
@@ -434,18 +434,18 @@ define void @dpConv2shw_x(double* nocapture readonly %a, i16* nocapture %b,
 ; CHECK-PWR8-NEXT:    blr
                           i32 signext %idx) {
 entry:
-  %0 = load double, double* %a, align 8
+  %0 = load double, ptr %a, align 8
   %conv = fptosi double %0 to i16
   %idxprom = sext i32 %idx to i64
-  %arrayidx = getelementptr inbounds i16, i16* %b, i64 %idxprom
-  store i16 %conv, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %b, i64 %idxprom
+  store i16 %conv, ptr %arrayidx, align 2
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @dpConv2sb_x(double* nocapture readonly %a, i8* nocapture %b,
+define void @dpConv2sb_x(ptr nocapture readonly %a, ptr nocapture %b,
 ; CHECK-LABEL: dpConv2sb_x:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd 0, 0(3)
@@ -462,18 +462,18 @@ define void @dpConv2sb_x(double* nocapture readonly %a, i8* nocapture %b,
 ; CHECK-PWR8-NEXT:    blr
                           i32 signext %idx) {
 entry:
-  %0 = load double, double* %a, align 8
+  %0 = load double, ptr %a, align 8
   %conv = fptosi double %0 to i8
   %idxprom = sext i32 %idx to i64
-  %arrayidx = getelementptr inbounds i8, i8* %b, i64 %idxprom
-  store i8 %conv, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %b, i64 %idxprom
+  store i8 %conv, ptr %arrayidx, align 1
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @spConv2sdw_x(float* nocapture readonly %a, i64* nocapture %b,
+define void @spConv2sdw_x(ptr nocapture readonly %a, ptr nocapture %b,
 ; CHECK-LABEL: spConv2sdw_x:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs 0, 0(3)
@@ -491,18 +491,18 @@ define void @spConv2sdw_x(float* nocapture readonly %a, i64* nocapture %b,
 ; CHECK-PWR8-NEXT:    blr
                           i32 signext %idx) {
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %conv = fptosi float %0 to i64
   %idxprom = sext i32 %idx to i64
-  %arrayidx = getelementptr inbounds i64, i64* %b, i64 %idxprom
-  store i64 %conv, i64* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds i64, ptr %b, i64 %idxprom
+  store i64 %conv, ptr %arrayidx, align 8
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @spConv2sw_x(float* nocapture readonly %a, i32* nocapture %b,
+define void @spConv2sw_x(ptr nocapture readonly %a, ptr nocapture %b,
 ; CHECK-LABEL: spConv2sw_x:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs 0, 0(3)
@@ -520,18 +520,18 @@ define void @spConv2sw_x(float* nocapture readonly %a, i32* nocapture %b,
 ; CHECK-PWR8-NEXT:    blr
                           i32 signext %idx) {
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %conv = fptosi float %0 to i32
   %idxprom = sext i32 %idx to i64
-  %arrayidx = getelementptr inbounds i32, i32* %b, i64 %idxprom
-  store i32 %conv, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %b, i64 %idxprom
+  store i32 %conv, ptr %arrayidx, align 4
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @spConv2shw_x(float* nocapture readonly %a, i16* nocapture %b,
+define void @spConv2shw_x(ptr nocapture readonly %a, ptr nocapture %b,
 ; CHECK-LABEL: spConv2shw_x:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs 0, 0(3)
@@ -550,18 +550,18 @@ define void @spConv2shw_x(float* nocapture readonly %a, i16* nocapture %b,
 ; CHECK-PWR8-NEXT:    blr
                           i32 signext %idx) {
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %conv = fptosi float %0 to i16
   %idxprom = sext i32 %idx to i64
-  %arrayidx = getelementptr inbounds i16, i16* %b, i64 %idxprom
-  store i16 %conv, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %b, i64 %idxprom
+  store i16 %conv, ptr %arrayidx, align 2
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @spConv2sb_x(float* nocapture readonly %a, i8* nocapture %b,
+define void @spConv2sb_x(ptr nocapture readonly %a, ptr nocapture %b,
 ; CHECK-LABEL: spConv2sb_x:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs 0, 0(3)
@@ -578,11 +578,11 @@ define void @spConv2sb_x(float* nocapture readonly %a, i8* nocapture %b,
 ; CHECK-PWR8-NEXT:    blr
                           i32 signext %idx) {
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %conv = fptosi float %0 to i8
   %idxprom = sext i32 %idx to i64
-  %arrayidx = getelementptr inbounds i8, i8* %b, i64 %idxprom
-  store i8 %conv, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %b, i64 %idxprom
+  store i8 %conv, ptr %arrayidx, align 1
   ret void
 
 
@@ -593,7 +593,7 @@ entry:
 ; ==========================================
 
 ; Function Attrs: norecurse nounwind
-define void @dpConv2udw(double* nocapture readonly %a, i64* nocapture %b) {
+define void @dpConv2udw(ptr nocapture readonly %a, ptr nocapture %b) {
 ; CHECK-LABEL: dpConv2udw:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd 0, 0(3)
@@ -608,16 +608,16 @@ define void @dpConv2udw(double* nocapture readonly %a, i64* nocapture %b) {
 ; CHECK-PWR8-NEXT:    stxsdx 0, 0, 4
 ; CHECK-PWR8-NEXT:    blr
 entry:
-  %0 = load double, double* %a, align 8
+  %0 = load double, ptr %a, align 8
   %conv = fptoui double %0 to i64
-  store i64 %conv, i64* %b, align 8
+  store i64 %conv, ptr %b, align 8
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @dpConv2uw(double* nocapture readonly %a, i32* nocapture %b) {
+define void @dpConv2uw(ptr nocapture readonly %a, ptr nocapture %b) {
 ; CHECK-LABEL: dpConv2uw:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd 0, 0(3)
@@ -632,16 +632,16 @@ define void @dpConv2uw(double* nocapture readonly %a, i32* nocapture %b) {
 ; CHECK-PWR8-NEXT:    stfiwx 0, 0, 4
 ; CHECK-PWR8-NEXT:    blr
 entry:
-  %0 = load double, double* %a, align 8
+  %0 = load double, ptr %a, align 8
   %conv = fptoui double %0 to i32
-  store i32 %conv, i32* %b, align 4
+  store i32 %conv, ptr %b, align 4
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @dpConv2uhw(double* nocapture readonly %a, i16* nocapture %b) {
+define void @dpConv2uhw(ptr nocapture readonly %a, ptr nocapture %b) {
 ; CHECK-LABEL: dpConv2uhw:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd 0, 0(3)
@@ -657,16 +657,16 @@ define void @dpConv2uhw(double* nocapture readonly %a, i16* nocapture %b) {
 ; CHECK-PWR8-NEXT:    sth 3, 0(4)
 ; CHECK-PWR8-NEXT:    blr
 entry:
-  %0 = load double, double* %a, align 8
+  %0 = load double, ptr %a, align 8
   %conv = fptoui double %0 to i16
-  store i16 %conv, i16* %b, align 2
+  store i16 %conv, ptr %b, align 2
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @dpConv2ub(double* nocapture readonly %a, i8* nocapture %b) {
+define void @dpConv2ub(ptr nocapture readonly %a, ptr nocapture %b) {
 ; CHECK-LABEL: dpConv2ub:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd 0, 0(3)
@@ -682,16 +682,16 @@ define void @dpConv2ub(double* nocapture readonly %a, i8* nocapture %b) {
 ; CHECK-PWR8-NEXT:    stb 3, 0(4)
 ; CHECK-PWR8-NEXT:    blr
 entry:
-  %0 = load double, double* %a, align 8
+  %0 = load double, ptr %a, align 8
   %conv = fptoui double %0 to i8
-  store i8 %conv, i8* %b, align 1
+  store i8 %conv, ptr %b, align 1
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @spConv2udw(float* nocapture readonly %a, i64* nocapture %b) {
+define void @spConv2udw(ptr nocapture readonly %a, ptr nocapture %b) {
 ; CHECK-LABEL: spConv2udw:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs 0, 0(3)
@@ -706,16 +706,16 @@ define void @spConv2udw(float* nocapture readonly %a, i64* nocapture %b) {
 ; CHECK-PWR8-NEXT:    stxsdx 0, 0, 4
 ; CHECK-PWR8-NEXT:    blr
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %conv = fptoui float %0 to i64
-  store i64 %conv, i64* %b, align 8
+  store i64 %conv, ptr %b, align 8
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @spConv2uw(float* nocapture readonly %a, i32* nocapture %b) {
+define void @spConv2uw(ptr nocapture readonly %a, ptr nocapture %b) {
 ; CHECK-LABEL: spConv2uw:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs 0, 0(3)
@@ -730,16 +730,16 @@ define void @spConv2uw(float* nocapture readonly %a, i32* nocapture %b) {
 ; CHECK-PWR8-NEXT:    stfiwx 0, 0, 4
 ; CHECK-PWR8-NEXT:    blr
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %conv = fptoui float %0 to i32
-  store i32 %conv, i32* %b, align 4
+  store i32 %conv, ptr %b, align 4
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @spConv2uhw(float* nocapture readonly %a, i16* nocapture %b) {
+define void @spConv2uhw(ptr nocapture readonly %a, ptr nocapture %b) {
 ; CHECK-LABEL: spConv2uhw:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs 0, 0(3)
@@ -755,16 +755,16 @@ define void @spConv2uhw(float* nocapture readonly %a, i16* nocapture %b) {
 ; CHECK-PWR8-NEXT:    sth 3, 0(4)
 ; CHECK-PWR8-NEXT:    blr
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %conv = fptoui float %0 to i16
-  store i16 %conv, i16* %b, align 2
+  store i16 %conv, ptr %b, align 2
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @spConv2ub(float* nocapture readonly %a, i8* nocapture %b) {
+define void @spConv2ub(ptr nocapture readonly %a, ptr nocapture %b) {
 ; CHECK-LABEL: spConv2ub:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs 0, 0(3)
@@ -780,16 +780,16 @@ define void @spConv2ub(float* nocapture readonly %a, i8* nocapture %b) {
 ; CHECK-PWR8-NEXT:    stb 3, 0(4)
 ; CHECK-PWR8-NEXT:    blr
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %conv = fptoui float %0 to i8
-  store i8 %conv, i8* %b, align 1
+  store i8 %conv, ptr %b, align 1
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @dpConv2udw_x(double* nocapture readonly %a, i64* nocapture %b,
+define void @dpConv2udw_x(ptr nocapture readonly %a, ptr nocapture %b,
 ; CHECK-LABEL: dpConv2udw_x:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd 0, 0(3)
@@ -807,18 +807,18 @@ define void @dpConv2udw_x(double* nocapture readonly %a, i64* nocapture %b,
 ; CHECK-PWR8-NEXT:    blr
                           i32 zeroext %idx) {
 entry:
-  %0 = load double, double* %a, align 8
+  %0 = load double, ptr %a, align 8
   %conv = fptoui double %0 to i64
   %idxprom = zext i32 %idx to i64
-  %arrayidx = getelementptr inbounds i64, i64* %b, i64 %idxprom
-  store i64 %conv, i64* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds i64, ptr %b, i64 %idxprom
+  store i64 %conv, ptr %arrayidx, align 8
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @dpConv2uw_x(double* nocapture readonly %a, i32* nocapture %b,
+define void @dpConv2uw_x(ptr nocapture readonly %a, ptr nocapture %b,
 ; CHECK-LABEL: dpConv2uw_x:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd 0, 0(3)
@@ -836,18 +836,18 @@ define void @dpConv2uw_x(double* nocapture readonly %a, i32* nocapture %b,
 ; CHECK-PWR8-NEXT:    blr
                           i32 zeroext %idx) {
 entry:
-  %0 = load double, double* %a, align 8
+  %0 = load double, ptr %a, align 8
   %conv = fptoui double %0 to i32
   %idxprom = zext i32 %idx to i64
-  %arrayidx = getelementptr inbounds i32, i32* %b, i64 %idxprom
-  store i32 %conv, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %b, i64 %idxprom
+  store i32 %conv, ptr %arrayidx, align 4
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @dpConv2uhw_x(double* nocapture readonly %a, i16* nocapture %b,
+define void @dpConv2uhw_x(ptr nocapture readonly %a, ptr nocapture %b,
 ; CHECK-LABEL: dpConv2uhw_x:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd 0, 0(3)
@@ -866,18 +866,18 @@ define void @dpConv2uhw_x(double* nocapture readonly %a, i16* nocapture %b,
 ; CHECK-PWR8-NEXT:    blr
                           i32 zeroext %idx) {
 entry:
-  %0 = load double, double* %a, align 8
+  %0 = load double, ptr %a, align 8
   %conv = fptoui double %0 to i16
   %idxprom = zext i32 %idx to i64
-  %arrayidx = getelementptr inbounds i16, i16* %b, i64 %idxprom
-  store i16 %conv, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %b, i64 %idxprom
+  store i16 %conv, ptr %arrayidx, align 2
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @dpConv2ub_x(double* nocapture readonly %a, i8* nocapture %b,
+define void @dpConv2ub_x(ptr nocapture readonly %a, ptr nocapture %b,
 ; CHECK-LABEL: dpConv2ub_x:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd 0, 0(3)
@@ -894,18 +894,18 @@ define void @dpConv2ub_x(double* nocapture readonly %a, i8* nocapture %b,
 ; CHECK-PWR8-NEXT:    blr
                           i32 zeroext %idx) {
 entry:
-  %0 = load double, double* %a, align 8
+  %0 = load double, ptr %a, align 8
   %conv = fptoui double %0 to i8
   %idxprom = zext i32 %idx to i64
-  %arrayidx = getelementptr inbounds i8, i8* %b, i64 %idxprom
-  store i8 %conv, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %b, i64 %idxprom
+  store i8 %conv, ptr %arrayidx, align 1
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @spConv2udw_x(float* nocapture readonly %a, i64* nocapture %b,
+define void @spConv2udw_x(ptr nocapture readonly %a, ptr nocapture %b,
 ; CHECK-LABEL: spConv2udw_x:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs 0, 0(3)
@@ -923,18 +923,18 @@ define void @spConv2udw_x(float* nocapture readonly %a, i64* nocapture %b,
 ; CHECK-PWR8-NEXT:    blr
                           i32 zeroext %idx) {
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %conv = fptoui float %0 to i64
   %idxprom = zext i32 %idx to i64
-  %arrayidx = getelementptr inbounds i64, i64* %b, i64 %idxprom
-  store i64 %conv, i64* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds i64, ptr %b, i64 %idxprom
+  store i64 %conv, ptr %arrayidx, align 8
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @spConv2uw_x(float* nocapture readonly %a, i32* nocapture %b,
+define void @spConv2uw_x(ptr nocapture readonly %a, ptr nocapture %b,
 ; CHECK-LABEL: spConv2uw_x:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs 0, 0(3)
@@ -952,18 +952,18 @@ define void @spConv2uw_x(float* nocapture readonly %a, i32* nocapture %b,
 ; CHECK-PWR8-NEXT:    blr
                           i32 zeroext %idx) {
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %conv = fptoui float %0 to i32
   %idxprom = zext i32 %idx to i64
-  %arrayidx = getelementptr inbounds i32, i32* %b, i64 %idxprom
-  store i32 %conv, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %b, i64 %idxprom
+  store i32 %conv, ptr %arrayidx, align 4
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @spConv2uhw_x(float* nocapture readonly %a, i16* nocapture %b,
+define void @spConv2uhw_x(ptr nocapture readonly %a, ptr nocapture %b,
 ; CHECK-LABEL: spConv2uhw_x:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs 0, 0(3)
@@ -982,18 +982,18 @@ define void @spConv2uhw_x(float* nocapture readonly %a, i16* nocapture %b,
 ; CHECK-PWR8-NEXT:    blr
                           i32 zeroext %idx) {
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %conv = fptoui float %0 to i16
   %idxprom = zext i32 %idx to i64
-  %arrayidx = getelementptr inbounds i16, i16* %b, i64 %idxprom
-  store i16 %conv, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %b, i64 %idxprom
+  store i16 %conv, ptr %arrayidx, align 2
   ret void
 
 
 }
 
 ; Function Attrs: norecurse nounwind
-define void @spConv2ub_x(float* nocapture readonly %a, i8* nocapture %b,
+define void @spConv2ub_x(ptr nocapture readonly %a, ptr nocapture %b,
 ; CHECK-LABEL: spConv2ub_x:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfs 0, 0(3)
@@ -1010,11 +1010,11 @@ define void @spConv2ub_x(float* nocapture readonly %a, i8* nocapture %b,
 ; CHECK-PWR8-NEXT:    blr
                           i32 zeroext %idx) {
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %conv = fptoui float %0 to i8
   %idxprom = zext i32 %idx to i64
-  %arrayidx = getelementptr inbounds i8, i8* %b, i64 %idxprom
-  store i8 %conv, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %b, i64 %idxprom
+  store i8 %conv, ptr %arrayidx, align 1
   ret void
 
 

diff  --git a/llvm/test/CodeGen/PowerPC/structsinmem.ll b/llvm/test/CodeGen/PowerPC/structsinmem.ll
index bbbd95ccd5f5..c9c93161f90f 100644
--- a/llvm/test/CodeGen/PowerPC/structsinmem.ll
+++ b/llvm/test/CodeGen/PowerPC/structsinmem.ll
@@ -42,21 +42,14 @@ entry:
   %p5 = alloca %struct.s5, align 4
   %p6 = alloca %struct.s6, align 4
   %p7 = alloca %struct.s7, align 4
-  %0 = bitcast %struct.s1* %p1 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.s1, %struct.s1* @caller1.p1, i32 0, i32 0), i64 1, i1 false)
-  %1 = bitcast %struct.s2* %p2 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 %1, i8* align 2 bitcast (%struct.s2* @caller1.p2 to i8*), i64 2, i1 false)
-  %2 = bitcast %struct.s3* %p3 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 %2, i8* align 2 bitcast ({ i16, i8, i8 }* @caller1.p3 to i8*), i64 4, i1 false)
-  %3 = bitcast %struct.s4* %p4 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %3, i8* align 4 bitcast (%struct.s4* @caller1.p4 to i8*), i64 4, i1 false)
-  %4 = bitcast %struct.s5* %p5 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %4, i8* align 4 bitcast ({ i32, i8, [3 x i8] }* @caller1.p5 to i8*), i64 8, i1 false)
-  %5 = bitcast %struct.s6* %p6 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %5, i8* align 4 bitcast ({ i32, i16, [2 x i8] }* @caller1.p6 to i8*), i64 8, i1 false)
-  %6 = bitcast %struct.s7* %p7 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %6, i8* align 4 bitcast ({ i32, i16, i8, i8 }* @caller1.p7 to i8*), i64 8, i1 false)
-  %call = call i32 @callee1(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, %struct.s1* byval(%struct.s1) %p1, %struct.s2* byval(%struct.s2) %p2, %struct.s3* byval(%struct.s3) %p3, %struct.s4* byval(%struct.s4) %p4, %struct.s5* byval(%struct.s5) %p5, %struct.s6* byval(%struct.s6) %p6, %struct.s7* byval(%struct.s7) %p7)
+  call void @llvm.memcpy.p0.p0.i64(ptr %p1, ptr @caller1.p1, i64 1, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 2 %p2, ptr align 2 @caller1.p2, i64 2, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 2 %p3, ptr align 2 @caller1.p3, i64 4, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %p4, ptr align 4 @caller1.p4, i64 4, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %p5, ptr align 4 @caller1.p5, i64 8, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %p6, ptr align 4 @caller1.p6, i64 8, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %p7, ptr align 4 @caller1.p7, i64 8, i1 false)
+  %call = call i32 @callee1(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, ptr byval(%struct.s1) %p1, ptr byval(%struct.s2) %p2, ptr byval(%struct.s3) %p3, ptr byval(%struct.s4) %p4, ptr byval(%struct.s5) %p5, ptr byval(%struct.s6) %p6, ptr byval(%struct.s7) %p7)
   ret i32 %call
 
 ; CHECK: stb {{[0-9]+}}, 119(1)
@@ -68,9 +61,9 @@ entry:
 ; CHECK: std {{[0-9]+}}, 160(1)
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
 
-define internal i32 @callee1(i32 %z1, i32 %z2, i32 %z3, i32 %z4, i32 %z5, i32 %z6, i32 %z7, i32 %z8, %struct.s1* byval(%struct.s1) %v1, %struct.s2* byval(%struct.s2) %v2, %struct.s3* byval(%struct.s3) %v3, %struct.s4* byval(%struct.s4) %v4, %struct.s5* byval(%struct.s5) %v5, %struct.s6* byval(%struct.s6) %v6, %struct.s7* byval(%struct.s7) %v7) nounwind {
+define internal i32 @callee1(i32 %z1, i32 %z2, i32 %z3, i32 %z4, i32 %z5, i32 %z6, i32 %z7, i32 %z8, ptr byval(%struct.s1) %v1, ptr byval(%struct.s2) %v2, ptr byval(%struct.s3) %v3, ptr byval(%struct.s4) %v4, ptr byval(%struct.s5) %v5, ptr byval(%struct.s6) %v6, ptr byval(%struct.s7) %v7) nounwind {
 entry:
   %z1.addr = alloca i32, align 4
   %z2.addr = alloca i32, align 4
@@ -80,36 +73,29 @@ entry:
   %z6.addr = alloca i32, align 4
   %z7.addr = alloca i32, align 4
   %z8.addr = alloca i32, align 4
-  store i32 %z1, i32* %z1.addr, align 4
-  store i32 %z2, i32* %z2.addr, align 4
-  store i32 %z3, i32* %z3.addr, align 4
-  store i32 %z4, i32* %z4.addr, align 4
-  store i32 %z5, i32* %z5.addr, align 4
-  store i32 %z6, i32* %z6.addr, align 4
-  store i32 %z7, i32* %z7.addr, align 4
-  store i32 %z8, i32* %z8.addr, align 4
-  %a = getelementptr inbounds %struct.s1, %struct.s1* %v1, i32 0, i32 0
-  %0 = load i8, i8* %a, align 1
+  store i32 %z1, ptr %z1.addr, align 4
+  store i32 %z2, ptr %z2.addr, align 4
+  store i32 %z3, ptr %z3.addr, align 4
+  store i32 %z4, ptr %z4.addr, align 4
+  store i32 %z5, ptr %z5.addr, align 4
+  store i32 %z6, ptr %z6.addr, align 4
+  store i32 %z7, ptr %z7.addr, align 4
+  store i32 %z8, ptr %z8.addr, align 4
+  %0 = load i8, ptr %v1, align 1
   %conv = zext i8 %0 to i32
-  %a1 = getelementptr inbounds %struct.s2, %struct.s2* %v2, i32 0, i32 0
-  %1 = load i16, i16* %a1, align 2
+  %1 = load i16, ptr %v2, align 2
   %conv2 = sext i16 %1 to i32
   %add = add nsw i32 %conv, %conv2
-  %a3 = getelementptr inbounds %struct.s3, %struct.s3* %v3, i32 0, i32 0
-  %2 = load i16, i16* %a3, align 2
+  %2 = load i16, ptr %v3, align 2
   %conv4 = sext i16 %2 to i32
   %add5 = add nsw i32 %add, %conv4
-  %a6 = getelementptr inbounds %struct.s4, %struct.s4* %v4, i32 0, i32 0
-  %3 = load i32, i32* %a6, align 4
+  %3 = load i32, ptr %v4, align 4
   %add7 = add nsw i32 %add5, %3
-  %a8 = getelementptr inbounds %struct.s5, %struct.s5* %v5, i32 0, i32 0
-  %4 = load i32, i32* %a8, align 4
+  %4 = load i32, ptr %v5, align 4
   %add9 = add nsw i32 %add7, %4
-  %a10 = getelementptr inbounds %struct.s6, %struct.s6* %v6, i32 0, i32 0
-  %5 = load i32, i32* %a10, align 4
+  %5 = load i32, ptr %v6, align 4
   %add11 = add nsw i32 %add9, %5
-  %a12 = getelementptr inbounds %struct.s7, %struct.s7* %v7, i32 0, i32 0
-  %6 = load i32, i32* %a12, align 4
+  %6 = load i32, ptr %v7, align 4
   %add13 = add nsw i32 %add11, %6
   ret i32 %add13
 
@@ -131,21 +117,14 @@ entry:
   %p5 = alloca %struct.t5, align 1
   %p6 = alloca %struct.t6, align 1
   %p7 = alloca %struct.t7, align 1
-  %0 = bitcast %struct.t1* %p1 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.t1, %struct.t1* @caller2.p1, i32 0, i32 0), i64 1, i1 false)
-  %1 = bitcast %struct.t2* %p2 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ({ i16 }* @caller2.p2 to i8*), i64 2, i1 false)
-  %2 = bitcast %struct.t3* %p3 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* bitcast (%struct.t3* @caller2.p3 to i8*), i64 3, i1 false)
-  %3 = bitcast %struct.t4* %p4 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* bitcast ({ i32 }* @caller2.p4 to i8*), i64 4, i1 false)
-  %4 = bitcast %struct.t5* %p5 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* bitcast (%struct.t5* @caller2.p5 to i8*), i64 5, i1 false)
-  %5 = bitcast %struct.t6* %p6 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast (%struct.t6* @caller2.p6 to i8*), i64 6, i1 false)
-  %6 = bitcast %struct.t7* %p7 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast (%struct.t7* @caller2.p7 to i8*), i64 7, i1 false)
-  %call = call i32 @callee2(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, %struct.t1* byval(%struct.t1) %p1, %struct.t2* byval(%struct.t2) %p2, %struct.t3* byval(%struct.t3) %p3, %struct.t4* byval(%struct.t4) %p4, %struct.t5* byval(%struct.t5) %p5, %struct.t6* byval(%struct.t6) %p6, %struct.t7* byval(%struct.t7) %p7)
+  call void @llvm.memcpy.p0.p0.i64(ptr %p1, ptr @caller2.p1, i64 1, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %p2, ptr @caller2.p2, i64 2, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %p3, ptr @caller2.p3, i64 3, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %p4, ptr @caller2.p4, i64 4, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %p5, ptr @caller2.p5, i64 5, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %p6, ptr @caller2.p6, i64 6, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %p7, ptr @caller2.p7, i64 7, i1 false)
+  %call = call i32 @callee2(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, ptr byval(%struct.t1) %p1, ptr byval(%struct.t2) %p2, ptr byval(%struct.t3) %p3, ptr byval(%struct.t4) %p4, ptr byval(%struct.t5) %p5, ptr byval(%struct.t6) %p6, ptr byval(%struct.t7) %p7)
   ret i32 %call
 
 ; CHECK: stb {{[0-9]+}}, 119(1)
@@ -161,7 +140,7 @@ entry:
 ; CHECK: stw {{[0-9]+}}, 161(1)
 }
 
-define internal i32 @callee2(i32 %z1, i32 %z2, i32 %z3, i32 %z4, i32 %z5, i32 %z6, i32 %z7, i32 %z8, %struct.t1* byval(%struct.t1) %v1, %struct.t2* byval(%struct.t2) %v2, %struct.t3* byval(%struct.t3) %v3, %struct.t4* byval(%struct.t4) %v4, %struct.t5* byval(%struct.t5) %v5, %struct.t6* byval(%struct.t6) %v6, %struct.t7* byval(%struct.t7) %v7) nounwind {
+define internal i32 @callee2(i32 %z1, i32 %z2, i32 %z3, i32 %z4, i32 %z5, i32 %z6, i32 %z7, i32 %z8, ptr byval(%struct.t1) %v1, ptr byval(%struct.t2) %v2, ptr byval(%struct.t3) %v3, ptr byval(%struct.t4) %v4, ptr byval(%struct.t5) %v5, ptr byval(%struct.t6) %v6, ptr byval(%struct.t7) %v7) nounwind {
 entry:
   %z1.addr = alloca i32, align 4
   %z2.addr = alloca i32, align 4
@@ -171,36 +150,29 @@ entry:
   %z6.addr = alloca i32, align 4
   %z7.addr = alloca i32, align 4
   %z8.addr = alloca i32, align 4
-  store i32 %z1, i32* %z1.addr, align 4
-  store i32 %z2, i32* %z2.addr, align 4
-  store i32 %z3, i32* %z3.addr, align 4
-  store i32 %z4, i32* %z4.addr, align 4
-  store i32 %z5, i32* %z5.addr, align 4
-  store i32 %z6, i32* %z6.addr, align 4
-  store i32 %z7, i32* %z7.addr, align 4
-  store i32 %z8, i32* %z8.addr, align 4
-  %a = getelementptr inbounds %struct.t1, %struct.t1* %v1, i32 0, i32 0
-  %0 = load i8, i8* %a, align 1
+  store i32 %z1, ptr %z1.addr, align 4
+  store i32 %z2, ptr %z2.addr, align 4
+  store i32 %z3, ptr %z3.addr, align 4
+  store i32 %z4, ptr %z4.addr, align 4
+  store i32 %z5, ptr %z5.addr, align 4
+  store i32 %z6, ptr %z6.addr, align 4
+  store i32 %z7, ptr %z7.addr, align 4
+  store i32 %z8, ptr %z8.addr, align 4
+  %0 = load i8, ptr %v1, align 1
   %conv = zext i8 %0 to i32
-  %a1 = getelementptr inbounds %struct.t2, %struct.t2* %v2, i32 0, i32 0
-  %1 = load i16, i16* %a1, align 1
+  %1 = load i16, ptr %v2, align 1
   %conv2 = sext i16 %1 to i32
   %add = add nsw i32 %conv, %conv2
-  %a3 = getelementptr inbounds %struct.t3, %struct.t3* %v3, i32 0, i32 0
-  %2 = load i16, i16* %a3, align 1
+  %2 = load i16, ptr %v3, align 1
   %conv4 = sext i16 %2 to i32
   %add5 = add nsw i32 %add, %conv4
-  %a6 = getelementptr inbounds %struct.t4, %struct.t4* %v4, i32 0, i32 0
-  %3 = load i32, i32* %a6, align 1
+  %3 = load i32, ptr %v4, align 1
   %add7 = add nsw i32 %add5, %3
-  %a8 = getelementptr inbounds %struct.t5, %struct.t5* %v5, i32 0, i32 0
-  %4 = load i32, i32* %a8, align 1
+  %4 = load i32, ptr %v5, align 1
   %add9 = add nsw i32 %add7, %4
-  %a10 = getelementptr inbounds %struct.t6, %struct.t6* %v6, i32 0, i32 0
-  %5 = load i32, i32* %a10, align 1
+  %5 = load i32, ptr %v6, align 1
   %add11 = add nsw i32 %add9, %5
-  %a12 = getelementptr inbounds %struct.t7, %struct.t7* %v7, i32 0, i32 0
-  %6 = load i32, i32* %a12, align 1
+  %6 = load i32, ptr %v7, align 1
   %add13 = add nsw i32 %add11, %6
   ret i32 %add13
 

diff  --git a/llvm/test/CodeGen/PowerPC/structsinregs.ll b/llvm/test/CodeGen/PowerPC/structsinregs.ll
index 9fd84c5202c6..eb804c93bf1e 100644
--- a/llvm/test/CodeGen/PowerPC/structsinregs.ll
+++ b/llvm/test/CodeGen/PowerPC/structsinregs.ll
@@ -42,21 +42,14 @@ entry:
   %p5 = alloca %struct.s5, align 4
   %p6 = alloca %struct.s6, align 4
   %p7 = alloca %struct.s7, align 4
-  %0 = bitcast %struct.s1* %p1 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.s1, %struct.s1* @caller1.p1, i32 0, i32 0), i64 1, i1 false)
-  %1 = bitcast %struct.s2* %p2 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 %1, i8* align 2 bitcast (%struct.s2* @caller1.p2 to i8*), i64 2, i1 false)
-  %2 = bitcast %struct.s3* %p3 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 %2, i8* align 2 bitcast ({ i16, i8, i8 }* @caller1.p3 to i8*), i64 4, i1 false)
-  %3 = bitcast %struct.s4* %p4 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %3, i8* align 4 bitcast (%struct.s4* @caller1.p4 to i8*), i64 4, i1 false)
-  %4 = bitcast %struct.s5* %p5 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %4, i8* align 4 bitcast ({ i32, i8, [3 x i8] }* @caller1.p5 to i8*), i64 8, i1 false)
-  %5 = bitcast %struct.s6* %p6 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %5, i8* align 4 bitcast ({ i32, i16, [2 x i8] }* @caller1.p6 to i8*), i64 8, i1 false)
-  %6 = bitcast %struct.s7* %p7 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %6, i8* align 4 bitcast ({ i32, i16, i8, i8 }* @caller1.p7 to i8*), i64 8, i1 false)
-  %call = call i32 @callee1(%struct.s1* byval(%struct.s1) %p1, %struct.s2* byval(%struct.s2) %p2, %struct.s3* byval(%struct.s3) %p3, %struct.s4* byval(%struct.s4) %p4, %struct.s5* byval(%struct.s5) %p5, %struct.s6* byval(%struct.s6) %p6, %struct.s7* byval(%struct.s7) %p7)
+  call void @llvm.memcpy.p0.p0.i64(ptr %p1, ptr @caller1.p1, i64 1, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 2 %p2, ptr align 2 @caller1.p2, i64 2, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 2 %p3, ptr align 2 @caller1.p3, i64 4, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %p4, ptr align 4 @caller1.p4, i64 4, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %p5, ptr align 4 @caller1.p5, i64 8, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %p6, ptr align 4 @caller1.p6, i64 8, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %p7, ptr align 4 @caller1.p7, i64 8, i1 false)
+  %call = call i32 @callee1(ptr byval(%struct.s1) %p1, ptr byval(%struct.s2) %p2, ptr byval(%struct.s3) %p3, ptr byval(%struct.s4) %p4, ptr byval(%struct.s5) %p5, ptr byval(%struct.s6) %p6, ptr byval(%struct.s7) %p7)
   ret i32 %call
 
 ; CHECK-LABEL: caller1
@@ -69,32 +62,25 @@ entry:
 ; CHECK: lbz 3, 160(31)
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
 
-define internal i32 @callee1(%struct.s1* byval(%struct.s1) %v1, %struct.s2* byval(%struct.s2) %v2, %struct.s3* byval(%struct.s3) %v3, %struct.s4* byval(%struct.s4) %v4, %struct.s5* byval(%struct.s5) %v5, %struct.s6* byval(%struct.s6) %v6, %struct.s7* byval(%struct.s7) %v7) nounwind {
+define internal i32 @callee1(ptr byval(%struct.s1) %v1, ptr byval(%struct.s2) %v2, ptr byval(%struct.s3) %v3, ptr byval(%struct.s4) %v4, ptr byval(%struct.s5) %v5, ptr byval(%struct.s6) %v6, ptr byval(%struct.s7) %v7) nounwind {
 entry:
-  %a = getelementptr inbounds %struct.s1, %struct.s1* %v1, i32 0, i32 0
-  %0 = load i8, i8* %a, align 1
+  %0 = load i8, ptr %v1, align 1
   %conv = zext i8 %0 to i32
-  %a1 = getelementptr inbounds %struct.s2, %struct.s2* %v2, i32 0, i32 0
-  %1 = load i16, i16* %a1, align 2
+  %1 = load i16, ptr %v2, align 2
   %conv2 = sext i16 %1 to i32
   %add = add nsw i32 %conv, %conv2
-  %a3 = getelementptr inbounds %struct.s3, %struct.s3* %v3, i32 0, i32 0
-  %2 = load i16, i16* %a3, align 2
+  %2 = load i16, ptr %v3, align 2
   %conv4 = sext i16 %2 to i32
   %add5 = add nsw i32 %add, %conv4
-  %a6 = getelementptr inbounds %struct.s4, %struct.s4* %v4, i32 0, i32 0
-  %3 = load i32, i32* %a6, align 4
+  %3 = load i32, ptr %v4, align 4
   %add7 = add nsw i32 %add5, %3
-  %a8 = getelementptr inbounds %struct.s5, %struct.s5* %v5, i32 0, i32 0
-  %4 = load i32, i32* %a8, align 4
+  %4 = load i32, ptr %v5, align 4
   %add9 = add nsw i32 %add7, %4
-  %a10 = getelementptr inbounds %struct.s6, %struct.s6* %v6, i32 0, i32 0
-  %5 = load i32, i32* %a10, align 4
+  %5 = load i32, ptr %v6, align 4
   %add11 = add nsw i32 %add9, %5
-  %a12 = getelementptr inbounds %struct.s7, %struct.s7* %v7, i32 0, i32 0
-  %6 = load i32, i32* %a12, align 4
+  %6 = load i32, ptr %v7, align 4
   %add13 = add nsw i32 %add11, %6
   ret i32 %add13
 
@@ -124,21 +110,14 @@ entry:
   %p5 = alloca %struct.t5, align 1
   %p6 = alloca %struct.t6, align 1
   %p7 = alloca %struct.t7, align 1
-  %0 = bitcast %struct.t1* %p1 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.t1, %struct.t1* @caller2.p1, i32 0, i32 0), i64 1, i1 false)
-  %1 = bitcast %struct.t2* %p2 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ({ i16 }* @caller2.p2 to i8*), i64 2, i1 false)
-  %2 = bitcast %struct.t3* %p3 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* bitcast (%struct.t3* @caller2.p3 to i8*), i64 3, i1 false)
-  %3 = bitcast %struct.t4* %p4 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* bitcast ({ i32 }* @caller2.p4 to i8*), i64 4, i1 false)
-  %4 = bitcast %struct.t5* %p5 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* bitcast (%struct.t5* @caller2.p5 to i8*), i64 5, i1 false)
-  %5 = bitcast %struct.t6* %p6 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast (%struct.t6* @caller2.p6 to i8*), i64 6, i1 false)
-  %6 = bitcast %struct.t7* %p7 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast (%struct.t7* @caller2.p7 to i8*), i64 7, i1 false)
-  %call = call i32 @callee2(%struct.t1* byval(%struct.t1) %p1, %struct.t2* byval(%struct.t2) %p2, %struct.t3* byval(%struct.t3) %p3, %struct.t4* byval(%struct.t4) %p4, %struct.t5* byval(%struct.t5) %p5, %struct.t6* byval(%struct.t6) %p6, %struct.t7* byval(%struct.t7) %p7)
+  call void @llvm.memcpy.p0.p0.i64(ptr %p1, ptr @caller2.p1, i64 1, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %p2, ptr @caller2.p2, i64 2, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %p3, ptr @caller2.p3, i64 3, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %p4, ptr @caller2.p4, i64 4, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %p5, ptr @caller2.p5, i64 5, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %p6, ptr @caller2.p6, i64 6, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %p7, ptr @caller2.p7, i64 7, i1 false)
+  %call = call i32 @callee2(ptr byval(%struct.t1) %p1, ptr byval(%struct.t2) %p2, ptr byval(%struct.t3) %p3, ptr byval(%struct.t4) %p4, ptr byval(%struct.t5) %p5, ptr byval(%struct.t6) %p6, ptr byval(%struct.t7) %p7)
   ret i32 %call
 
 ; CHECK-LABEL: caller2
@@ -159,30 +138,23 @@ entry:
 ; CHECK: lbz 3, 160(31)
 }
 
-define internal i32 @callee2(%struct.t1* byval(%struct.t1) %v1, %struct.t2* byval(%struct.t2) %v2, %struct.t3* byval(%struct.t3) %v3, %struct.t4* byval(%struct.t4) %v4, %struct.t5* byval(%struct.t5) %v5, %struct.t6* byval(%struct.t6) %v6, %struct.t7* byval(%struct.t7) %v7) nounwind {
+define internal i32 @callee2(ptr byval(%struct.t1) %v1, ptr byval(%struct.t2) %v2, ptr byval(%struct.t3) %v3, ptr byval(%struct.t4) %v4, ptr byval(%struct.t5) %v5, ptr byval(%struct.t6) %v6, ptr byval(%struct.t7) %v7) nounwind {
 entry:
-  %a = getelementptr inbounds %struct.t1, %struct.t1* %v1, i32 0, i32 0
-  %0 = load i8, i8* %a, align 1
+  %0 = load i8, ptr %v1, align 1
   %conv = zext i8 %0 to i32
-  %a1 = getelementptr inbounds %struct.t2, %struct.t2* %v2, i32 0, i32 0
-  %1 = load i16, i16* %a1, align 1
+  %1 = load i16, ptr %v2, align 1
   %conv2 = sext i16 %1 to i32
   %add = add nsw i32 %conv, %conv2
-  %a3 = getelementptr inbounds %struct.t3, %struct.t3* %v3, i32 0, i32 0
-  %2 = load i16, i16* %a3, align 1
+  %2 = load i16, ptr %v3, align 1
   %conv4 = sext i16 %2 to i32
   %add5 = add nsw i32 %add, %conv4
-  %a6 = getelementptr inbounds %struct.t4, %struct.t4* %v4, i32 0, i32 0
-  %3 = load i32, i32* %a6, align 1
+  %3 = load i32, ptr %v4, align 1
   %add7 = add nsw i32 %add5, %3
-  %a8 = getelementptr inbounds %struct.t5, %struct.t5* %v5, i32 0, i32 0
-  %4 = load i32, i32* %a8, align 1
+  %4 = load i32, ptr %v5, align 1
   %add9 = add nsw i32 %add7, %4
-  %a10 = getelementptr inbounds %struct.t6, %struct.t6* %v6, i32 0, i32 0
-  %5 = load i32, i32* %a10, align 1
+  %5 = load i32, ptr %v6, align 1
   %add11 = add nsw i32 %add9, %5
-  %a12 = getelementptr inbounds %struct.t7, %struct.t7* %v7, i32 0, i32 0
-  %6 = load i32, i32* %a12, align 1
+  %6 = load i32, ptr %v7, align 1
   %add13 = add nsw i32 %add11, %6
   ret i32 %add13
 

diff  --git a/llvm/test/CodeGen/PowerPC/stwu-gta.ll b/llvm/test/CodeGen/PowerPC/stwu-gta.ll
index 2ca72ffd79d4..e413e0e776d1 100644
--- a/llvm/test/CodeGen/PowerPC/stwu-gta.ll
+++ b/llvm/test/CodeGen/PowerPC/stwu-gta.ll
@@ -8,8 +8,8 @@ target triple = "powerpc-unknown-linux"
 
 define void @_GLOBAL__I_a() nounwind section ".text.startup" {
 entry:
-  store i32 5, i32* getelementptr inbounds (%class.Two.0.5, %class.Two.0.5* @foo, i32 0, i32 0), align 4
-  store i32 6, i32* getelementptr inbounds (%class.Two.0.5, %class.Two.0.5* @foo, i32 0, i32 1), align 4
+  store i32 5, ptr @foo, align 4
+  store i32 6, ptr getelementptr inbounds (%class.Two.0.5, ptr @foo, i32 0, i32 1), align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/stwu-sched.ll b/llvm/test/CodeGen/PowerPC/stwu-sched.ll
index 36afaf84a296..1f4a9246e77c 100644
--- a/llvm/test/CodeGen/PowerPC/stwu-sched.ll
+++ b/llvm/test/CodeGen/PowerPC/stwu-sched.ll
@@ -9,7 +9,7 @@
 %0 = type { i32, i32 }
 
 ; Function Attrs: norecurse nounwind writeonly
-define void @initCombList(%0* nocapture, i32 signext) local_unnamed_addr #0 {
+define void @initCombList(ptr nocapture, i32 signext) local_unnamed_addr #0 {
 ; CHECK-LABEL: initCombList:
 ; CHECK: addi 4, 4, -8
 ; CHECK: stwu [[REG:[0-9]+]], 64(3)
@@ -23,7 +23,7 @@ define void @initCombList(%0* nocapture, i32 signext) local_unnamed_addr #0 {
   br i1 undef, label %6, label %4
 
 ; <label>:4:                                      ; preds = %2
-  store i32 0, i32* undef, align 4, !tbaa !1
+  store i32 0, ptr undef, align 4, !tbaa !1
   %5 = add nuw nsw i64 0, 1
   br label %6
 
@@ -33,23 +33,23 @@ define void @initCombList(%0* nocapture, i32 signext) local_unnamed_addr #0 {
 
 ; <label>:8:                                      ; preds = %8, %6
   %9 = phi i64 [ %21, %8 ], [ %7, %6 ]
-  %10 = getelementptr inbounds %0, %0* %0, i64 %9, i32 1
-  store i32 0, i32* %10, align 4, !tbaa !1
+  %10 = getelementptr inbounds %0, ptr %0, i64 %9, i32 1
+  store i32 0, ptr %10, align 4, !tbaa !1
   %11 = add nuw nsw i64 %9, 1
-  %12 = getelementptr inbounds %0, %0* %0, i64 %11, i32 1
-  store i32 0, i32* %12, align 4, !tbaa !1
+  %12 = getelementptr inbounds %0, ptr %0, i64 %11, i32 1
+  store i32 0, ptr %12, align 4, !tbaa !1
   %13 = add nsw i64 %9, 2
-  %14 = getelementptr inbounds %0, %0* %0, i64 %13, i32 1
-  store i32 0, i32* %14, align 4, !tbaa !1
+  %14 = getelementptr inbounds %0, ptr %0, i64 %13, i32 1
+  store i32 0, ptr %14, align 4, !tbaa !1
   %15 = add nsw i64 %9, 3
-  %16 = getelementptr inbounds %0, %0* %0, i64 %15, i32 1
-  store i32 0, i32* %16, align 4, !tbaa !1
+  %16 = getelementptr inbounds %0, ptr %0, i64 %15, i32 1
+  store i32 0, ptr %16, align 4, !tbaa !1
   %17 = add nsw i64 %9, 4
-  %18 = getelementptr inbounds %0, %0* %0, i64 %17, i32 1
-  store i32 0, i32* %18, align 4, !tbaa !1
+  %18 = getelementptr inbounds %0, ptr %0, i64 %17, i32 1
+  store i32 0, ptr %18, align 4, !tbaa !1
   %19 = add nsw i64 %9, 6
-  %20 = getelementptr inbounds %0, %0* %0, i64 %19, i32 1
-  store i32 0, i32* %20, align 4, !tbaa !1
+  %20 = getelementptr inbounds %0, ptr %0, i64 %19, i32 1
+  store i32 0, ptr %20, align 4, !tbaa !1
   %21 = add nsw i64 %9, 8
   %22 = icmp eq i64 %21, %3
   br i1 %22, label %23, label %8, !llvm.loop !6

diff  --git a/llvm/test/CodeGen/PowerPC/stwu8.ll b/llvm/test/CodeGen/PowerPC/stwu8.ll
index f6d7ec9334ce..a0f10af9bf5b 100644
--- a/llvm/test/CodeGen/PowerPC/stwu8.ll
+++ b/llvm/test/CodeGen/PowerPC/stwu8.ll
@@ -7,18 +7,17 @@ target triple = "powerpc64-unknown-linux-gnu"
 %"class.std::_Rb_tree.19.101.511.536" = type { %"struct.std::_Rb_tree<std::pair<const char *, const char *>, std::pair<const std::pair<const char *, const char *>, int>, std::_Select1st<std::pair<const std::pair<const char *, const char *>, int>>, std::less<std::pair<const char *, const char *>>, std::allocator<std::pair<const std::pair<const char *, const char *>, int>> >::_Rb_tree_impl.18.100.510.535" }
 %"struct.std::_Rb_tree<std::pair<const char *, const char *>, std::pair<const std::pair<const char *, const char *>, int>, std::_Select1st<std::pair<const std::pair<const char *, const char *>, int>>, std::less<std::pair<const char *, const char *>>, std::allocator<std::pair<const std::pair<const char *, const char *>, int>> >::_Rb_tree_impl.18.100.510.535" = type { %"struct.std::less.16.98.508.533", %"struct.std::_Rb_tree_node_base.17.99.509.534", i64 }
 %"struct.std::less.16.98.508.533" = type { i8 }
-%"struct.std::_Rb_tree_node_base.17.99.509.534" = type { i32, %"struct.std::_Rb_tree_node_base.17.99.509.534"*, %"struct.std::_Rb_tree_node_base.17.99.509.534"*, %"struct.std::_Rb_tree_node_base.17.99.509.534"* }
+%"struct.std::_Rb_tree_node_base.17.99.509.534" = type { i32, ptr, ptr, ptr }
 
-define void @test1(%class.spell_checker.21.103.513.538* %this) unnamed_addr align 2 {
+define void @test1(ptr %this) unnamed_addr align 2 {
 entry:
-  %_M_header.i.i.i.i.i.i = getelementptr inbounds %class.spell_checker.21.103.513.538, %class.spell_checker.21.103.513.538* %this, i64 0, i32 0, i32 0, i32 0, i32 1
-  %0 = bitcast %"struct.std::_Rb_tree_node_base.17.99.509.534"* %_M_header.i.i.i.i.i.i to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 40, i1 false) nounwind
-  store %"struct.std::_Rb_tree_node_base.17.99.509.534"* %_M_header.i.i.i.i.i.i, %"struct.std::_Rb_tree_node_base.17.99.509.534"** undef, align 8
+  %_M_header.i.i.i.i.i.i = getelementptr inbounds %class.spell_checker.21.103.513.538, ptr %this, i64 0, i32 0, i32 0, i32 0, i32 1
+  call void @llvm.memset.p0.i64(ptr align 4 %_M_header.i.i.i.i.i.i, i8 0, i64 40, i1 false) nounwind
+  store ptr %_M_header.i.i.i.i.i.i, ptr undef, align 8
   unreachable
 }
 
 ; CHECK: @test1
 ; CHECK: stwu
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind

diff  --git a/llvm/test/CodeGen/PowerPC/stwux.ll b/llvm/test/CodeGen/PowerPC/stwux.ll
index 157e23e35c5d..9a40c51f598e 100644
--- a/llvm/test/CodeGen/PowerPC/stwux.ll
+++ b/llvm/test/CodeGen/PowerPC/stwux.ll
@@ -28,15 +28,15 @@ while.end:                                        ; preds = %if.end12
 
 if.end15:                                         ; preds = %while.end
   %idxprom.i.i230 = sext i32 %i.1 to i64
-  %arrayidx18 = getelementptr inbounds [100 x i32], [100 x i32]* @multvec_i, i64 0, i64 %idxprom.i.i230
-  store i32 0, i32* %arrayidx18, align 4
+  %arrayidx18 = getelementptr inbounds [100 x i32], ptr @multvec_i, i64 0, i64 %idxprom.i.i230
+  store i32 0, ptr %arrayidx18, align 4
   br i1 undef, label %while.body21, label %while.end90
 
 while.body21:                                     ; preds = %if.end15
   unreachable
 
 while.end90:                                      ; preds = %if.end15
-  store i32 0, i32* %arrayidx18, align 4
+  store i32 0, ptr %arrayidx18, align 4
   br label %return
 
 return:                                           ; preds = %while.end90, %while.end, %entry

diff  --git a/llvm/test/CodeGen/PowerPC/subreg-postra-2.ll b/llvm/test/CodeGen/PowerPC/subreg-postra-2.ll
index cfef020c2d35..f952c0e2d8fb 100644
--- a/llvm/test/CodeGen/PowerPC/subreg-postra-2.ll
+++ b/llvm/test/CodeGen/PowerPC/subreg-postra-2.ll
@@ -4,26 +4,25 @@ target datalayout = "E-m:e-i64:64-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
 ; Function Attrs: nounwind
-define void @jbd2_journal_commit_transaction(i32 %input1, i32* %input2, i32* %input3, i8** %input4) #0 {
+define void @jbd2_journal_commit_transaction(i32 %input1, ptr %input2, ptr %input3, ptr %input4) #0 {
 entry:
   br label %while.body392
 
 while.body392:                                    ; preds = %wait_on_buffer.exit1319, %while.body392.lr.ph
-  %0 = load i8*, i8** %input4, align 8
-  %add.ptr399 = getelementptr inbounds i8, i8* %0, i64 -72
-  %b_state.i.i1314 = bitcast i8* %add.ptr399 to i64*
+  %0 = load ptr, ptr %input4, align 8
+  %add.ptr399 = getelementptr inbounds i8, ptr %0, i64 -72
   %ivar = add i32 %input1, 1
   %tobool.i1316 = icmp eq i32 %input1, 0
   br i1 %tobool.i1316, label %wait_on_buffer.exit1319, label %while.end418
 
 wait_on_buffer.exit1319:                          ; preds = %while.body392
-  %1 = load volatile i64, i64* %b_state.i.i1314, align 8
+  %1 = load volatile i64, ptr %add.ptr399, align 8
   %conv.i.i1322 = and i64 %1, 1
   %lnot404 = icmp eq i64 %conv.i.i1322, 0
   %.err.4 = select i1 %lnot404, i32 -5, i32 %input1
-  %2 = call i64 asm sideeffect "1:.long 0x7c0000a8 $| ((($0) & 0x1f) << 21) $| (((0) & 0x1f) << 16) $| ((($3) & 0x1f) << 11) $| (((0) & 0x1) << 0) \0Aandc $0,$0,$2\0Astdcx. $0,0,$3\0Abne- 1b\0A", "=&r,=*m,r,r,*m,~{cc},~{memory}"(i64* elementtype(i64) %b_state.i.i1314, i64 262144, i64* %b_state.i.i1314, i64* elementtype(i64) %b_state.i.i1314) #0
-  store i8* %0, i8** %input4, align 8
-  %cmp.i1312 = icmp eq i32* %input2, %input3
+  %2 = call i64 asm sideeffect "1:.long 0x7c0000a8 $| ((($0) & 0x1f) << 21) $| (((0) & 0x1f) << 16) $| ((($3) & 0x1f) << 11) $| (((0) & 0x1) << 0) \0Aandc $0,$0,$2\0Astdcx. $0,0,$3\0Abne- 1b\0A", "=&r,=*m,r,r,*m,~{cc},~{memory}"(ptr elementtype(i64) %add.ptr399, i64 262144, ptr %add.ptr399, ptr elementtype(i64) %add.ptr399) #0
+  store ptr %0, ptr %input4, align 8
+  %cmp.i1312 = icmp eq ptr %input2, %input3
   br i1 %cmp.i1312, label %while.end418, label %while.body392
 
 while.end418:                                     ; preds = %wait_on_buffer.exit1319, %do.body378

diff  --git a/llvm/test/CodeGen/PowerPC/subreg-postra.ll b/llvm/test/CodeGen/PowerPC/subreg-postra.ll
index 9f5f9e70808d..9ed7e93299ee 100644
--- a/llvm/test/CodeGen/PowerPC/subreg-postra.ll
+++ b/llvm/test/CodeGen/PowerPC/subreg-postra.ll
@@ -4,9 +4,9 @@ target datalayout = "E-m:e-i64:64-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
 ; Function Attrs: nounwind
-define void @jbd2_journal_commit_transaction(i32* %journal, i64 %inp1, i32 %inp2,
-                                             i32* %inp3, i32** %inp4,
-                                             i32** %inp5, i1 %inp6,
+define void @jbd2_journal_commit_transaction(ptr %journal, i64 %inp1, i32 %inp2,
+                                             ptr %inp3, ptr %inp4,
+                                             ptr %inp5, i1 %inp6,
                                              i1 %inp7, i1 %inp8) #0 {
 entry:
   br i1 undef, label %do.body, label %if.then5
@@ -124,9 +124,8 @@ while.body392.lr.ph:                              ; preds = %do.body378
   br label %while.body392
 
 while.body392:                                    ; preds = %wait_on_buffer.exit1319, %while.body392.lr.ph
-  %0 = load i8*, i8** undef, align 8
-  %add.ptr399 = getelementptr inbounds i8, i8* %0, i64 -72
-  %b_state.i.i1314 = bitcast i8* %add.ptr399 to i64*
+  %0 = load ptr, ptr undef, align 8
+  %add.ptr399 = getelementptr inbounds i8, ptr %0, i64 -72
   %tobool.i1316 = icmp eq i64 %inp1, 0
   br i1 %tobool.i1316, label %wait_on_buffer.exit1319, label %if.then.i1317
 
@@ -134,15 +133,15 @@ if.then.i1317:                                    ; preds = %while.body392
   unreachable
 
 wait_on_buffer.exit1319:                          ; preds = %while.body392
-  %1 = load volatile i64, i64* %b_state.i.i1314, align 8
+  %1 = load volatile i64, ptr %add.ptr399, align 8
   %conv.i.i1322 = and i64 %1, 1
   %lnot404 = icmp eq i64 %conv.i.i1322, 0
   %.err.4 = select i1 %lnot404, i32 -5, i32 %inp2
-  %2 = call i64 asm sideeffect "1:.long 0x7c0000a8 $| ((($0) & 0x1f) << 21) $| (((0) & 0x1f) << 16) $| ((($3) & 0x1f) << 11) $| (((0) & 0x1) << 0) \0Aandc $0,$0,$2\0Astdcx. $0,0,$3\0Abne- 1b\0A", "=&r,=*m,r,r,*m,~{cc},~{memory}"(i64* elementtype(i64) %b_state.i.i1314, i64 262144, i64* %b_state.i.i1314, i64* elementtype(i64) %b_state.i.i1314) #1
-  %prev.i.i.i1325 = getelementptr inbounds i8, i8* %0, i64 8
-  %3 = load i32*, i32** %inp4, align 8
-  store i32* %3, i32** %inp5, align 8
-  call void @__brelse(i32* %3) #1
+  %2 = call i64 asm sideeffect "1:.long 0x7c0000a8 $| ((($0) & 0x1f) << 21) $| (((0) & 0x1f) << 16) $| ((($3) & 0x1f) << 11) $| (((0) & 0x1) << 0) \0Aandc $0,$0,$2\0Astdcx. $0,0,$3\0Abne- 1b\0A", "=&r,=*m,r,r,*m,~{cc},~{memory}"(ptr elementtype(i64) %add.ptr399, i64 262144, ptr %add.ptr399, ptr elementtype(i64) %add.ptr399) #1
+  %prev.i.i.i1325 = getelementptr inbounds i8, ptr %0, i64 8
+  %3 = load ptr, ptr %inp4, align 8
+  store ptr %3, ptr %inp5, align 8
+  call void @__brelse(ptr %3) #1
   br i1 %inp8, label %while.end418, label %while.body392
 
 ; CHECK-LABEL: @jbd2_journal_commit_transaction
@@ -161,16 +160,16 @@ while.end418:                                     ; preds = %wait_on_buffer.exit
   br i1 %inp7, label %if.end421, label %if.then420
 
 if.then420:                                       ; preds = %while.end418
-  call void @jbd2_journal_abort(i32* %journal, i32 signext %err.4.lcssa) #1
+  call void @jbd2_journal_abort(ptr %journal, i32 signext %err.4.lcssa) #1
   br label %if.end421
 
 if.end421:                                        ; preds = %if.then420, %while.end418
   unreachable
 }
 
-declare void @jbd2_journal_abort(i32*, i32 signext)
+declare void @jbd2_journal_abort(ptr, i32 signext)
 
-declare void @__brelse(i32*)
+declare void @__brelse(ptr)
 
 attributes #0 = { nounwind }
 attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/PowerPC/svr4-redzone.ll b/llvm/test/CodeGen/PowerPC/svr4-redzone.ll
index 26c4410ded6d..2a6063f92707 100644
--- a/llvm/test/CodeGen/PowerPC/svr4-redzone.ll
+++ b/llvm/test/CodeGen/PowerPC/svr4-redzone.ll
@@ -15,10 +15,10 @@ entry:
 ; PPC64-NOT: stdu 1, -{{[0-9]+}}(1)
 ; PPC64: blr
 
-define i8* @smallstack() nounwind {
+define ptr @smallstack() nounwind {
 entry:
  %0 = alloca i8, i32 4
-       ret i8* %0
+       ret ptr %0
 }
 ; PPC32-LABEL: smallstack:
 ; PPC32: stwu 1, -16(1)
@@ -27,10 +27,10 @@ entry:
 ; PPC64-NOT: stdu 1, -{{[0-9]+}}(1)
 ; PPC64: blr
 
-define i8* @bigstack() nounwind {
+define ptr @bigstack() nounwind {
 entry:
  %0 = alloca i8, i32 290
-       ret i8* %0
+       ret ptr %0
 }
 ; PPC32-LABEL: bigstack:
 ; PPC32: stwu 1, -304(1)

diff  --git a/llvm/test/CodeGen/PowerPC/swap-reduction.ll b/llvm/test/CodeGen/PowerPC/swap-reduction.ll
index a2e7176654f8..1152594e37e6 100644
--- a/llvm/test/CodeGen/PowerPC/swap-reduction.ll
+++ b/llvm/test/CodeGen/PowerPC/swap-reduction.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -verify-machineinstrs -mcpu=pwr8 -mtriple=powerpc64le < %s | FileCheck %s
 
-define i64 @test1(i64* %a, i64* %b) {
+define i64 @test1(ptr %a, ptr %b) {
 ; CHECK-LABEL: test1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 5, 3
@@ -14,8 +14,8 @@ define i64 @test1(i64* %a, i64* %b) {
 ; CHECK-NEXT:    stxsdx 34, 0, 5
 ; CHECK-NEXT:    blr
 entry:
-  %lhs = load i64, i64* %a, align 8
-  %rhs = load i64, i64* %b, align 8
+  %lhs = load i64, ptr %a, align 8
+  %rhs = load i64, ptr %b, align 8
   %sum = add i64 %lhs, %rhs
   %lv = insertelement <2 x i64> undef, i64 %lhs, i32 0
   %rv = insertelement <2 x i64> undef, i64 %rhs, i32 0
@@ -24,11 +24,11 @@ entry:
   %add = call <16 x i8> @llvm.ppc.altivec.vavgsb(<16 x i8> %lhc, <16 x i8> %rhc)
   %cb = bitcast <16 x i8> %add to <2 x i64>
   %fv = extractelement <2 x i64> %cb, i32 0
-  store i64 %fv, i64* %a, align 8
+  store i64 %fv, ptr %a, align 8
   ret i64 %sum
 }
 
-define i64 @test2(i64* %a, i64* %b) {
+define i64 @test2(ptr %a, ptr %b) {
 ; CHECK-LABEL: test2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mr 5, 3
@@ -41,8 +41,8 @@ define i64 @test2(i64* %a, i64* %b) {
 ; CHECK-NEXT:    stxsdx 34, 0, 5
 ; CHECK-NEXT:    blr
 entry:
-  %lhs = load i64, i64* %a, align 8
-  %rhs = load i64, i64* %b, align 8
+  %lhs = load i64, ptr %a, align 8
+  %rhs = load i64, ptr %b, align 8
   %sum = add i64 %lhs, %rhs
   %lv = insertelement <2 x i64> undef, i64 %lhs, i32 0
   %rv = insertelement <2 x i64> undef, i64 %rhs, i32 0
@@ -51,12 +51,12 @@ entry:
   %add = add <8 x i16> %lhc, %rhc
   %cb = bitcast <8 x i16> %add to <2 x i64>
   %fv = extractelement <2 x i64> %cb, i32 0
-  store i64 %fv, i64* %a, align 8
+  store i64 %fv, ptr %a, align 8
   ret i64 %sum
 }
 
 ; Ensure that vec-ops with multiple uses aren't simplified.
-define signext i16 @vecop_uses(i16* %addr) {
+define signext i16 @vecop_uses(ptr %addr) {
 ; CHECK-LABEL: vecop_uses:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li 4, 16
@@ -77,13 +77,12 @@ define signext i16 @vecop_uses(i16* %addr) {
 ; CHECK-NEXT:    extsh 3, 3
 ; CHECK-NEXT:    blr
 entry:
-  %0 = bitcast i16* %addr to <16 x i16>*
-  %1 = load <16 x i16>, <16 x i16>* %0, align 2
-  %2 = call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %1)
-  ret i16 %2
+  %0 = load <16 x i16>, ptr %addr, align 2
+  %1 = call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %0)
+  ret i16 %1
 }
 
-define signext i32 @vecop_uses2([4 x i32]* %a, [4 x i32]* %b, [4 x i32]* %c) {
+define signext i32 @vecop_uses2(ptr %a, ptr %b, ptr %c) {
 ; CHECK-LABEL: vecop_uses2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvd2x 0, 0, 3
@@ -98,15 +97,12 @@ define signext i32 @vecop_uses2([4 x i32]* %a, [4 x i32]* %b, [4 x i32]* %c) {
 ; CHECK-NEXT:    stxvd2x 0, 0, 5
 ; CHECK-NEXT:    blr
 entry:
-  %0 = bitcast [4 x i32]* %a to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast [4 x i32]* %b to <4 x i32>*
-  %3 = load <4 x i32>, <4 x i32>* %2, align 4
-  %4 = mul <4 x i32> %3, %1
-  %5 = bitcast [4 x i32]* %c to <4 x i32>*
-  store <4 x i32> %4, <4 x i32>* %5, align 4
-  %6 = extractelement <4 x i32> %1, i32 3
-  ret i32 %6
+  %0 = load <4 x i32>, ptr %a, align 4
+  %1 = load <4 x i32>, ptr %b, align 4
+  %2 = mul <4 x i32> %1, %0
+  store <4 x i32> %2, ptr %c, align 4
+  %3 = extractelement <4 x i32> %0, i32 3
+  ret i32 %3
 }
 
 declare <16 x i8> @llvm.ppc.altivec.vavgsb(<16 x i8>, <16 x i8>)

diff  --git a/llvm/test/CodeGen/PowerPC/swaps-le-1.ll b/llvm/test/CodeGen/PowerPC/swaps-le-1.ll
index bea0d9ef3408..e2a61d7060ff 100644
--- a/llvm/test/CodeGen/PowerPC/swaps-le-1.ll
+++ b/llvm/test/CodeGen/PowerPC/swaps-le-1.ll
@@ -46,68 +46,52 @@ entry:
 
 vector.body:
   %index = phi i64 [ 0, %entry ], [ %index.next.3, %vector.body ]
-  %0 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cb, i64 0, i64 %index
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %1, align 8
-  %2 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cc, i64 0, i64 %index
-  %3 = bitcast i32* %2 to <4 x i32>*
-  %wide.load13 = load <4 x i32>, <4 x i32>* %3, align 8
-  %4 = add nsw <4 x i32> %wide.load13, %wide.load
-  %5 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cd, i64 0, i64 %index
-  %6 = bitcast i32* %5 to <4 x i32>*
-  %wide.load14 = load <4 x i32>, <4 x i32>* %6, align 8
-  %7 = mul nsw <4 x i32> %4, %wide.load14
-  %8 = getelementptr inbounds [4096 x i32], [4096 x i32]* @ca, i64 0, i64 %index
-  %9 = bitcast i32* %8 to <4 x i32>*
-  store <4 x i32> %7, <4 x i32>* %9, align 8
+  %0 = getelementptr inbounds [4096 x i32], ptr @cb, i64 0, i64 %index
+  %wide.load = load <4 x i32>, ptr %0, align 8
+  %1 = getelementptr inbounds [4096 x i32], ptr @cc, i64 0, i64 %index
+  %wide.load13 = load <4 x i32>, ptr %1, align 8
+  %2 = add nsw <4 x i32> %wide.load13, %wide.load
+  %3 = getelementptr inbounds [4096 x i32], ptr @cd, i64 0, i64 %index
+  %wide.load14 = load <4 x i32>, ptr %3, align 8
+  %4 = mul nsw <4 x i32> %2, %wide.load14
+  %5 = getelementptr inbounds [4096 x i32], ptr @ca, i64 0, i64 %index
+  store <4 x i32> %4, ptr %5, align 8
   %index.next = add nuw nsw i64 %index, 4
-  %10 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cb, i64 0, i64 %index.next
-  %11 = bitcast i32* %10 to <4 x i32>*
-  %wide.load.1 = load <4 x i32>, <4 x i32>* %11, align 8
-  %12 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cc, i64 0, i64 %index.next
-  %13 = bitcast i32* %12 to <4 x i32>*
-  %wide.load13.1 = load <4 x i32>, <4 x i32>* %13, align 8
-  %14 = add nsw <4 x i32> %wide.load13.1, %wide.load.1
-  %15 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cd, i64 0, i64 %index.next
-  %16 = bitcast i32* %15 to <4 x i32>*
-  %wide.load14.1 = load <4 x i32>, <4 x i32>* %16, align 8
-  %17 = mul nsw <4 x i32> %14, %wide.load14.1
-  %18 = getelementptr inbounds [4096 x i32], [4096 x i32]* @ca, i64 0, i64 %index.next
-  %19 = bitcast i32* %18 to <4 x i32>*
-  store <4 x i32> %17, <4 x i32>* %19, align 8
+  %6 = getelementptr inbounds [4096 x i32], ptr @cb, i64 0, i64 %index.next
+  %wide.load.1 = load <4 x i32>, ptr %6, align 8
+  %7 = getelementptr inbounds [4096 x i32], ptr @cc, i64 0, i64 %index.next
+  %wide.load13.1 = load <4 x i32>, ptr %7, align 8
+  %8 = add nsw <4 x i32> %wide.load13.1, %wide.load.1
+  %9 = getelementptr inbounds [4096 x i32], ptr @cd, i64 0, i64 %index.next
+  %wide.load14.1 = load <4 x i32>, ptr %9, align 8
+  %10 = mul nsw <4 x i32> %8, %wide.load14.1
+  %11 = getelementptr inbounds [4096 x i32], ptr @ca, i64 0, i64 %index.next
+  store <4 x i32> %10, ptr %11, align 8
   %index.next.1 = add nuw nsw i64 %index.next, 4
-  %20 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cb, i64 0, i64 %index.next.1
-  %21 = bitcast i32* %20 to <4 x i32>*
-  %wide.load.2 = load <4 x i32>, <4 x i32>* %21, align 8
-  %22 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cc, i64 0, i64 %index.next.1
-  %23 = bitcast i32* %22 to <4 x i32>*
-  %wide.load13.2 = load <4 x i32>, <4 x i32>* %23, align 8
-  %24 = add nsw <4 x i32> %wide.load13.2, %wide.load.2
-  %25 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cd, i64 0, i64 %index.next.1
-  %26 = bitcast i32* %25 to <4 x i32>*
-  %wide.load14.2 = load <4 x i32>, <4 x i32>* %26, align 8
-  %27 = mul nsw <4 x i32> %24, %wide.load14.2
-  %28 = getelementptr inbounds [4096 x i32], [4096 x i32]* @ca, i64 0, i64 %index.next.1
-  %29 = bitcast i32* %28 to <4 x i32>*
-  store <4 x i32> %27, <4 x i32>* %29, align 8
+  %12 = getelementptr inbounds [4096 x i32], ptr @cb, i64 0, i64 %index.next.1
+  %wide.load.2 = load <4 x i32>, ptr %12, align 8
+  %13 = getelementptr inbounds [4096 x i32], ptr @cc, i64 0, i64 %index.next.1
+  %wide.load13.2 = load <4 x i32>, ptr %13, align 8
+  %14 = add nsw <4 x i32> %wide.load13.2, %wide.load.2
+  %15 = getelementptr inbounds [4096 x i32], ptr @cd, i64 0, i64 %index.next.1
+  %wide.load14.2 = load <4 x i32>, ptr %15, align 8
+  %16 = mul nsw <4 x i32> %14, %wide.load14.2
+  %17 = getelementptr inbounds [4096 x i32], ptr @ca, i64 0, i64 %index.next.1
+  store <4 x i32> %16, ptr %17, align 8
   %index.next.2 = add nuw nsw i64 %index.next.1, 4
-  %30 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cb, i64 0, i64 %index.next.2
-  %31 = bitcast i32* %30 to <4 x i32>*
-  %wide.load.3 = load <4 x i32>, <4 x i32>* %31, align 8
-  %32 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cc, i64 0, i64 %index.next.2
-  %33 = bitcast i32* %32 to <4 x i32>*
-  %wide.load13.3 = load <4 x i32>, <4 x i32>* %33, align 8
-  %34 = add nsw <4 x i32> %wide.load13.3, %wide.load.3
-  %35 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cd, i64 0, i64 %index.next.2
-  %36 = bitcast i32* %35 to <4 x i32>*
-  %wide.load14.3 = load <4 x i32>, <4 x i32>* %36, align 8
-  %37 = mul nsw <4 x i32> %34, %wide.load14.3
-  %38 = getelementptr inbounds [4096 x i32], [4096 x i32]* @ca, i64 0, i64 %index.next.2
-  %39 = bitcast i32* %38 to <4 x i32>*
-  store <4 x i32> %37, <4 x i32>* %39, align 8
+  %18 = getelementptr inbounds [4096 x i32], ptr @cb, i64 0, i64 %index.next.2
+  %wide.load.3 = load <4 x i32>, ptr %18, align 8
+  %19 = getelementptr inbounds [4096 x i32], ptr @cc, i64 0, i64 %index.next.2
+  %wide.load13.3 = load <4 x i32>, ptr %19, align 8
+  %20 = add nsw <4 x i32> %wide.load13.3, %wide.load.3
+  %21 = getelementptr inbounds [4096 x i32], ptr @cd, i64 0, i64 %index.next.2
+  %wide.load14.3 = load <4 x i32>, ptr %21, align 8
+  %22 = mul nsw <4 x i32> %20, %wide.load14.3
+  %23 = getelementptr inbounds [4096 x i32], ptr @ca, i64 0, i64 %index.next.2
+  store <4 x i32> %22, ptr %23, align 8
   %index.next.3 = add nuw nsw i64 %index.next.2, 4
-  %40 = icmp eq i64 %index.next.3, 4096
-  br i1 %40, label %for.end, label %vector.body
+  %24 = icmp eq i64 %index.next.3, 4096
+  br i1 %24, label %for.end, label %vector.body
 
 for.end:
   ret void

diff  --git a/llvm/test/CodeGen/PowerPC/swaps-le-2.ll b/llvm/test/CodeGen/PowerPC/swaps-le-2.ll
index e7751a194f7f..a6e63f88a788 100644
--- a/llvm/test/CodeGen/PowerPC/swaps-le-2.ll
+++ b/llvm/test/CodeGen/PowerPC/swaps-le-2.ll
@@ -45,27 +45,27 @@
 ; Function Attrs: nounwind
 define void @cfoo() {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @vc, align 8
+  %0 = load <16 x i8>, ptr @vc, align 8
   %vecinit30 = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
-  store <16 x i8> %vecinit30, <16 x i8>* @vcr, align 8
+  store <16 x i8> %vecinit30, ptr @vcr, align 8
   ret void
 }
 
 ; Function Attrs: nounwind
 define void @sfoo() {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @vs, align 8
+  %0 = load <8 x i16>, ptr @vs, align 8
   %vecinit14 = shufflevector <8 x i16> %0, <8 x i16> undef, <8 x i32> <i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6>
-  store <8 x i16> %vecinit14, <8 x i16>* @vsr, align 8
+  store <8 x i16> %vecinit14, ptr @vsr, align 8
   ret void
 }
 
 ; Function Attrs: nounwind
 define void @ifoo() {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @vi, align 8
+  %0 = load <4 x i32>, ptr @vi, align 8
   %vecinit6 = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
-  store <4 x i32> %vecinit6, <4 x i32>* @vir, align 8
+  store <4 x i32> %vecinit6, ptr @vir, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/swaps-le-3.ll b/llvm/test/CodeGen/PowerPC/swaps-le-3.ll
index 7c83ad5cbbd6..65c693285441 100644
--- a/llvm/test/CodeGen/PowerPC/swaps-le-3.ll
+++ b/llvm/test/CodeGen/PowerPC/swaps-le-3.ll
@@ -10,9 +10,9 @@ define void @test(double %s) {
 entry:
   %0 = insertelement <2 x double> undef, double %s, i32 0
   %1 = shufflevector <2 x double> %0, <2 x double> undef, <2 x i32> zeroinitializer
-  %2 = load <2 x double>, <2 x double>* @a, align 16
+  %2 = load <2 x double>, ptr @a, align 16
   %3 = fadd <2 x double> %0, %2
-  store <2 x double> %3, <2 x double>* @b, align 16
+  store <2 x double> %3, ptr @b, align 16
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/swaps-le-4.ll b/llvm/test/CodeGen/PowerPC/swaps-le-4.ll
index 2bf684d9d614..1223ba851ccb 100644
--- a/llvm/test/CodeGen/PowerPC/swaps-le-4.ll
+++ b/llvm/test/CodeGen/PowerPC/swaps-le-4.ll
@@ -7,12 +7,10 @@
 define void @bar() {
 entry:
   %x = alloca <2 x i64>, align 16
-  %0 = bitcast <2 x i64>* %x to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* %0)
-  %arrayidx = getelementptr inbounds <2 x i64>, <2 x i64>* %x, i64 0, i64 0
-  store <2 x i64> <i64 0, i64 1>, <2 x i64>* %x, align 16
-  call void @foo(i64* %arrayidx)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* %0)
+  call void @llvm.lifetime.start.p0(i64 16, ptr %x)
+  store <2 x i64> <i64 0, i64 1>, ptr %x, align 16
+  call void @foo(ptr %x)
+  call void @llvm.lifetime.end.p0(i64 16, ptr %x)
   ret void
 }
 
@@ -21,7 +19,7 @@ entry:
 ; CHECK: stxvd2x
 ; CHECK-NOT: xxswapd
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
-declare void @foo(i64*)
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @foo(ptr)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
 

diff  --git a/llvm/test/CodeGen/PowerPC/swaps-le-5.ll b/llvm/test/CodeGen/PowerPC/swaps-le-5.ll
index 95f0fc25f2dd..4e6500150700 100644
--- a/llvm/test/CodeGen/PowerPC/swaps-le-5.ll
+++ b/llvm/test/CodeGen/PowerPC/swaps-le-5.ll
@@ -8,9 +8,9 @@
 
 define void @bar0(double %y) {
 entry:
-  %0 = load <2 x double>, <2 x double>* @x, align 16
+  %0 = load <2 x double>, ptr @x, align 16
   %vecins = insertelement <2 x double> %0, double %y, i32 0
-  store <2 x double> %vecins, <2 x double>* @z, align 16
+  store <2 x double> %vecins, ptr @z, align 16
   ret void
 }
 
@@ -23,9 +23,9 @@ entry:
 
 define void @bar1(double %y) {
 entry:
-  %0 = load <2 x double>, <2 x double>* @x, align 16
+  %0 = load <2 x double>, ptr @x, align 16
   %vecins = insertelement <2 x double> %0, double %y, i32 1
-  store <2 x double> %vecins, <2 x double>* @z, align 16
+  store <2 x double> %vecins, ptr @z, align 16
   ret void
 }
 
@@ -38,10 +38,10 @@ entry:
 
 define void @baz0() {
 entry:
-  %0 = load <2 x double>, <2 x double>* @z, align 16
-  %1 = load <2 x double>, <2 x double>* @x, align 16
+  %0 = load <2 x double>, ptr @z, align 16
+  %1 = load <2 x double>, ptr @x, align 16
   %vecins = shufflevector <2 x double> %0, <2 x double> %1, <2 x i32> <i32 0, i32 2>
-  store <2 x double> %vecins, <2 x double>* @z, align 16
+  store <2 x double> %vecins, ptr @z, align 16
   ret void
 }
 
@@ -54,10 +54,10 @@ entry:
 
 define void @baz1() {
 entry:
-  %0 = load <2 x double>, <2 x double>* @z, align 16
-  %1 = load <2 x double>, <2 x double>* @x, align 16
+  %0 = load <2 x double>, ptr @z, align 16
+  %1 = load <2 x double>, ptr @x, align 16
   %vecins = shufflevector <2 x double> %0, <2 x double> %1, <2 x i32> <i32 3, i32 1>
-  store <2 x double> %vecins, <2 x double>* @z, align 16
+  store <2 x double> %vecins, ptr @z, align 16
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/swaps-le-6.ll b/llvm/test/CodeGen/PowerPC/swaps-le-6.ll
index 4f12a0688498..5092b4ac78ab 100644
--- a/llvm/test/CodeGen/PowerPC/swaps-le-6.ll
+++ b/llvm/test/CodeGen/PowerPC/swaps-le-6.ll
@@ -67,10 +67,10 @@ define void @bar0() {
 ; CHECK-P9-NOVECTOR-NEXT:    stxvd2x vs0, 0, r3
 ; CHECK-P9-NOVECTOR-NEXT:    blr
 entry:
-  %0 = load <2 x double>, <2 x double>* @x, align 16
-  %1 = load double, double* @y, align 8
+  %0 = load <2 x double>, ptr @x, align 16
+  %1 = load double, ptr @y, align 8
   %vecins = insertelement <2 x double> %0, double %1, i32 0
-  store <2 x double> %vecins, <2 x double>* @z, align 16
+  store <2 x double> %vecins, ptr @z, align 16
   ret void
 }
 
@@ -121,10 +121,10 @@ define void @bar1() {
 ; CHECK-P9-NOVECTOR-NEXT:    stxvd2x vs0, 0, r3
 ; CHECK-P9-NOVECTOR-NEXT:    blr
 entry:
-  %0 = load <2 x double>, <2 x double>* @x, align 16
-  %1 = load double, double* @y, align 8
+  %0 = load <2 x double>, ptr @x, align 16
+  %1 = load double, ptr @y, align 8
   %vecins = insertelement <2 x double> %0, double %1, i32 1
-  store <2 x double> %vecins, <2 x double>* @z, align 16
+  store <2 x double> %vecins, ptr @z, align 16
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/swaps-le-7.ll b/llvm/test/CodeGen/PowerPC/swaps-le-7.ll
index 6f5445197ac4..c5d16f1f08e5 100644
--- a/llvm/test/CodeGen/PowerPC/swaps-le-7.ll
+++ b/llvm/test/CodeGen/PowerPC/swaps-le-7.ll
@@ -23,17 +23,14 @@
 ; CHECK: blr
 
 ; Function Attrs: noinline
-define void @zg(i8* %.G0011_640.0, i8* %.G0012_642.0, <2 x double>* %JJ, <2 x double>* %.ka0000_391, double %.unpack, double %.unpack66) #0 {
+define void @zg(ptr %.G0011_640.0, ptr %.G0012_642.0, ptr %JJ, ptr %.ka0000_391, double %.unpack, double %.unpack66) #0 {
 L.JA291:
-  %Z.L.JA291.2 = load <2 x double>, <2 x double>* %.ka0000_391, align 16
-  store <2 x double> %Z.L.JA291.2, <2 x double>* %JJ, align 8
-  %Z.L.JA291.3 = bitcast i8* %.G0012_642.0 to <2 x double>*
-  %Z.L.JA291.4 = load <2 x double>, <2 x double>* %Z.L.JA291.3, align 1
-  %.elt136 = bitcast i8* %.G0011_640.0 to double*
-  %.unpack137 = load double, double* %.elt136, align 1
-  %.elt138 = getelementptr inbounds i8, i8* %.G0011_640.0, i64 8
-  %Z.L.JA291.5 = bitcast i8* %.elt138 to double*
-  %.unpack139 = load double, double* %Z.L.JA291.5, align 1
+  %Z.L.JA291.2 = load <2 x double>, ptr %.ka0000_391, align 16
+  store <2 x double> %Z.L.JA291.2, ptr %JJ, align 8
+  %Z.L.JA291.4 = load <2 x double>, ptr %.G0012_642.0, align 1
+  %.unpack137 = load double, ptr %.G0011_640.0, align 1
+  %.elt138 = getelementptr inbounds i8, ptr %.G0011_640.0, i64 8
+  %.unpack139 = load double, ptr %.elt138, align 1
   %Z.L.JA291.6 = insertelement <2 x double> undef, double %.unpack137, i32 0
   %Z.L.JA291.7 = insertelement <2 x double> %Z.L.JA291.6, double %.unpack137, i32 1
   %Z.L.JA291.8 = fmul <2 x double> %Z.L.JA291.2, %Z.L.JA291.7
@@ -45,10 +42,8 @@ L.JA291:
   %Z.L.JA291.14 = fadd <2 x double> %Z.L.JA291.8, %Z.L.JA291.12
   %Z.L.JA291.15 = shufflevector <2 x double> %Z.L.JA291.13, <2 x double> %Z.L.JA291.14, <2 x i32> <i32 0, i32 3>
   %Z.L.JA291.16 = fsub <2 x double> %Z.L.JA291.4, %Z.L.JA291.15
-  %Z.L.JA291.17 = bitcast i8* %.G0012_642.0 to <2 x double>*
-  store <2 x double> %Z.L.JA291.16, <2 x double>* %Z.L.JA291.17, align 8
-  %.. = bitcast <2 x double>* %JJ to i32*
-  %.pre = load i32, i32* %.., align 32
+  store <2 x double> %Z.L.JA291.16, ptr %.G0012_642.0, align 8
+  %.pre = load i32, ptr %JJ, align 32
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/swaps-le-8.ll b/llvm/test/CodeGen/PowerPC/swaps-le-8.ll
index 2874a0c258bd..10eb4a98d55a 100644
--- a/llvm/test/CodeGen/PowerPC/swaps-le-8.ll
+++ b/llvm/test/CodeGen/PowerPC/swaps-le-8.ll
@@ -7,7 +7,7 @@
 ; RUN: llc -verify-machineinstrs -O3 -mcpu=pwr10 \
 ; RUN:   -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s \
 ; RUN:   --check-prefix=CHECK-P9UP
-define dso_local void @test(i64* %Src, i64* nocapture %Tgt) local_unnamed_addr {
+define dso_local void @test(ptr %Src, ptr nocapture %Tgt) local_unnamed_addr {
 ; CHECK-LABEL: test:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvd2x 0, 0, 3
@@ -21,11 +21,9 @@ define dso_local void @test(i64* %Src, i64* nocapture %Tgt) local_unnamed_addr {
 ; CHECK-P9UP-NEXT:    stxv 0, 0(4)
 ; CHECK-P9UP-NEXT:    blr
 entry:
-  %0 = bitcast i64* %Src to i8*
-  %1 = tail call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %0) #2
-  %2 = bitcast i64* %Tgt to <2 x double>*
-  store <2 x double> %1, <2 x double>* %2, align 1
+  %0 = tail call <2 x double> @llvm.ppc.vsx.lxvd2x.be(ptr %Src) #2
+  store <2 x double> %0, ptr %Tgt, align 1
   ret void
 }
 
-declare <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8*) #1
+declare <2 x double> @llvm.ppc.vsx.lxvd2x.be(ptr) #1

diff  --git a/llvm/test/CodeGen/PowerPC/tail-dup-analyzable-fallthrough.ll b/llvm/test/CodeGen/PowerPC/tail-dup-analyzable-fallthrough.ll
index 960e6b61b5bd..6f102eed65c7 100644
--- a/llvm/test/CodeGen/PowerPC/tail-dup-analyzable-fallthrough.ll
+++ b/llvm/test/CodeGen/PowerPC/tail-dup-analyzable-fallthrough.ll
@@ -13,12 +13,12 @@ target triple = "powerpc64le-unknown-linux-gnu"
 ; CHECK: bclr
 ; CHECK: # %if.then
 ; Function Attrs: nounwind
-define void @__fmax_double3_3D_exec(<2 x double>* %input6, i1 %bool1, i1 %bool2) #0 {
+define void @__fmax_double3_3D_exec(ptr %input6, i1 %bool1, i1 %bool2) #0 {
 entry:
   br i1 %bool1, label %if.then.i, label %fmax_double3.exit
 
 if.then.i:                                        ; preds = %entry
-  store <2 x double> zeroinitializer, <2 x double>* %input6, align 32
+  store <2 x double> zeroinitializer, ptr %input6, align 32
   br label %fmax_double3.exit
 
 fmax_double3.exit:                                ; preds = %if.then.i, %entry

diff  --git a/llvm/test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll b/llvm/test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll
index 3ff4753200ef..4c1f6b5d5b3d 100644
--- a/llvm/test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll
+++ b/llvm/test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll
@@ -3,7 +3,7 @@ target datalayout = "E-m:e-i64:64-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
 ; Function Attrs: nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #0
 
 declare void @f1()
 declare void @f2()
@@ -54,11 +54,11 @@ if.else:                                      ; preds = %sw.default
   br label %dup2
 
 dup1:                                         ; preds = %sw.0, %sw.1
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull undef) #0
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull undef) #0
   unreachable
 
 dup2:                                         ; preds = %if.then, %if.else
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull undef) #0
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull undef) #0
   unreachable
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/tail-dup-layout.ll b/llvm/test/CodeGen/PowerPC/tail-dup-layout.ll
index bcae75f43cb1..8b4df1d2f99d 100644
--- a/llvm/test/CodeGen/PowerPC/tail-dup-layout.ll
+++ b/llvm/test/CodeGen/PowerPC/tail-dup-layout.ll
@@ -305,14 +305,14 @@ exit:
 ;CHECK-NEXT: beq 0, .[[LATCHLABEL]]
 ;CHECK: [[OPT4LABEL]]:
 ;CHECK: b .[[LATCHLABEL]]
-define void @loop_test(i32* %tags, i32 %count) {
+define void @loop_test(ptr %tags, i32 %count) {
 entry:
   br label %for.check
 for.check:
   %count.loop = phi i32 [%count, %entry], [%count.sub, %for.latch]
   %done.count = icmp ugt i32 %count.loop, 0
-  %tag_ptr = getelementptr inbounds i32, i32* %tags, i32 %count
-  %tag = load i32, i32* %tag_ptr
+  %tag_ptr = getelementptr inbounds i32, ptr %tags, i32 %count
+  %tag = load i32, ptr %tag_ptr
   %done.tag = icmp eq i32 %tag, 0
   %done = and i1 %done.count, %done.tag
   br i1 %done, label %test1, label %exit, !prof !1

diff  --git a/llvm/test/CodeGen/PowerPC/tailcall-speculatable-callee.ll b/llvm/test/CodeGen/PowerPC/tailcall-speculatable-callee.ll
index b2a488a16b30..423dedc2b2c8 100644
--- a/llvm/test/CodeGen/PowerPC/tailcall-speculatable-callee.ll
+++ b/llvm/test/CodeGen/PowerPC/tailcall-speculatable-callee.ll
@@ -4,7 +4,7 @@
 ; The tests check the behavior of the tail call decision when the callee is speculatable.
 
 ; Callee should be tail called in this function since it is at a tail call position.
-define dso_local double @speculatable_callee_return_use_only (double* nocapture %res, double %a) #0 {
+define dso_local double @speculatable_callee_return_use_only (ptr nocapture %res, double %a) #0 {
 ; CHECK-LABEL: speculatable_callee_return_use_only:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: b callee
@@ -14,7 +14,7 @@ entry:
 }
 
 ; Callee should not be tail called since it is not at a tail call position.
-define dso_local void @speculatable_callee_non_return_use_only (double* nocapture %res, double %a) #0 {
+define dso_local void @speculatable_callee_non_return_use_only (ptr nocapture %res, double %a) #0 {
 ; CHECK-LABEL: speculatable_callee_non_return_use_only:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: mflr r0
@@ -31,12 +31,12 @@ define dso_local void @speculatable_callee_non_return_use_only (double* nocaptur
 ; CHECK-NEXT: blr
 entry:
   %call = tail call double @callee(double %a) #2
-  store double %call, double* %res, align 8
+  store double %call, ptr %res, align 8
   ret void
 }
 
 ; Callee should not be tail called since it is not at a tail call position.
-define dso_local double @speculatable_callee_multi_use (double* nocapture %res, double %a) #0 {
+define dso_local double @speculatable_callee_multi_use (ptr nocapture %res, double %a) #0 {
   ; CHECK-LABEL: speculatable_callee_multi_use:
   ; CHECK: # %bb.0: # %entry
   ; CHECK-NEXT: mflr r0
@@ -53,13 +53,13 @@ define dso_local double @speculatable_callee_multi_use (double* nocapture %res,
   ; CHECK-NEXT: blr
   entry:
   %call = tail call double @callee(double %a) #2
-  store double %call, double* %res, align 8
+  store double %call, ptr %res, align 8
   ret double %call
 }
 
 ; Callee should not be tail called since it is not at a tail call position.
 ; FIXME: A speculatable callee can be tail called if it is moved into a valid tail call position.
-define dso_local double @speculatable_callee_intermediate_instructions (double* nocapture %res, double %a) #0 {
+define dso_local double @speculatable_callee_intermediate_instructions (ptr nocapture %res, double %a) #0 {
   ; CHECK-LABEL: speculatable_callee_intermediate_instructions:
   ; CHECK: # %bb.0: # %entry
   ; CHECK-NEXT: mflr r0
@@ -82,7 +82,7 @@ define dso_local double @speculatable_callee_intermediate_instructions (double*
 
   entry:
   %call = tail call double @callee(double %a) #2
-  store double 5.2, double* %res, align 8
+  store double 5.2, ptr %res, align 8
   ret double %call
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/tailcall-string-rvo.ll b/llvm/test/CodeGen/PowerPC/tailcall-string-rvo.ll
index edf8c5599dab..73217d705703 100644
--- a/llvm/test/CodeGen/PowerPC/tailcall-string-rvo.ll
+++ b/llvm/test/CodeGen/PowerPC/tailcall-string-rvo.ll
@@ -13,35 +13,33 @@ target triple = "powerpc64le-linux-gnu"
 %class.basic_string.11.42.73 = type { %"class.__gnu_cxx::__versa_string.10.41.72" }
 %"class.__gnu_cxx::__versa_string.10.41.72" = type { %"class.__gnu_cxx::__sso_string_base.9.40.71" }
 %"class.__gnu_cxx::__sso_string_base.9.40.71" = type { %"struct.__gnu_cxx::__vstring_utility<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider.7.38.69", i64, %union.anon.8.39.70 }
-%"struct.__gnu_cxx::__vstring_utility<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider.7.38.69" = type { i8* }
+%"struct.__gnu_cxx::__vstring_utility<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider.7.38.69" = type { ptr }
 %union.anon.8.39.70 = type { i64, [8 x i8] }
 
-declare void @TestBaz(%class.basic_string.11.42.73* noalias sret(%class.basic_string.11.42.73) %arg)
+declare void @TestBaz(ptr noalias sret(%class.basic_string.11.42.73) %arg)
 
-define dso_local void @TestBar(%class.basic_string.11.42.73* noalias sret(%class.basic_string.11.42.73) %arg) {
+define dso_local void @TestBar(ptr noalias sret(%class.basic_string.11.42.73) %arg) {
 bb:
-  call void @TestBaz(%class.basic_string.11.42.73* noalias sret(%class.basic_string.11.42.73) %arg)
+  call void @TestBaz(ptr noalias sret(%class.basic_string.11.42.73) %arg)
   ret void
 }
 
-define dso_local void @TestFoo(%class.basic_string.11.42.73* noalias sret(%class.basic_string.11.42.73) %arg) {
+define dso_local void @TestFoo(ptr noalias sret(%class.basic_string.11.42.73) %arg) {
 ; CHECK-LABEL: TestFoo:
 ; CHECK: #TC_RETURNd8 TestBar 0
 bb:
-  %tmp = getelementptr inbounds %class.basic_string.11.42.73, %class.basic_string.11.42.73* %arg, i64 0, i32 0, i32 0, i32 2
-  %tmp1 = bitcast %class.basic_string.11.42.73* %arg to %union.anon.8.39.70**
-  store %union.anon.8.39.70* %tmp, %union.anon.8.39.70** %tmp1, align 8
-  %tmp2 = bitcast %union.anon.8.39.70* %tmp to i8*
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp2, i8* nonnull undef, i64 13, i1 false)
-  %tmp3 = getelementptr inbounds %class.basic_string.11.42.73, %class.basic_string.11.42.73* %arg, i64 0, i32 0, i32 0, i32 1
-  store i64 13, i64* %tmp3, align 8
-  %tmp4 = getelementptr inbounds %class.basic_string.11.42.73, %class.basic_string.11.42.73* %arg, i64 0, i32 0, i32 0, i32 2, i32 1, i64 5
-  store i8 0, i8* %tmp4, align 1
-  tail call void @TestBar(%class.basic_string.11.42.73* noalias sret(%class.basic_string.11.42.73) %arg)
+  %tmp = getelementptr inbounds %class.basic_string.11.42.73, ptr %arg, i64 0, i32 0, i32 0, i32 2
+  store ptr %tmp, ptr %arg, align 8
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %tmp, ptr nonnull undef, i64 13, i1 false)
+  %tmp3 = getelementptr inbounds %class.basic_string.11.42.73, ptr %arg, i64 0, i32 0, i32 0, i32 1
+  store i64 13, ptr %tmp3, align 8
+  %tmp4 = getelementptr inbounds %class.basic_string.11.42.73, ptr %arg, i64 0, i32 0, i32 0, i32 2, i32 1, i64 5
+  store i8 0, ptr %tmp4, align 1
+  tail call void @TestBar(ptr noalias sret(%class.basic_string.11.42.73) %arg)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #0
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #0
 
 attributes #0 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/PowerPC/test-and-cmp-folding.ll b/llvm/test/CodeGen/PowerPC/test-and-cmp-folding.ll
index b4a3f28facd3..0e924eabbed2 100644
--- a/llvm/test/CodeGen/PowerPC/test-and-cmp-folding.ll
+++ b/llvm/test/CodeGen/PowerPC/test-and-cmp-folding.ll
@@ -23,7 +23,7 @@ if.then14.i:                                      ; preds = %sw.bb52
   br i1 undef, label %dummy.exit.i, label %if.then.i.i.i636
 
 if.then.i.i.i636:                                 ; preds = %if.then14.i
-  %0 = load i8*, i8** undef, align 8
+  %0 = load ptr, ptr undef, align 8
   call void @free() #3
   br label %dummy.exit.i
 

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesi32gtu.ll b/llvm/test/CodeGen/PowerPC/testComparesi32gtu.ll
index b9777df02c61..2f6bd65f44c3 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesi32gtu.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesi32gtu.ll
@@ -26,7 +26,7 @@
 declare signext i32 @fn2(...) local_unnamed_addr #1
 
 ; Function Attrs: nounwind
-define dso_local i32 @testCompare1(%struct.tree_common* nocapture readonly %arg1) nounwind {
+define dso_local i32 @testCompare1(ptr nocapture readonly %arg1) nounwind {
 ; BE-LABEL: testCompare1:
 ; BE:       # %bb.0: # %entry
 ; BE-NEXT:    mflr r0
@@ -134,14 +134,13 @@ define dso_local i32 @testCompare1(%struct.tree_common* nocapture readonly %arg1
 ; CHECK-P10-CMP-BE-NEXT:    #TC_RETURNd8 fn2 at notoc 0
 
 entry:
-  %bf.load = load i8, i8* bitcast (i32 (%struct.tree_common*)* @testCompare1 to i8*), align 4
+  %bf.load = load i8, ptr @testCompare1, align 4
   %bf.clear = and i8 %bf.load, 1
-  %0 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %arg1, i64 0, i32 0
-  %bf.load1 = load i8, i8* %0, align 4
+  %bf.load1 = load i8, ptr %arg1, align 4
   %bf.clear2 = and i8 %bf.load1, 1
   %cmp = icmp ugt i8 %bf.clear, %bf.clear2
   %conv = zext i1 %cmp to i32
-  %call = tail call signext i32 bitcast (i32 (...)* @fn2 to i32 (i32)*)(i32 signext %conv) #2
+  %call = tail call signext i32 @fn2(i32 signext %conv) #2
   ret i32 undef
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesi32ltu.ll b/llvm/test/CodeGen/PowerPC/testComparesi32ltu.ll
index e609a71962e1..641ea0e3dcec 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesi32ltu.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesi32ltu.ll
@@ -28,7 +28,7 @@
 declare signext i32 @fn2(...) local_unnamed_addr #1
 
 ; Function Attrs: nounwind
-define dso_local i32 @testCompare1(%struct.tree_common* nocapture readonly %arg1) nounwind {
+define dso_local i32 @testCompare1(ptr nocapture readonly %arg1) nounwind {
 ; BE-LABEL: testCompare1:
 ; BE:       # %bb.0: # %entry
 ; BE-NEXT:    mflr r0
@@ -135,14 +135,13 @@ define dso_local i32 @testCompare1(%struct.tree_common* nocapture readonly %arg1
 ; CHECK-P10-CMP-BE-NEXT:    b fn2 at notoc
 ; CHECK-P10-CMP-BE-NEXT:    #TC_RETURNd8 fn2 at notoc 0
 entry:
-  %bf.load = load i8, i8* bitcast (i32 (%struct.tree_common*)* @testCompare1 to i8*), align 4
+  %bf.load = load i8, ptr @testCompare1, align 4
   %bf.clear = and i8 %bf.load, 1
-  %0 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %arg1, i64 0, i32 0
-  %bf.load1 = load i8, i8* %0, align 4
+  %bf.load1 = load i8, ptr %arg1, align 4
   %bf.clear2 = and i8 %bf.load1, 1
   %cmp = icmp ult i8 %bf.clear, %bf.clear2
   %conv = zext i1 %cmp to i32
-  %call = tail call signext i32 bitcast (i32 (...)* @fn2 to i32 (i32)*)(i32 signext %conv) #2
+  %call = tail call signext i32 @fn2(i32 signext %conv) #2
   ret i32 undef
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesieqsc.ll b/llvm/test/CodeGen/PowerPC/testComparesieqsc.ll
index f3f76d576274..cabaf81e87bd 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesieqsc.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesieqsc.ll
@@ -147,7 +147,7 @@ define dso_local void @test_ieqsc_store(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp eq i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -184,7 +184,7 @@ define dso_local void @test_ieqsc_sext_store(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp eq i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -215,7 +215,7 @@ define dso_local void @test_ieqsc_z_store(i8 signext %a) {
 entry:
   %cmp = icmp eq i8 %a, 0
   %conv2 = zext i1 %cmp to i8
-  store i8 %conv2, i8* @glob, align 1
+  store i8 %conv2, ptr @glob, align 1
   ret void
 }
 
@@ -249,6 +249,6 @@ define dso_local void @test_ieqsc_sext_z_store(i8 signext %a) {
 entry:
   %cmp = icmp eq i8 %a, 0
   %conv2 = sext i1 %cmp to i8
-  store i8 %conv2, i8* @glob, align 1
+  store i8 %conv2, ptr @glob, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesieqsi.ll b/llvm/test/CodeGen/PowerPC/testComparesieqsi.ll
index ce1a588b4a43..d8cccd39f7f8 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesieqsi.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesieqsi.ll
@@ -147,7 +147,7 @@ define dso_local void @test_ieqsi_store(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp eq i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -184,7 +184,7 @@ define dso_local void @test_ieqsi_sext_store(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp eq i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }
 
@@ -215,7 +215,7 @@ define dso_local void @test_ieqsi_z_store(i32 signext %a) {
 entry:
   %cmp = icmp eq i32 %a, 0
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -249,6 +249,6 @@ define dso_local void @test_ieqsi_sext_z_store(i32 signext %a) {
 entry:
   %cmp = icmp eq i32 %a, 0
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesieqsll.ll b/llvm/test/CodeGen/PowerPC/testComparesieqsll.ll
index 3924524e3672..2999231e7510 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesieqsll.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesieqsll.ll
@@ -141,7 +141,7 @@ define dso_local void @test_ieqsll_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp eq i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -175,7 +175,7 @@ define dso_local void @test_ieqsll_sext_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp eq i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -206,7 +206,7 @@ define dso_local void @test_ieqsll_z_store(i64 %a) {
 entry:
   %cmp = icmp eq i64 %a, 0
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -237,6 +237,6 @@ define dso_local void @test_ieqsll_sext_z_store(i64 %a) {
 entry:
   %cmp = icmp eq i64 %a, 0
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesieqss.ll b/llvm/test/CodeGen/PowerPC/testComparesieqss.ll
index 265138b64bcb..d1ff694594f8 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesieqss.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesieqss.ll
@@ -147,7 +147,7 @@ define dso_local void @test_ieqss_store(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp eq i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -184,7 +184,7 @@ define dso_local void @test_ieqss_sext_store(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp eq i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -215,7 +215,7 @@ define dso_local void @test_ieqss_z_store(i16 signext %a) {
 entry:
   %cmp = icmp eq i16 %a, 0
   %conv2 = zext i1 %cmp to i16
-  store i16 %conv2, i16* @glob, align 2
+  store i16 %conv2, ptr @glob, align 2
   ret void
 }
 
@@ -249,6 +249,6 @@ define dso_local void @test_ieqss_sext_z_store(i16 signext %a) {
 entry:
   %cmp = icmp eq i16 %a, 0
   %conv2 = sext i1 %cmp to i16
-  store i16 %conv2, i16* @glob, align 2
+  store i16 %conv2, ptr @glob, align 2
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesiequc.ll b/llvm/test/CodeGen/PowerPC/testComparesiequc.ll
index d06a99081169..e8c53242234a 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesiequc.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesiequc.ll
@@ -147,7 +147,7 @@ define dso_local void @test_iequc_store(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp eq i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -184,7 +184,7 @@ define dso_local void @test_iequc_sext_store(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp eq i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -215,7 +215,7 @@ define dso_local void @test_iequc_z_store(i8 zeroext %a) {
 entry:
   %cmp = icmp eq i8 %a, 0
   %conv2 = zext i1 %cmp to i8
-  store i8 %conv2, i8* @glob, align 1
+  store i8 %conv2, ptr @glob, align 1
   ret void
 }
 
@@ -249,6 +249,6 @@ define dso_local void @test_iequc_sext_z_store(i8 zeroext %a) {
 entry:
   %cmp = icmp eq i8 %a, 0
   %conv2 = sext i1 %cmp to i8
-  store i8 %conv2, i8* @glob, align 1
+  store i8 %conv2, ptr @glob, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesiequi.ll b/llvm/test/CodeGen/PowerPC/testComparesiequi.ll
index 19e76ec731d1..4a0ac17a2be5 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesiequi.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesiequi.ll
@@ -147,7 +147,7 @@ define dso_local void @test_iequi_store(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp eq i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -184,7 +184,7 @@ define dso_local void @test_iequi_sext_store(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp eq i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }
 
@@ -215,7 +215,7 @@ define dso_local void @test_iequi_z_store(i32 zeroext %a) {
 entry:
   %cmp = icmp eq i32 %a, 0
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -249,6 +249,6 @@ define dso_local void @test_iequi_sext_z_store(i32 zeroext %a) {
 entry:
   %cmp = icmp eq i32 %a, 0
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesiequll.ll b/llvm/test/CodeGen/PowerPC/testComparesiequll.ll
index 4caac971dc17..9e21682b969e 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesiequll.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesiequll.ll
@@ -141,7 +141,7 @@ define dso_local void @test_iequll_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp eq i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -175,7 +175,7 @@ define dso_local void @test_iequll_sext_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp eq i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -206,7 +206,7 @@ define dso_local void @test_iequll_z_store(i64 %a) {
 entry:
   %cmp = icmp eq i64 %a, 0
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -237,6 +237,6 @@ define dso_local void @test_iequll_sext_z_store(i64 %a) {
 entry:
   %cmp = icmp eq i64 %a, 0
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesiequs.ll b/llvm/test/CodeGen/PowerPC/testComparesiequs.ll
index e3e3a3794094..26df66cb73d6 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesiequs.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesiequs.ll
@@ -147,7 +147,7 @@ define dso_local void @test_iequs_store(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp eq i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -184,7 +184,7 @@ define dso_local void @test_iequs_sext_store(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp eq i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -215,7 +215,7 @@ define dso_local void @test_iequs_z_store(i16 zeroext %a) {
 entry:
   %cmp = icmp eq i16 %a, 0
   %conv2 = zext i1 %cmp to i16
-  store i16 %conv2, i16* @glob, align 2
+  store i16 %conv2, ptr @glob, align 2
   ret void
 }
 
@@ -249,6 +249,6 @@ define dso_local void @test_iequs_sext_z_store(i16 zeroext %a) {
 entry:
   %cmp = icmp eq i16 %a, 0
   %conv2 = sext i1 %cmp to i16
-  store i16 %conv2, i16* @glob, align 2
+  store i16 %conv2, ptr @glob, align 2
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesigesc.ll b/llvm/test/CodeGen/PowerPC/testComparesigesc.ll
index 8cf820d81a6e..af7402e0073c 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesigesc.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesigesc.ll
@@ -88,7 +88,7 @@ define dso_local void @test_igesc_store(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp sge i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -121,6 +121,6 @@ define dso_local void @test_igesc_sext_store(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp sge i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesigesi.ll b/llvm/test/CodeGen/PowerPC/testComparesigesi.ll
index b1454500d51e..53535f905f1d 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesigesi.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesigesi.ll
@@ -88,7 +88,7 @@ define dso_local void @test_igesi_store(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp sge i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -121,6 +121,6 @@ define dso_local void @test_igesi_sext_store(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp sge i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesigesll.ll b/llvm/test/CodeGen/PowerPC/testComparesigesll.ll
index 59f0ef5384e9..331a94543657 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesigesll.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesigesll.ll
@@ -146,7 +146,7 @@ define dso_local void @test_igesll_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp sge i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -185,7 +185,7 @@ define dso_local void @test_igesll_sext_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp sge i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -215,7 +215,7 @@ define dso_local void @test_igesll_z_store(i64 %a) {
 entry:
   %cmp = icmp sgt i64 %a, -1
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -245,6 +245,6 @@ define dso_local void @test_igesll_sext_z_store(i64 %a) {
 entry:
   %cmp = icmp sgt i64 %a, -1
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesigess.ll b/llvm/test/CodeGen/PowerPC/testComparesigess.ll
index 17aeffceefb4..d7bf7d782be2 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesigess.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesigess.ll
@@ -88,7 +88,7 @@ define dso_local void @test_igess_store(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp sge i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -121,6 +121,6 @@ define dso_local void @test_igess_sext_store(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp sge i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesigeuc.ll b/llvm/test/CodeGen/PowerPC/testComparesigeuc.ll
index 5e9be76af7a8..14f053baa929 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesigeuc.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesigeuc.ll
@@ -74,7 +74,7 @@ define dso_local void @test_igeuc_store(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp uge i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @glob
+  store i8 %conv3, ptr @glob
   ret void
 }
 
@@ -91,7 +91,7 @@ define dso_local void @test_igeuc_sext_store(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp uge i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @glob
+  store i8 %conv3, ptr @glob
   ret void
 ; CHECK-TBD-LABEL: @test_igeuc_sext_store
 ; CHECK-TBD: subf [[REG1:r[0-9]+]], r3, r4
@@ -112,7 +112,7 @@ define dso_local void @test_igeuc_z_store(i8 zeroext %a) {
 entry:
   %cmp = icmp uge i8 %a, 0
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @glob
+  store i8 %conv3, ptr @glob
   ret void
 }
 
@@ -127,6 +127,6 @@ define dso_local void @test_igeuc_sext_z_store(i8 zeroext %a) {
 entry:
   %cmp = icmp uge i8 %a, 0
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @glob
+  store i8 %conv3, ptr @glob
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesigeui.ll b/llvm/test/CodeGen/PowerPC/testComparesigeui.ll
index d6b53c0a3477..6cd8a3d0111d 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesigeui.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesigeui.ll
@@ -73,7 +73,7 @@ define dso_local void @test_igeui_store(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp uge i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob
+  store i32 %conv, ptr @glob
   ret void
 }
 
@@ -90,7 +90,7 @@ define dso_local void @test_igeui_sext_store(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp uge i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob
+  store i32 %sub, ptr @glob
   ret void
 }
 
@@ -105,7 +105,7 @@ define dso_local void @test_igeui_z_store(i32 zeroext %a) {
 entry:
   %cmp = icmp uge i32 %a, 0
   %conv1 = zext i1 %cmp to i32
-  store i32 %conv1, i32* @glob
+  store i32 %conv1, ptr @glob
   ret void
 }
 
@@ -120,7 +120,7 @@ define dso_local void @test_igeui_sext_z_store(i32 zeroext %a) {
 entry:
   %cmp = icmp uge i32 %a, 0
   %conv1 = sext i1 %cmp to i32
-  store i32 %conv1, i32* @glob
+  store i32 %conv1, ptr @glob
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesigeull.ll b/llvm/test/CodeGen/PowerPC/testComparesigeull.ll
index a161b9a54cfd..2fe51d2f15f1 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesigeull.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesigeull.ll
@@ -73,7 +73,7 @@ define dso_local void @test_igeull_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp uge i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob
+  store i64 %conv1, ptr @glob
   ret void
 }
 
@@ -90,7 +90,7 @@ define dso_local void @test_igeull_sext_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp uge i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob
+  store i64 %conv1, ptr @glob
   ret void
 }
 
@@ -105,7 +105,7 @@ define dso_local void @test_igeull_z_store(i64 %a) {
 entry:
   %cmp = icmp uge i64 %a, 0
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob
+  store i64 %conv1, ptr @glob
   ret void
 }
 
@@ -120,6 +120,6 @@ define dso_local void @test_igeull_sext_z_store(i64 %a) {
 entry:
   %cmp = icmp uge i64 %a, 0
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob
+  store i64 %conv1, ptr @glob
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesigeus.ll b/llvm/test/CodeGen/PowerPC/testComparesigeus.ll
index b8a0a1764c74..364b40254bb1 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesigeus.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesigeus.ll
@@ -73,7 +73,7 @@ define dso_local void @test_igeus_store(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp uge i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @glob
+  store i16 %conv3, ptr @glob
   ret void
 }
 
@@ -90,7 +90,7 @@ define dso_local void @test_igeus_sext_store(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp uge i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @glob
+  store i16 %conv3, ptr @glob
   ret void
 }
 
@@ -105,7 +105,7 @@ define dso_local void @test_igeus_z_store(i16 zeroext %a) {
 entry:
   %cmp = icmp uge i16 %a, 0
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @glob
+  store i16 %conv3, ptr @glob
   ret void
 }
 
@@ -120,7 +120,7 @@ define dso_local void @test_igeus_sext_z_store(i16 zeroext %a) {
 entry:
   %cmp = icmp uge i16 %a, 0
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @glob
+  store i16 %conv3, ptr @glob
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesigtsc.ll b/llvm/test/CodeGen/PowerPC/testComparesigtsc.ll
index 19a0a1d96d0b..6d7abcebf607 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesigtsc.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesigtsc.ll
@@ -74,7 +74,7 @@ define void @test_igtsc_store(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp sgt i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -91,7 +91,7 @@ define void @test_igtsc_sext_store(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp sgt i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -109,7 +109,7 @@ define void @test_igtsc_z_store(i8 signext %a) {
 entry:
   %cmp = icmp sgt i8 %a, 0
   %conv2 = zext i1 %cmp to i8
-  store i8 %conv2, i8* @glob, align 1
+  store i8 %conv2, ptr @glob, align 1
   ret void
 }
 
@@ -126,6 +126,6 @@ define void @test_igtsc_sext_z_store(i8 signext %a) {
 entry:
   %cmp = icmp sgt i8 %a, 0
   %conv2 = sext i1 %cmp to i8
-  store i8 %conv2, i8* @glob, align 1
+  store i8 %conv2, ptr @glob, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesigtsi.ll b/llvm/test/CodeGen/PowerPC/testComparesigtsi.ll
index 21a773f1fd80..6a900f1dc8e9 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesigtsi.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesigtsi.ll
@@ -74,7 +74,7 @@ define void @test_igtsi_store(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp sgt i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -91,7 +91,7 @@ define void @test_igtsi_sext_store(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp sgt i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }
 
@@ -109,7 +109,7 @@ define void @test_igtsi_z_store(i32 signext %a) {
 entry:
   %cmp = icmp sgt i32 %a, 0
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -126,6 +126,6 @@ define void @test_igtsi_sext_z_store(i32 signext %a) {
 entry:
   %cmp = icmp sgt i32 %a, 0
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesigtsll.ll b/llvm/test/CodeGen/PowerPC/testComparesigtsll.ll
index aa3c6ba797df..27524a8ba4c2 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesigtsll.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesigtsll.ll
@@ -86,7 +86,7 @@ define void @test_igtsll_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp sgt i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -107,7 +107,7 @@ define void @test_igtsll_sext_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp sgt i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -126,7 +126,7 @@ define void @test_igtsll_z_store(i64 %a) {
 entry:
   %cmp = icmp sgt i64 %a, 0
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -144,6 +144,6 @@ define void @test_igtsll_sext_z_store(i64 %a) {
 entry:
   %cmp = icmp sgt i64 %a, 0
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesigtss.ll b/llvm/test/CodeGen/PowerPC/testComparesigtss.ll
index fe951271986b..ffef0f8710f9 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesigtss.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesigtss.ll
@@ -74,7 +74,7 @@ define void @test_igtss_store(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp sgt i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -91,7 +91,7 @@ define void @test_igtss_sext_store(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp sgt i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -109,7 +109,7 @@ define void @test_igtss_z_store(i16 signext %a) {
 entry:
   %cmp = icmp sgt i16 %a, 0
   %conv2 = zext i1 %cmp to i16
-  store i16 %conv2, i16* @glob, align 2
+  store i16 %conv2, ptr @glob, align 2
   ret void
 }
 
@@ -126,6 +126,6 @@ define void @test_igtss_sext_z_store(i16 signext %a) {
 entry:
   %cmp = icmp sgt i16 %a, 0
   %conv2 = sext i1 %cmp to i16
-  store i16 %conv2, i16* @glob, align 2
+  store i16 %conv2, ptr @glob, align 2
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesigtuc.ll b/llvm/test/CodeGen/PowerPC/testComparesigtuc.ll
index 2aca2dd2b0df..56bb6a1a972f 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesigtuc.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesigtuc.ll
@@ -76,7 +76,7 @@ define void @test_igtuc_store(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp ugt i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -93,7 +93,7 @@ define void @test_igtuc_sext_store(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp ugt i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -111,7 +111,7 @@ define void @test_igtuc_z_store(i8 zeroext %a) {
 entry:
   %cmp = icmp ne i8 %a, 0
   %conv2 = zext i1 %cmp to i8
-  store i8 %conv2, i8* @glob, align 1
+  store i8 %conv2, ptr @glob, align 1
   ret void
 }
 
@@ -130,6 +130,6 @@ define void @test_igtuc_sext_z_store(i8 zeroext %a) {
 entry:
   %cmp = icmp ne i8 %a, 0
   %conv2 = sext i1 %cmp to i8
-  store i8 %conv2, i8* @glob, align 1
+  store i8 %conv2, ptr @glob, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesigtui.ll b/llvm/test/CodeGen/PowerPC/testComparesigtui.ll
index aa1c2edc5754..9cb5dd5a26f3 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesigtui.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesigtui.ll
@@ -76,7 +76,7 @@ define void @test_igtui_store(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp ugt i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -93,7 +93,7 @@ define void @test_igtui_sext_store(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp ugt i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }
 
@@ -111,7 +111,7 @@ define void @test_igtui_z_store(i32 zeroext %a) {
 entry:
   %cmp = icmp ne i32 %a, 0
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -130,7 +130,7 @@ define void @test_igtui_sext_z_store(i32 zeroext %a) {
 entry:
   %cmp = icmp ne i32 %a, 0
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesigtus.ll b/llvm/test/CodeGen/PowerPC/testComparesigtus.ll
index 877753c1a477..1db4a854d09d 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesigtus.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesigtus.ll
@@ -76,7 +76,7 @@ define void @test_igtus_store(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp ugt i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -93,7 +93,7 @@ define void @test_igtus_sext_store(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp ugt i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -111,7 +111,7 @@ define void @test_igtus_z_store(i16 zeroext %a) {
 entry:
   %cmp = icmp ne i16 %a, 0
   %conv2 = zext i1 %cmp to i16
-  store i16 %conv2, i16* @glob, align 2
+  store i16 %conv2, ptr @glob, align 2
   ret void
 }
 
@@ -130,7 +130,7 @@ define void @test_igtus_sext_z_store(i16 zeroext %a) {
 entry:
   %cmp = icmp ne i16 %a, 0
   %conv2 = sext i1 %cmp to i16
-  store i16 %conv2, i16* @glob, align 2
+  store i16 %conv2, ptr @glob, align 2
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesilesc.ll b/llvm/test/CodeGen/PowerPC/testComparesilesc.ll
index 55b11c7c1fa6..6e4fd8777813 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesilesc.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesilesc.ll
@@ -88,7 +88,7 @@ define dso_local void @test_ilesc_store(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp sle i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -121,6 +121,6 @@ define dso_local void @test_ilesc_sext_store(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp sle i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesilesi.ll b/llvm/test/CodeGen/PowerPC/testComparesilesi.ll
index 9d23b970b6f2..400d2486c0f2 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesilesi.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesilesi.ll
@@ -88,7 +88,7 @@ define dso_local void @test_ilesi_store(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp sle i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -121,6 +121,6 @@ define dso_local void @test_ilesi_sext_store(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp sle i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesilesll.ll b/llvm/test/CodeGen/PowerPC/testComparesilesll.ll
index b9ac9c07b849..02d580b74b55 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesilesll.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesilesll.ll
@@ -152,7 +152,7 @@ define dso_local void @test_ilesll_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp sle i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -191,7 +191,7 @@ define dso_local void @test_ilesll_sext_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp sle i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -224,7 +224,7 @@ define dso_local void @test_ilesll_z_store(i64 %a) {
 entry:
   %cmp = icmp slt i64 %a, 1
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -257,6 +257,6 @@ define dso_local void @test_ilesll_sext_z_store(i64 %a) {
 entry:
   %cmp = icmp slt i64 %a, 1
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesiless.ll b/llvm/test/CodeGen/PowerPC/testComparesiless.ll
index 28c8e9854f13..5bdf9099f0be 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesiless.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesiless.ll
@@ -88,7 +88,7 @@ define dso_local void @test_iless_store(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp sle i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -121,6 +121,6 @@ define dso_local void @test_iless_sext_store(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp sle i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesileuc.ll b/llvm/test/CodeGen/PowerPC/testComparesileuc.ll
index 394fb71ac1b2..fcbf82731d84 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesileuc.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesileuc.ll
@@ -76,7 +76,7 @@ define dso_local void @test_ileuc_store(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp ule i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @glob
+  store i8 %conv3, ptr @glob
   ret void
 }
 
@@ -93,7 +93,7 @@ define dso_local void @test_ileuc_sext_store(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp ule i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @glob
+  store i8 %conv3, ptr @glob
   ret void
 }
 
@@ -109,7 +109,7 @@ define dso_local void @test_ileuc_z_store(i8 zeroext %a) {
 entry:
   %cmp = icmp eq i8 %a, 0
   %conv2 = zext i1 %cmp to i8
-  store i8 %conv2, i8* @glob
+  store i8 %conv2, ptr @glob
   ret void
 }
 
@@ -126,7 +126,7 @@ define dso_local void @test_ileuc_sext_z_store(i8 zeroext %a) {
 entry:
   %cmp = icmp eq i8 %a, 0
   %conv2 = sext i1 %cmp to i8
-  store i8 %conv2, i8* @glob
+  store i8 %conv2, ptr @glob
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesileui.ll b/llvm/test/CodeGen/PowerPC/testComparesileui.ll
index 542ce0861203..61fa814d957a 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesileui.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesileui.ll
@@ -76,7 +76,7 @@ define dso_local void @test_ileui_store(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp ule i32 %a, %b
   %sub = zext i1 %cmp to i32
-  store i32 %sub, i32* @glob
+  store i32 %sub, ptr @glob
   ret void
 }
 
@@ -93,7 +93,7 @@ define dso_local void @test_ileui_sext_store(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp ule i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob
+  store i32 %sub, ptr @glob
   ret void
 }
 
@@ -109,7 +109,7 @@ define dso_local void @test_ileui_z_store(i32 zeroext %a) {
 entry:
   %cmp = icmp eq i32 %a, 0
   %sub = zext i1 %cmp to i32
-  store i32 %sub, i32* @glob
+  store i32 %sub, ptr @glob
   ret void
 }
 
@@ -126,7 +126,7 @@ define dso_local void @test_ileui_sext_z_store(i32 zeroext %a) {
 entry:
   %cmp = icmp eq i32 %a, 0
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob
+  store i32 %sub, ptr @glob
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesileull.ll b/llvm/test/CodeGen/PowerPC/testComparesileull.ll
index 705d2cf06dec..77d87b303469 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesileull.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesileull.ll
@@ -75,7 +75,7 @@ define dso_local void @test_ileull_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp ule i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob
+  store i64 %conv1, ptr @glob
   ret void
 }
 
@@ -92,7 +92,7 @@ define dso_local void @test_ileull_sext_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp ule i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob
+  store i64 %conv1, ptr @glob
   ret void
 }
 
@@ -108,7 +108,7 @@ define dso_local void @test_ileull_z_store(i64 %a) {
 entry:
   %cmp = icmp ule i64 %a, 0
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob
+  store i64 %conv1, ptr @glob
   ret void
 }
 
@@ -124,7 +124,7 @@ define dso_local void @test_ileull_sext_z_store(i64 %a) {
 entry:
   %cmp = icmp ule i64 %a, 0
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob
+  store i64 %conv1, ptr @glob
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesileus.ll b/llvm/test/CodeGen/PowerPC/testComparesileus.ll
index 9861332ffa6c..df8303acc0d3 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesileus.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesileus.ll
@@ -76,7 +76,7 @@ define dso_local void @test_ileus_store(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp ule i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @glob
+  store i16 %conv3, ptr @glob
   ret void
 }
 
@@ -93,7 +93,7 @@ define dso_local void @test_ileus_sext_store(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp ule i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @glob
+  store i16 %conv3, ptr @glob
   ret void
 }
 
@@ -109,7 +109,7 @@ define dso_local void @test_ileus_z_store(i16 zeroext %a) {
 entry:
   %cmp = icmp ule i16 %a, 0
   %conv2 = zext i1 %cmp to i16
-  store i16 %conv2, i16* @glob
+  store i16 %conv2, ptr @glob
   ret void
 }
 
@@ -126,7 +126,7 @@ define dso_local void @test_ileus_sext_z_store(i16 zeroext %a) {
 entry:
   %cmp = icmp ule i16 %a, 0
   %conv2 = sext i1 %cmp to i16
-  store i16 %conv2, i16* @glob
+  store i16 %conv2, ptr @glob
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesiltsc.ll b/llvm/test/CodeGen/PowerPC/testComparesiltsc.ll
index f94da027a51b..9d8a8374d28d 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesiltsc.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesiltsc.ll
@@ -58,7 +58,7 @@ define dso_local void @test_iltsc_store(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp slt i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -74,7 +74,7 @@ define dso_local void @test_iltsc_sext_store(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp slt i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -89,6 +89,6 @@ define dso_local void @test_iltsc_sext_z_store(i8 signext %a) {
 entry:
   %cmp = icmp slt i8 %a, 0
   %conv2 = sext i1 %cmp to i8
-  store i8 %conv2, i8* @glob, align 1
+  store i8 %conv2, ptr @glob, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesiltsi.ll b/llvm/test/CodeGen/PowerPC/testComparesiltsi.ll
index ef95b9f97433..c71827ad5610 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesiltsi.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesiltsi.ll
@@ -58,7 +58,7 @@ define dso_local void @test_iltsi_store(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp slt i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -74,7 +74,7 @@ define dso_local void @test_iltsi_sext_store(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp slt i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }
 
@@ -89,6 +89,6 @@ define dso_local void @test_iltsi_sext_z_store(i32 signext %a) {
 entry:
   %cmp = icmp slt i32 %a, 0
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesiltsll.ll b/llvm/test/CodeGen/PowerPC/testComparesiltsll.ll
index 52c256912555..eb9897174601 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesiltsll.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesiltsll.ll
@@ -68,7 +68,7 @@ define dso_local void @test_iltsll_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp slt i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -88,7 +88,7 @@ define dso_local void @test_iltsll_sext_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp slt i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -103,6 +103,6 @@ define dso_local void @test_iltsll_sext_z_store(i64 %a) {
 entry:
   %cmp = icmp slt i64 %a, 0
   %conv2 = sext i1 %cmp to i64
-  store i64 %conv2, i64* @glob, align 8
+  store i64 %conv2, ptr @glob, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesiltss.ll b/llvm/test/CodeGen/PowerPC/testComparesiltss.ll
index 7b989362b3a3..aa07cd6b87e4 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesiltss.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesiltss.ll
@@ -58,7 +58,7 @@ define dso_local void @test_iltss_store(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp slt i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -74,7 +74,7 @@ define dso_local void @test_iltss_sext_store(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp slt i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -89,6 +89,6 @@ define dso_local void @test_iltss_sext_z_store(i16 signext %a) {
 entry:
   %cmp = icmp slt i16 %a, 0
   %sub = sext i1 %cmp to i16
-  store i16 %sub, i16* @glob, align 2
+  store i16 %sub, ptr @glob, align 2
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesiltuc.ll b/llvm/test/CodeGen/PowerPC/testComparesiltuc.ll
index be3cb06bc971..f84a9a003068 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesiltuc.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesiltuc.ll
@@ -46,7 +46,7 @@ define dso_local void @test_iltuc_store(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp ult i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -62,6 +62,6 @@ define dso_local void @test_iltuc_sext_store(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp ult i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesiltui.ll b/llvm/test/CodeGen/PowerPC/testComparesiltui.ll
index 270ee35004ff..388b94a743f8 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesiltui.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesiltui.ll
@@ -46,7 +46,7 @@ define dso_local void @test_iltui_store(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp ult i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -62,6 +62,6 @@ define dso_local void @test_iltui_sext_store(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp ult i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesiltus.ll b/llvm/test/CodeGen/PowerPC/testComparesiltus.ll
index 895c09137d77..086be821e896 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesiltus.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesiltus.ll
@@ -46,7 +46,7 @@ define dso_local void @test_iltus_store(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp ult i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -62,6 +62,6 @@ define dso_local void @test_iltus_sext_store(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp ult i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesinesc.ll b/llvm/test/CodeGen/PowerPC/testComparesinesc.ll
index 70068389df53..da3d138bfbad 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesinesc.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesinesc.ll
@@ -156,7 +156,7 @@ define dso_local void @test_inesc_store(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp ne i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -195,7 +195,7 @@ define dso_local void @test_inesc_sext_store(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp ne i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -228,7 +228,7 @@ define dso_local void @test_inesc_z_store(i8 signext %a) {
 entry:
   %cmp = icmp ne i8 %a, 0
   %conv2 = zext i1 %cmp to i8
-  store i8 %conv2, i8* @glob, align 1
+  store i8 %conv2, ptr @glob, align 1
   ret void
 }
 
@@ -264,6 +264,6 @@ define dso_local void @test_inesc_sext_z_store(i8 signext %a) {
 entry:
   %cmp = icmp ne i8 %a, 0
   %conv2 = sext i1 %cmp to i8
-  store i8 %conv2, i8* @glob, align 1
+  store i8 %conv2, ptr @glob, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesinesi.ll b/llvm/test/CodeGen/PowerPC/testComparesinesi.ll
index 67d1bed02aaa..6a6fbb829707 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesinesi.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesinesi.ll
@@ -156,7 +156,7 @@ define dso_local void @test_inesi_store(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp ne i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -195,7 +195,7 @@ define dso_local void @test_inesi_sext_store(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp ne i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }
 
@@ -228,7 +228,7 @@ define dso_local void @test_inesi_z_store(i32 signext %a) {
 entry:
   %cmp = icmp ne i32 %a, 0
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -264,6 +264,6 @@ define dso_local void @test_inesi_sext_z_store(i32 signext %a) {
 entry:
   %cmp = icmp ne i32 %a, 0
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesinesll.ll b/llvm/test/CodeGen/PowerPC/testComparesinesll.ll
index e50a182b6be1..63475110d7c3 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesinesll.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesinesll.ll
@@ -135,7 +135,7 @@ define dso_local void @test_inesll_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp ne i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -168,7 +168,7 @@ define dso_local void @test_inesll_sext_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp ne i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -198,7 +198,7 @@ define dso_local void @test_inesll_z_store(i64 %a) {
 entry:
   %cmp = icmp ne i64 %a, 0
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -228,6 +228,6 @@ define dso_local void @test_inesll_sext_z_store(i64 %a) {
 entry:
   %cmp = icmp ne i64 %a, 0
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesiness.ll b/llvm/test/CodeGen/PowerPC/testComparesiness.ll
index 662599fa0804..c982278a174e 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesiness.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesiness.ll
@@ -156,7 +156,7 @@ define dso_local void @test_iness_store(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp ne i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -195,7 +195,7 @@ define dso_local void @test_iness_sext_store(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp ne i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -228,7 +228,7 @@ define dso_local void @test_iness_z_store(i16 signext %a) {
 entry:
   %cmp = icmp ne i16 %a, 0
   %conv2 = zext i1 %cmp to i16
-  store i16 %conv2, i16* @glob, align 2
+  store i16 %conv2, ptr @glob, align 2
   ret void
 }
 
@@ -264,6 +264,6 @@ define dso_local void @test_iness_sext_z_store(i16 signext %a) {
 entry:
   %cmp = icmp ne i16 %a, 0
   %conv2 = sext i1 %cmp to i16
-  store i16 %conv2, i16* @glob, align 2
+  store i16 %conv2, ptr @glob, align 2
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesineuc.ll b/llvm/test/CodeGen/PowerPC/testComparesineuc.ll
index a78d26a0d59b..61040d356556 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesineuc.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesineuc.ll
@@ -155,7 +155,7 @@ define dso_local void @test_ineuc_store(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp ne i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -194,7 +194,7 @@ define dso_local void @test_ineuc_sext_store(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp ne i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -227,7 +227,7 @@ define dso_local void @test_ineuc_z_store(i8 zeroext %a) {
 entry:
   %cmp = icmp ne i8 %a, 0
   %conv2 = zext i1 %cmp to i8
-  store i8 %conv2, i8* @glob, align 1
+  store i8 %conv2, ptr @glob, align 1
   ret void
 }
 
@@ -263,6 +263,6 @@ define dso_local void @test_ineuc_sext_z_store(i8 zeroext %a) {
 entry:
   %cmp = icmp ne i8 %a, 0
   %conv2 = sext i1 %cmp to i8
-  store i8 %conv2, i8* @glob, align 1
+  store i8 %conv2, ptr @glob, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesineui.ll b/llvm/test/CodeGen/PowerPC/testComparesineui.ll
index 910eab0bce57..43cfc2e05c7a 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesineui.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesineui.ll
@@ -156,7 +156,7 @@ define dso_local void @test_ineui_store(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp ne i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -195,7 +195,7 @@ define dso_local void @test_ineui_sext_store(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp ne i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }
 
@@ -228,7 +228,7 @@ define dso_local void @test_ineui_z_store(i32 zeroext %a) {
 entry:
   %cmp = icmp ne i32 %a, 0
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -264,6 +264,6 @@ define dso_local void @test_ineui_sext_z_store(i32 zeroext %a) {
 entry:
   %cmp = icmp ne i32 %a, 0
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesineull.ll b/llvm/test/CodeGen/PowerPC/testComparesineull.ll
index 9a9999d34e1f..0c2eac4098e7 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesineull.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesineull.ll
@@ -135,7 +135,7 @@ define dso_local void @test_ineull_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp ne i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -168,7 +168,7 @@ define dso_local void @test_ineull_sext_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp ne i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -198,7 +198,7 @@ define dso_local void @test_ineull_z_store(i64 %a) {
 entry:
   %cmp = icmp ne i64 %a, 0
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -228,6 +228,6 @@ define dso_local void @test_ineull_sext_z_store(i64 %a) {
 entry:
   %cmp = icmp ne i64 %a, 0
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesineus.ll b/llvm/test/CodeGen/PowerPC/testComparesineus.ll
index 3cb888703293..e96bb94bd13d 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesineus.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesineus.ll
@@ -156,7 +156,7 @@ define dso_local void @test_ineus_store(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp ne i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -195,7 +195,7 @@ define dso_local void @test_ineus_sext_store(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp ne i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -228,7 +228,7 @@ define dso_local void @test_ineus_z_store(i16 zeroext %a) {
 entry:
   %cmp = icmp ne i16 %a, 0
   %conv2 = zext i1 %cmp to i16
-  store i16 %conv2, i16* @glob, align 2
+  store i16 %conv2, ptr @glob, align 2
   ret void
 }
 
@@ -264,6 +264,6 @@ define dso_local void @test_ineus_sext_z_store(i16 zeroext %a) {
 entry:
   %cmp = icmp ne i16 %a, 0
   %conv2 = sext i1 %cmp to i16
-  store i16 %conv2, i16* @glob, align 2
+  store i16 %conv2, ptr @glob, align 2
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testCompareslleqsc.ll b/llvm/test/CodeGen/PowerPC/testCompareslleqsc.ll
index 57f9985ea714..718921d281a7 100644
--- a/llvm/test/CodeGen/PowerPC/testCompareslleqsc.ll
+++ b/llvm/test/CodeGen/PowerPC/testCompareslleqsc.ll
@@ -147,7 +147,7 @@ define dso_local void @test_lleqsc_store(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp eq i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -184,7 +184,7 @@ define dso_local void @test_lleqsc_sext_store(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp eq i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -215,7 +215,7 @@ define dso_local void @test_lleqsc_z_store(i8 signext %a) {
 entry:
   %cmp = icmp eq i8 %a, 0
   %conv2 = zext i1 %cmp to i8
-  store i8 %conv2, i8* @glob, align 1
+  store i8 %conv2, ptr @glob, align 1
   ret void
 }
 
@@ -249,6 +249,6 @@ define dso_local void @test_lleqsc_sext_z_store(i8 signext %a) {
 entry:
   %cmp = icmp eq i8 %a, 0
   %conv2 = sext i1 %cmp to i8
-  store i8 %conv2, i8* @glob, align 1
+  store i8 %conv2, ptr @glob, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testCompareslleqsi.ll b/llvm/test/CodeGen/PowerPC/testCompareslleqsi.ll
index dc0c8d47975f..368c0231242b 100644
--- a/llvm/test/CodeGen/PowerPC/testCompareslleqsi.ll
+++ b/llvm/test/CodeGen/PowerPC/testCompareslleqsi.ll
@@ -146,7 +146,7 @@ define dso_local void @test_lleqsi_store(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp eq i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -183,7 +183,7 @@ define dso_local void @test_lleqsi_sext_store(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp eq i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }
 
@@ -214,7 +214,7 @@ define dso_local void @test_lleqsi_z_store(i32 signext %a) {
 entry:
   %cmp = icmp eq i32 %a, 0
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -248,6 +248,6 @@ define dso_local void @test_lleqsi_sext_z_store(i32 signext %a) {
 entry:
   %cmp = icmp eq i32 %a, 0
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testCompareslleqsll.ll b/llvm/test/CodeGen/PowerPC/testCompareslleqsll.ll
index 4e01dc7ef987..23216b95e0ef 100644
--- a/llvm/test/CodeGen/PowerPC/testCompareslleqsll.ll
+++ b/llvm/test/CodeGen/PowerPC/testCompareslleqsll.ll
@@ -140,7 +140,7 @@ define dso_local void @test_lleqsll_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp eq i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -174,7 +174,7 @@ define dso_local void @test_lleqsll_sext_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp eq i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -205,7 +205,7 @@ define dso_local void @test_lleqsll_z_store(i64 %a) {
 entry:
   %cmp = icmp eq i64 %a, 0
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -236,6 +236,6 @@ define dso_local void @test_lleqsll_sext_z_store(i64 %a) {
 entry:
   %cmp = icmp eq i64 %a, 0
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testCompareslleqss.ll b/llvm/test/CodeGen/PowerPC/testCompareslleqss.ll
index 992b79fa28b9..9ede3bfb3f03 100644
--- a/llvm/test/CodeGen/PowerPC/testCompareslleqss.ll
+++ b/llvm/test/CodeGen/PowerPC/testCompareslleqss.ll
@@ -146,7 +146,7 @@ define dso_local void @test_lleqss_store(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp eq i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -183,7 +183,7 @@ define dso_local void @test_lleqss_sext_store(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp eq i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -214,7 +214,7 @@ define dso_local void @test_lleqss_z_store(i16 signext %a) {
 entry:
   %cmp = icmp eq i16 %a, 0
   %conv2 = zext i1 %cmp to i16
-  store i16 %conv2, i16* @glob, align 2
+  store i16 %conv2, ptr @glob, align 2
   ret void
 }
 
@@ -248,6 +248,6 @@ define dso_local void @test_lleqss_sext_z_store(i16 signext %a) {
 entry:
   %cmp = icmp eq i16 %a, 0
   %conv2 = sext i1 %cmp to i16
-  store i16 %conv2, i16* @glob, align 2
+  store i16 %conv2, ptr @glob, align 2
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllequc.ll b/llvm/test/CodeGen/PowerPC/testComparesllequc.ll
index acd865c9290b..b984283172dc 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllequc.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllequc.ll
@@ -146,7 +146,7 @@ define dso_local void @test_llequc_store(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp eq i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -183,7 +183,7 @@ define dso_local void @test_llequc_sext_store(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp eq i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -214,7 +214,7 @@ define dso_local void @test_llequc_z_store(i8 zeroext %a) {
 entry:
   %cmp = icmp eq i8 %a, 0
   %conv2 = zext i1 %cmp to i8
-  store i8 %conv2, i8* @glob, align 1
+  store i8 %conv2, ptr @glob, align 1
   ret void
 }
 
@@ -248,6 +248,6 @@ define dso_local void @test_llequc_sext_z_store(i8 zeroext %a) {
 entry:
   %cmp = icmp eq i8 %a, 0
   %conv2 = sext i1 %cmp to i8
-  store i8 %conv2, i8* @glob, align 1
+  store i8 %conv2, ptr @glob, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllequi.ll b/llvm/test/CodeGen/PowerPC/testComparesllequi.ll
index 37715dd1443f..46d7d40c7d6e 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllequi.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllequi.ll
@@ -146,7 +146,7 @@ define dso_local void @test_llequi_store(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp eq i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -183,7 +183,7 @@ define dso_local void @test_llequi_sext_store(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp eq i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }
 
@@ -214,7 +214,7 @@ define dso_local void @test_llequi_z_store(i32 zeroext %a) {
 entry:
   %cmp = icmp eq i32 %a, 0
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -248,6 +248,6 @@ define dso_local void @test_llequi_sext_z_store(i32 zeroext %a) {
 entry:
   %cmp = icmp eq i32 %a, 0
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllequll.ll b/llvm/test/CodeGen/PowerPC/testComparesllequll.ll
index a396d9c3c15c..367f3200ad1d 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllequll.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllequll.ll
@@ -140,7 +140,7 @@ define dso_local void @test_llequll_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp eq i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -174,7 +174,7 @@ define dso_local void @test_llequll_sext_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp eq i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -205,7 +205,7 @@ define dso_local void @test_llequll_z_store(i64 %a) {
 entry:
   %cmp = icmp eq i64 %a, 0
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -236,6 +236,6 @@ define dso_local void @test_llequll_sext_z_store(i64 %a) {
 entry:
   %cmp = icmp eq i64 %a, 0
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllequs.ll b/llvm/test/CodeGen/PowerPC/testComparesllequs.ll
index 224238b0ea66..abc8b623e9b6 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllequs.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllequs.ll
@@ -146,7 +146,7 @@ define dso_local void @test_llequs_store(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp eq i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -183,7 +183,7 @@ define dso_local void @test_llequs_sext_store(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp eq i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -214,7 +214,7 @@ define dso_local void @test_llequs_z_store(i16 zeroext %a) {
 entry:
   %cmp = icmp eq i16 %a, 0
   %conv2 = zext i1 %cmp to i16
-  store i16 %conv2, i16* @glob, align 2
+  store i16 %conv2, ptr @glob, align 2
   ret void
 }
 
@@ -248,6 +248,6 @@ define dso_local void @test_llequs_sext_z_store(i16 zeroext %a) {
 entry:
   %cmp = icmp eq i16 %a, 0
   %conv2 = sext i1 %cmp to i16
-  store i16 %conv2, i16* @glob, align 2
+  store i16 %conv2, ptr @glob, align 2
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllgesc.ll b/llvm/test/CodeGen/PowerPC/testComparesllgesc.ll
index 5a0152de5948..faebfadf2f5d 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllgesc.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllgesc.ll
@@ -88,7 +88,7 @@ define dso_local void @test_llgesc_store(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp sge i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -121,6 +121,6 @@ define dso_local void @test_llgesc_sext_store(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp sge i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllgesi.ll b/llvm/test/CodeGen/PowerPC/testComparesllgesi.ll
index 8e5f1bfc29d4..c2f34c403fcc 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllgesi.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllgesi.ll
@@ -88,7 +88,7 @@ define dso_local void @test_llgesi_store(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp sge i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -121,6 +121,6 @@ define dso_local void @test_llgesi_sext_store(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp sge i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllgesll.ll b/llvm/test/CodeGen/PowerPC/testComparesllgesll.ll
index ceba16fbd558..2a96ceaf32e8 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllgesll.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllgesll.ll
@@ -146,7 +146,7 @@ define dso_local void @test_llgesll_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp sge i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -185,7 +185,7 @@ define dso_local void @test_llgesll_sext_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp sge i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -215,7 +215,7 @@ define dso_local void @test_llgesll_z_store(i64 %a) {
 entry:
   %cmp = icmp sgt i64 %a, -1
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -245,6 +245,6 @@ define dso_local void @test_llgesll_sext_z_store(i64 %a) {
 entry:
   %cmp = icmp sgt i64 %a, -1
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllgess.ll b/llvm/test/CodeGen/PowerPC/testComparesllgess.ll
index bdf0fc44303f..2205d1a4ac84 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllgess.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllgess.ll
@@ -88,7 +88,7 @@ define dso_local void @test_llgess_store(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp sge i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -121,6 +121,6 @@ define dso_local void @test_llgess_sext_store(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp sge i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllgeuc.ll b/llvm/test/CodeGen/PowerPC/testComparesllgeuc.ll
index 92e91d78b8c2..509104931f01 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllgeuc.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllgeuc.ll
@@ -73,7 +73,7 @@ define dso_local void @test_llgeuc_store(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp uge i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @glob
+  store i8 %conv3, ptr @glob
   ret void
 }
 
@@ -90,7 +90,7 @@ define dso_local void @test_llgeuc_sext_store(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp uge i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @glob
+  store i8 %conv3, ptr @glob
   ret void
 }
 
@@ -105,7 +105,7 @@ define dso_local void @test_llgeuc_z_store(i8 zeroext %a) {
 entry:
   %cmp = icmp uge i8 %a, 0
   %conv1 = zext i1 %cmp to i8
-  store i8 %conv1, i8* @glob
+  store i8 %conv1, ptr @glob
   ret void
 }
 
@@ -120,7 +120,7 @@ define dso_local void @test_llgeuc_sext_z_store(i8 zeroext %a) {
 entry:
   %cmp = icmp uge i8 %a, 0
   %conv1 = sext i1 %cmp to i8
-  store i8 %conv1, i8* @glob
+  store i8 %conv1, ptr @glob
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllgeui.ll b/llvm/test/CodeGen/PowerPC/testComparesllgeui.ll
index f75b5f89a6cd..b6b07e478f81 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllgeui.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllgeui.ll
@@ -73,7 +73,7 @@ define dso_local void @test_llgeui_store(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp uge i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob
+  store i32 %conv, ptr @glob
   ret void
 }
 
@@ -90,7 +90,7 @@ define dso_local void @test_llgeui_sext_store(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp uge i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob
+  store i32 %sub, ptr @glob
   ret void
 }
 
@@ -105,7 +105,7 @@ define dso_local void @test_llgeui_z_store(i32 zeroext %a) {
 entry:
   %cmp = icmp uge i32 %a, 0
   %sub = zext i1 %cmp to i32
-  store i32 %sub, i32* @glob
+  store i32 %sub, ptr @glob
   ret void
 }
 
@@ -120,7 +120,7 @@ define dso_local void @test_llgeui_sext_z_store(i32 zeroext %a) {
 entry:
   %cmp = icmp uge i32 %a, 0
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob
+  store i32 %sub, ptr @glob
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllgeull.ll b/llvm/test/CodeGen/PowerPC/testComparesllgeull.ll
index 6e713f7ee90b..1bd5509fc1ef 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllgeull.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllgeull.ll
@@ -73,7 +73,7 @@ define dso_local void @test_llgeull_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp uge i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob
+  store i64 %conv1, ptr @glob
   ret void
 }
 
@@ -90,7 +90,7 @@ define dso_local void @test_llgeull_sext_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp uge i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob
+  store i64 %conv1, ptr @glob
   ret void
 }
 
@@ -105,7 +105,7 @@ define dso_local void @test_llgeull_z_store(i64 %a) {
 entry:
   %cmp = icmp uge i64 %a, 0
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob
+  store i64 %conv1, ptr @glob
   ret void
 }
 
@@ -118,7 +118,7 @@ define dso_local void @test_llgeull_sext_z_store(i64 %a) {
 ; CHECK-NEXT:    std r4, glob@toc@l(r3)
 ; CHECK-NEXT:    blr
 entry:
-  store i64 -1, i64* @glob
+  store i64 -1, ptr @glob
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllgeus.ll b/llvm/test/CodeGen/PowerPC/testComparesllgeus.ll
index 8c41f1d7ec2d..011df13e2b4e 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllgeus.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllgeus.ll
@@ -73,7 +73,7 @@ define dso_local void @test_llgeus_store(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp uge i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @glob
+  store i16 %conv3, ptr @glob
   ret void
 }
 
@@ -90,7 +90,7 @@ define dso_local void @test_llgeus_sext_store(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp uge i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @glob
+  store i16 %conv3, ptr @glob
   ret void
 }
 
@@ -105,7 +105,7 @@ define dso_local void @test_llgeus_z_store(i16 zeroext %a) {
 entry:
   %cmp = icmp uge i16 %a, 0
   %conv1 = zext i1 %cmp to i16
-  store i16 %conv1, i16* @glob
+  store i16 %conv1, ptr @glob
   ret void
 }
 
@@ -120,7 +120,7 @@ define dso_local void @test_llgeus_sext_z_store(i16 zeroext %a) {
 entry:
   %cmp = icmp uge i16 %a, 0
   %conv1 = sext i1 %cmp to i16
-  store i16 %conv1, i16* @glob
+  store i16 %conv1, ptr @glob
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllgtsll.ll b/llvm/test/CodeGen/PowerPC/testComparesllgtsll.ll
index f7dff13de8e8..105692ab717a 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllgtsll.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllgtsll.ll
@@ -86,7 +86,7 @@ define void @test_llgtsll_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp sgt i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -107,7 +107,7 @@ define void @test_llgtsll_sext_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp sgt i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -126,7 +126,7 @@ define void @test_llgtsll_z_store(i64 %a) {
 entry:
   %cmp = icmp sgt i64 %a, 0
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -144,6 +144,6 @@ define void @test_llgtsll_sext_z_store(i64 %a) {
 entry:
   %cmp = icmp sgt i64 %a, 0
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllgtuc.ll b/llvm/test/CodeGen/PowerPC/testComparesllgtuc.ll
index e535d30d2eba..919d8fdf501c 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllgtuc.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllgtuc.ll
@@ -76,7 +76,7 @@ define void @test_llgtuc_store(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp ugt i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -93,7 +93,7 @@ define void @test_llgtuc_sext_store(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp ugt i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -111,7 +111,7 @@ define void @test_llgtuc_z_store(i8 zeroext %a) {
 entry:
   %cmp = icmp ne i8 %a, 0
   %conv2 = zext i1 %cmp to i8
-  store i8 %conv2, i8* @glob, align 1
+  store i8 %conv2, ptr @glob, align 1
   ret void
 }
 
@@ -130,6 +130,6 @@ define void @test_llgtuc_sext_z_store(i8 zeroext %a) {
 entry:
   %cmp = icmp ne i8 %a, 0
   %conv2 = sext i1 %cmp to i8
-  store i8 %conv2, i8* @glob, align 1
+  store i8 %conv2, ptr @glob, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllgtui.ll b/llvm/test/CodeGen/PowerPC/testComparesllgtui.ll
index 3c6b2f1c2759..d1c3a232c98d 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllgtui.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllgtui.ll
@@ -76,7 +76,7 @@ define void @test_llgtui_store(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp ugt i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -93,7 +93,7 @@ define void @test_llgtui_sext_store(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp ugt i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }
 
@@ -111,7 +111,7 @@ define void @test_llgtui_z_store(i32 zeroext %a) {
 entry:
   %cmp = icmp ne i32 %a, 0
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -130,6 +130,6 @@ define void @test_llgtui_sext_z_store(i32 zeroext %a) {
 entry:
   %cmp = icmp ne i32 %a, 0
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllgtus.ll b/llvm/test/CodeGen/PowerPC/testComparesllgtus.ll
index 1dbd9ca80bec..88338d51ee2a 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllgtus.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllgtus.ll
@@ -76,7 +76,7 @@ define void @test_llgtus_store(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp ugt i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -93,7 +93,7 @@ define void @test_llgtus_sext_store(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp ugt i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -111,7 +111,7 @@ define void @test_llgtus_z_store(i16 zeroext %a) {
 entry:
   %cmp = icmp ne i16 %a, 0
   %conv2 = zext i1 %cmp to i16
-  store i16 %conv2, i16* @glob, align 2
+  store i16 %conv2, ptr @glob, align 2
   ret void
 }
 
@@ -130,7 +130,7 @@ define void @test_llgtus_sext_z_store(i16 zeroext %a) {
 entry:
   %cmp = icmp ne i16 %a, 0
   %conv2 = sext i1 %cmp to i16
-  store i16 %conv2, i16* @glob, align 2
+  store i16 %conv2, ptr @glob, align 2
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/testCompareslllesc.ll b/llvm/test/CodeGen/PowerPC/testCompareslllesc.ll
index 25910f33d049..2fa4933658f4 100644
--- a/llvm/test/CodeGen/PowerPC/testCompareslllesc.ll
+++ b/llvm/test/CodeGen/PowerPC/testCompareslllesc.ll
@@ -89,7 +89,7 @@ define dso_local void @test_lllesc_store(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp sle i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -122,6 +122,6 @@ define dso_local void @test_lllesc_sext_store(i8 signext %a, i8 signext %b) {
 entry:
   %cmp = icmp sle i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testCompareslllesi.ll b/llvm/test/CodeGen/PowerPC/testCompareslllesi.ll
index c8470ce79fe2..fe4b6ffa4b4b 100644
--- a/llvm/test/CodeGen/PowerPC/testCompareslllesi.ll
+++ b/llvm/test/CodeGen/PowerPC/testCompareslllesi.ll
@@ -89,7 +89,7 @@ define dso_local void @test_lllesi_store(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp sle i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -122,6 +122,6 @@ define dso_local void @test_lllesi_sext_store(i32 signext %a, i32 signext %b) {
 entry:
   %cmp = icmp sle i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testCompareslllesll.ll b/llvm/test/CodeGen/PowerPC/testCompareslllesll.ll
index 570465195e9a..065253d0d985 100644
--- a/llvm/test/CodeGen/PowerPC/testCompareslllesll.ll
+++ b/llvm/test/CodeGen/PowerPC/testCompareslllesll.ll
@@ -157,7 +157,7 @@ define dso_local void @test_lllesll_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp sle i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -197,7 +197,7 @@ define dso_local void @test_lllesll_sext_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp sle i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -231,7 +231,7 @@ define dso_local void @test_lllesll_z_store(i64 %a) {
 entry:
   %cmp = icmp slt i64 %a, 1
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -265,6 +265,6 @@ define dso_local void @test_lllesll_sext_z_store(i64 %a) {
 entry:
   %cmp = icmp slt i64 %a, 1
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllless.ll b/llvm/test/CodeGen/PowerPC/testComparesllless.ll
index ba963934b925..f8db96c8bb3b 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllless.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllless.ll
@@ -89,7 +89,7 @@ define dso_local void @test_llless_store(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp sle i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -122,6 +122,6 @@ define dso_local void @test_llless_sext_store(i16 signext %a, i16 signext %b) {
 entry:
   %cmp = icmp sle i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllleuc.ll b/llvm/test/CodeGen/PowerPC/testComparesllleuc.ll
index 9dbf1584002e..6e9ce7c7c5ef 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllleuc.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllleuc.ll
@@ -76,7 +76,7 @@ define dso_local void @test_llleuc_store(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp ule i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @glob
+  store i8 %conv3, ptr @glob
   ret void
 }
 
@@ -93,7 +93,7 @@ define dso_local void @test_llleuc_sext_store(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp ule i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @glob
+  store i8 %conv3, ptr @glob
   ret void
 }
 
@@ -109,7 +109,7 @@ define dso_local void @test_llleuc_z_store(i8 zeroext %a) {
 entry:
   %cmp = icmp ule i8 %a, 0
   %conv2 = zext i1 %cmp to i8
-  store i8 %conv2, i8* @glob
+  store i8 %conv2, ptr @glob
   ret void
 }
 
@@ -126,6 +126,6 @@ define dso_local void @test_llleuc_sext_z_store(i8 zeroext %a) {
 entry:
   %cmp = icmp ule i8 %a, 0
   %conv2 = sext i1 %cmp to i8
-  store i8 %conv2, i8* @glob
+  store i8 %conv2, ptr @glob
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllleui.ll b/llvm/test/CodeGen/PowerPC/testComparesllleui.ll
index 75e143d1695b..bf94ab723e43 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllleui.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllleui.ll
@@ -76,7 +76,7 @@ define dso_local void @test_llleui_store(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp ule i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob
+  store i32 %conv, ptr @glob
   ret void
 }
 
@@ -93,7 +93,7 @@ define dso_local void @test_llleui_sext_store(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp ule i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob
+  store i32 %sub, ptr @glob
   ret void
 }
 
@@ -109,7 +109,7 @@ define dso_local void @test_llleui_z_store(i32 zeroext %a) {
 entry:
   %cmp = icmp ule i32 %a, 0
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob
+  store i32 %conv, ptr @glob
   ret void
 }
 
@@ -126,7 +126,7 @@ define dso_local void @test_llleui_sext_z_store(i32 zeroext %a) {
 entry:
   %cmp = icmp ule i32 %a, 0
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob
+  store i32 %sub, ptr @glob
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllleull.ll b/llvm/test/CodeGen/PowerPC/testComparesllleull.ll
index e1de2f28cf27..881043b5ee1f 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllleull.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllleull.ll
@@ -75,7 +75,7 @@ define dso_local void @test_llleull_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp ule i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob
+  store i64 %conv1, ptr @glob
   ret void
 }
 
@@ -92,7 +92,7 @@ define dso_local void @test_llleull_sext_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp ule i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob
+  store i64 %conv1, ptr @glob
   ret void
 }
 
@@ -108,7 +108,7 @@ define dso_local void @test_llleull_z_store(i64 %a) {
 entry:
   %cmp = icmp ule i64 %a, 0
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob
+  store i64 %conv1, ptr @glob
   ret void
 }
 
@@ -124,7 +124,7 @@ define dso_local void @test_llleull_sext_z_store(i64 %a) {
 entry:
   %cmp = icmp ule i64 %a, 0
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob
+  store i64 %conv1, ptr @glob
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllleus.ll b/llvm/test/CodeGen/PowerPC/testComparesllleus.ll
index 9974e316066a..3b3ec08bf9d5 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllleus.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllleus.ll
@@ -76,7 +76,7 @@ define dso_local void @test_llleus_store(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp ule i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @glob
+  store i16 %conv3, ptr @glob
   ret void
 }
 
@@ -93,7 +93,7 @@ define dso_local void @test_llleus_sext_store(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp ule i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @glob
+  store i16 %conv3, ptr @glob
   ret void
 }
 
@@ -109,7 +109,7 @@ define dso_local void @test_llleus_z_store(i16 zeroext %a) {
 entry:
   %cmp = icmp ule i16 %a, 0
   %conv2 = zext i1 %cmp to i16
-  store i16 %conv2, i16* @glob
+  store i16 %conv2, ptr @glob
   ret void
 }
 
@@ -126,7 +126,7 @@ define dso_local void @test_llleus_sext_z_store(i16 zeroext %a) {
 entry:
   %cmp = icmp ule i16 %a, 0
   %conv2 = sext i1 %cmp to i16
-  store i16 %conv2, i16* @glob
+  store i16 %conv2, ptr @glob
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllltsll.ll b/llvm/test/CodeGen/PowerPC/testComparesllltsll.ll
index 7af71b3cf85a..a1be92ec371b 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllltsll.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllltsll.ll
@@ -68,7 +68,7 @@ define dso_local void @test_llltsll_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp slt i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -88,7 +88,7 @@ define dso_local void @test_llltsll_sext_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp slt i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -103,6 +103,6 @@ define dso_local void @test_llltsll_sext_z_store(i64 %a) {
 entry:
   %cmp = icmp slt i64 %a, 0
   %sub = sext i1 %cmp to i64
-  store i64 %sub, i64* @glob, align 8
+  store i64 %sub, ptr @glob, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllltuc.ll b/llvm/test/CodeGen/PowerPC/testComparesllltuc.ll
index 06e0cace0bd2..5b068f9e5c46 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllltuc.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllltuc.ll
@@ -46,7 +46,7 @@ define dso_local void @test_llltuc_store(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp ult i8 %a, %b
   %conv3 = zext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }
 
@@ -62,6 +62,6 @@ define dso_local void @test_llltuc_sext_store(i8 zeroext %a, i8 zeroext %b) {
 entry:
   %cmp = icmp ult i8 %a, %b
   %conv3 = sext i1 %cmp to i8
-  store i8 %conv3, i8* @glob, align 1
+  store i8 %conv3, ptr @glob, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllltui.ll b/llvm/test/CodeGen/PowerPC/testComparesllltui.ll
index 50a0ff11961d..ebe05589bde2 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllltui.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllltui.ll
@@ -67,7 +67,7 @@ define void @test_llltui_store(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp ult i32 %a, %b
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @glob, align 4
+  store i32 %conv, ptr @glob, align 4
   ret void
 }
 
@@ -84,7 +84,7 @@ define void @test_llltui_sext_store(i32 zeroext %a, i32 zeroext %b) {
 entry:
   %cmp = icmp ult i32 %a, %b
   %sub = sext i1 %cmp to i32
-  store i32 %sub, i32* @glob, align 4
+  store i32 %sub, ptr @glob, align 4
   ret void
 }
 
@@ -98,7 +98,7 @@ define void @test_llltui_z_store(i32 zeroext %a) {
 ; CHECK-NEXT:    stw r4, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  store i32 0, i32* @glob, align 4
+  store i32 0, ptr @glob, align 4
   ret void
 }
 
@@ -112,7 +112,7 @@ define void @test_llltui_sext_z_store(i32 zeroext %a) {
 ; CHECK-NEXT:    stw r4, 0(r3)
 ; CHECK-NEXT:    blr
 entry:
-  store i32 0, i32* @glob, align 4
+  store i32 0, ptr @glob, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllltus.ll b/llvm/test/CodeGen/PowerPC/testComparesllltus.ll
index 0e8ea8e004ac..96b9f463cfc7 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllltus.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllltus.ll
@@ -46,7 +46,7 @@ define dso_local void @test_llltus_store(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp ult i16 %a, %b
   %conv3 = zext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }
 
@@ -62,6 +62,6 @@ define dso_local void @test_llltus_sext_store(i16 zeroext %a, i16 zeroext %b) {
 entry:
   %cmp = icmp ult i16 %a, %b
   %conv3 = sext i1 %cmp to i16
-  store i16 %conv3, i16* @glob, align 2
+  store i16 %conv3, ptr @glob, align 2
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllnesll.ll b/llvm/test/CodeGen/PowerPC/testComparesllnesll.ll
index 7528c33e8d38..47f051adf498 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllnesll.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllnesll.ll
@@ -135,7 +135,7 @@ define dso_local void @test_llnesll_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp ne i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -168,7 +168,7 @@ define dso_local void @test_llnesll_sext_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp ne i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -198,7 +198,7 @@ define dso_local void @test_llnesll_z_store(i64 %a) {
 entry:
   %cmp = icmp ne i64 %a, 0
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -228,6 +228,6 @@ define dso_local void @test_llnesll_sext_z_store(i64 %a) {
 entry:
   %cmp = icmp ne i64 %a, 0
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/testComparesllneull.ll b/llvm/test/CodeGen/PowerPC/testComparesllneull.ll
index d6b260438fd5..bc2b2e0b237e 100644
--- a/llvm/test/CodeGen/PowerPC/testComparesllneull.ll
+++ b/llvm/test/CodeGen/PowerPC/testComparesllneull.ll
@@ -135,7 +135,7 @@ define dso_local void @test_llneull_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp ne i64 %a, %b
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -168,7 +168,7 @@ define dso_local void @test_llneull_sext_store(i64 %a, i64 %b) {
 entry:
   %cmp = icmp ne i64 %a, %b
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -198,7 +198,7 @@ define dso_local void @test_llneull_z_store(i64 %a) {
 entry:
   %cmp = icmp ne i64 %a, 0
   %conv1 = zext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }
 
@@ -228,6 +228,6 @@ define dso_local void @test_llneull_sext_z_store(i64 %a) {
 entry:
   %cmp = icmp ne i64 %a, 0
   %conv1 = sext i1 %cmp to i64
-  store i64 %conv1, i64* @glob, align 8
+  store i64 %conv1, ptr @glob, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/test_call_aix.ll b/llvm/test/CodeGen/PowerPC/test_call_aix.ll
index 4065fc860307..56bd7263a3c2 100644
--- a/llvm/test/CodeGen/PowerPC/test_call_aix.ll
+++ b/llvm/test/CodeGen/PowerPC/test_call_aix.ll
@@ -23,7 +23,7 @@ entry:
 ; CHECK: bl .foo
 ; CHECK-NEXT: nop
 
-  call void bitcast (void (...)* @foo to void ()*)()
+  call void @foo()
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/test_func_desc.ll b/llvm/test/CodeGen/PowerPC/test_func_desc.ll
index 63154fd94cde..9d2e26d5aa19 100644
--- a/llvm/test/CodeGen/PowerPC/test_func_desc.ll
+++ b/llvm/test/CodeGen/PowerPC/test_func_desc.ll
@@ -13,7 +13,7 @@ entry:
 define i32 @main() {
 entry:
   %0 = call i32 @foo()
-  %1 = call i32 bitcast (i32 (...)* @extern_foo to i32 ()*)()
+  %1 = call i32 @extern_foo()
   %2 = call i32 @static_foo()
   %3 = add nsw i32 %0, %1
   %4 = add nsw i32 %3, %2

diff  --git a/llvm/test/CodeGen/PowerPC/thread-pointer.ll b/llvm/test/CodeGen/PowerPC/thread-pointer.ll
index c094aefdf988..8202411ea903 100644
--- a/llvm/test/CodeGen/PowerPC/thread-pointer.ll
+++ b/llvm/test/CodeGen/PowerPC/thread-pointer.ll
@@ -3,15 +3,15 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64le-unknown-linux-gnu | FileCheck %s --check-prefix=CHECK-64
 
 ; Function Attrs: nounwind readnone
-declare i8* @llvm.thread.pointer() #1
+declare ptr @llvm.thread.pointer() #1
 
-define i8* @thread_pointer() {
+define ptr @thread_pointer() {
 ; CHECK-32-LABEL: @thread_pointer
 ; CHECK-32: mr 3, 2
 ; CHECK-32: blr
 ; CHECK-64-LABEL: @thread_pointer
 ; CHECK-64: mr 3, 13
 ; CHECK-64: blr
-  %1 = tail call i8* @llvm.thread.pointer()
-  ret i8* %1
+  %1 = tail call ptr @llvm.thread.pointer()
+  ret ptr %1
 }

diff  --git a/llvm/test/CodeGen/PowerPC/tls-cse.ll b/llvm/test/CodeGen/PowerPC/tls-cse.ll
index c770209c6ca5..de4c1768e128 100644
--- a/llvm/test/CodeGen/PowerPC/tls-cse.ll
+++ b/llvm/test/CodeGen/PowerPC/tls-cse.ll
@@ -8,38 +8,35 @@
 target datalayout = "e-m:e-i64:64-n32:64"
 target triple = "powerpc64le-unknown-linux-gnu"
 
-%"class.llvm::PrettyStackTraceEntry" = type { i32 (...)**, %"class.llvm::PrettyStackTraceEntry"* }
+%"class.llvm::PrettyStackTraceEntry" = type { ptr, ptr }
 
-@_ZTVN4llvm21PrettyStackTraceEntryE = unnamed_addr constant [5 x i8*] [i8* null, i8* null, i8* bitcast (void (%"class.llvm::PrettyStackTraceEntry"*)* @_ZN4llvm21PrettyStackTraceEntryD2Ev to i8*), i8* bitcast (void (%"class.llvm::PrettyStackTraceEntry"*)* @_ZN4llvm21PrettyStackTraceEntryD0Ev to i8*), i8* bitcast (void ()* @__cxa_pure_virtual to i8*)], align 8
-@_ZL20PrettyStackTraceHead = internal thread_local unnamed_addr global %"class.llvm::PrettyStackTraceEntry"* null, align 8
+@_ZTVN4llvm21PrettyStackTraceEntryE = unnamed_addr constant [5 x ptr] [ptr null, ptr null, ptr @_ZN4llvm21PrettyStackTraceEntryD2Ev, ptr @_ZN4llvm21PrettyStackTraceEntryD0Ev, ptr @__cxa_pure_virtual], align 8
+@_ZL20PrettyStackTraceHead = internal thread_local unnamed_addr global ptr null, align 8
 @.str = private unnamed_addr constant [87 x i8] c"PrettyStackTraceHead == this && \22Pretty stack trace entry destruction is out of order\22\00", align 1
 @.str1 = private unnamed_addr constant [64 x i8] c"/home/wschmidt/llvm/llvm-test2/lib/Support/PrettyStackTrace.cpp\00", align 1
 @__PRETTY_FUNCTION__._ZN4llvm21PrettyStackTraceEntryD2Ev = private unnamed_addr constant [62 x i8] c"virtual llvm::PrettyStackTraceEntry::~PrettyStackTraceEntry()\00", align 1
 
-declare void @_ZN4llvm21PrettyStackTraceEntryD2Ev(%"class.llvm::PrettyStackTraceEntry"* %this) unnamed_addr
+declare void @_ZN4llvm21PrettyStackTraceEntryD2Ev(ptr %this) unnamed_addr
 declare void @__cxa_pure_virtual()
-declare void @__assert_fail(i8*, i8*, i32 zeroext, i8*)
-declare void @_ZdlPv(i8*)
+declare void @__assert_fail(ptr, ptr, i32 zeroext, ptr)
+declare void @_ZdlPv(ptr)
 
-define void @_ZN4llvm21PrettyStackTraceEntryD0Ev(%"class.llvm::PrettyStackTraceEntry"* %this) unnamed_addr align 2 {
+define void @_ZN4llvm21PrettyStackTraceEntryD0Ev(ptr %this) unnamed_addr align 2 {
 entry:
-  %0 = getelementptr inbounds %"class.llvm::PrettyStackTraceEntry", %"class.llvm::PrettyStackTraceEntry"* %this, i64 0, i32 0
-  store i32 (...)** bitcast (i8** getelementptr inbounds ([5 x i8*], [5 x i8*]* @_ZTVN4llvm21PrettyStackTraceEntryE, i64 0, i64 2) to i32 (...)**), i32 (...)*** %0, align 8
-  %1 = load %"class.llvm::PrettyStackTraceEntry"*, %"class.llvm::PrettyStackTraceEntry"** @_ZL20PrettyStackTraceHead, align 8
-  %cmp.i = icmp eq %"class.llvm::PrettyStackTraceEntry"* %1, %this
+  store ptr getelementptr inbounds ([5 x ptr], ptr @_ZTVN4llvm21PrettyStackTraceEntryE, i64 0, i64 2), ptr %this, align 8
+  %0 = load ptr, ptr @_ZL20PrettyStackTraceHead, align 8
+  %cmp.i = icmp eq ptr %0, %this
   br i1 %cmp.i, label %_ZN4llvm21PrettyStackTraceEntryD2Ev.exit, label %cond.false.i
 
 cond.false.i:                                     ; preds = %entry
-  tail call void @__assert_fail(i8* getelementptr inbounds ([87 x i8], [87 x i8]* @.str, i64 0, i64 0), i8* getelementptr inbounds ([64 x i8], [64 x i8]* @.str1, i64 0, i64 0), i32 zeroext 119, i8* getelementptr inbounds ([62 x i8], [62 x i8]* @__PRETTY_FUNCTION__._ZN4llvm21PrettyStackTraceEntryD2Ev, i64 0, i64 0))
+  tail call void @__assert_fail(ptr @.str, ptr @.str1, i32 zeroext 119, ptr @__PRETTY_FUNCTION__._ZN4llvm21PrettyStackTraceEntryD2Ev)
   unreachable
 
 _ZN4llvm21PrettyStackTraceEntryD2Ev.exit:         ; preds = %entry
-  %NextEntry.i.i = getelementptr inbounds %"class.llvm::PrettyStackTraceEntry", %"class.llvm::PrettyStackTraceEntry"* %this, i64 0, i32 1
-  %2 = bitcast %"class.llvm::PrettyStackTraceEntry"** %NextEntry.i.i to i64*
-  %3 = load i64, i64* %2, align 8
-  store i64 %3, i64* bitcast (%"class.llvm::PrettyStackTraceEntry"** @_ZL20PrettyStackTraceHead to i64*), align 8
-  %4 = bitcast %"class.llvm::PrettyStackTraceEntry"* %this to i8*
-  tail call void @_ZdlPv(i8* %4)
+  %NextEntry.i.i = getelementptr inbounds %"class.llvm::PrettyStackTraceEntry", ptr %this, i64 0, i32 1
+  %1 = load i64, ptr %NextEntry.i.i, align 8
+  store i64 %1, ptr @_ZL20PrettyStackTraceHead, align 8
+  tail call void @_ZdlPv(ptr %this)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/tls-debug-aix.ll b/llvm/test/CodeGen/PowerPC/tls-debug-aix.ll
index edcf2f0c42da..3dd0237b3008 100644
--- a/llvm/test/CodeGen/PowerPC/tls-debug-aix.ll
+++ b/llvm/test/CodeGen/PowerPC/tls-debug-aix.ll
@@ -26,8 +26,8 @@
 define i32 @foo() !dbg !12 {
 entry:
   %retval = alloca i32, align 4
-  store i32 0, i32* %retval, align 4
-  %0 = load i32, i32* @i, align 4, !dbg !16
+  store i32 0, ptr %retval, align 4
+  %0 = load i32, ptr @i, align 4, !dbg !16
   ret i32 %0, !dbg !16
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/tls-pic.ll b/llvm/test/CodeGen/PowerPC/tls-pic.ll
index db60ec7e22a7..50254d469c1a 100644
--- a/llvm/test/CodeGen/PowerPC/tls-pic.ll
+++ b/llvm/test/CodeGen/PowerPC/tls-pic.ll
@@ -12,8 +12,8 @@ target triple = "powerpc64-unknown-linux-gnu"
 define signext i32 @main() nounwind {
 entry:
   %retval = alloca i32, align 4
-  store i32 0, i32* %retval
-  %0 = load i32, i32* @a, align 4
+  store i32 0, ptr %retval
+  %0 = load i32, ptr @a, align 4
   ret i32 %0
 }
 
@@ -54,8 +54,8 @@ entry:
 define signext i32 @main2() nounwind {
 entry:
   %retval = alloca i32, align 4
-  store i32 0, i32* %retval
-  %0 = load i32, i32* @a2, align 4
+  store i32 0, ptr %retval
+  %0 = load i32, ptr @a2, align 4
   ret i32 %0
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/tls-pie-xform.ll b/llvm/test/CodeGen/PowerPC/tls-pie-xform.ll
index 92b0ae380597..a6619a58b241 100644
--- a/llvm/test/CodeGen/PowerPC/tls-pie-xform.ll
+++ b/llvm/test/CodeGen/PowerPC/tls-pie-xform.ll
@@ -14,7 +14,7 @@ define dso_local zeroext i8 @test_char_one() {
 ; CHECK-NEXT:    lbzx 3, 3, var_char at tls
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* @var_char, align 1, !tbaa !4
+  %0 = load i8, ptr @var_char, align 1, !tbaa !4
   ret i8 %0
 }
 
@@ -27,7 +27,7 @@ define dso_local void @test_char_two(i32 signext %a) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i32 %a to i8
-  store i8 %conv, i8* @var_char, align 1, !tbaa !4
+  store i8 %conv, ptr @var_char, align 1, !tbaa !4
   ret void
 }
 
@@ -42,9 +42,9 @@ define dso_local zeroext i8 @test_char_three(i8 zeroext %a) {
 ; CHECK-NEXT:    stbx 5, 4, var_char at tls
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, i8* @var_char, align 1, !tbaa !4
+  %0 = load i8, ptr @var_char, align 1, !tbaa !4
   %add = add i8 %0, %a
-  store i8 %add, i8* @var_char, align 1, !tbaa !4
+  store i8 %add, ptr @var_char, align 1, !tbaa !4
   ret i8 %add
 }
 
@@ -56,7 +56,7 @@ define dso_local signext i16 @test_short_one() {
 ; CHECK-NEXT:    lhzx 3, 3, var_short at tls
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* @var_short, align 2, !tbaa !7
+  %0 = load i16, ptr @var_short, align 2, !tbaa !7
   ret i16 %0
 }
 
@@ -69,7 +69,7 @@ define dso_local void @test_short_two(i32 signext %a) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = trunc i32 %a to i16
-  store i16 %conv, i16* @var_short, align 2, !tbaa !7
+  store i16 %conv, ptr @var_short, align 2, !tbaa !7
   ret void
 }
 
@@ -84,9 +84,9 @@ define dso_local signext i16 @test_short_three(i16 signext %a) {
 ; CHECK-NEXT:    sthx 5, 4, var_short at tls
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* @var_short, align 2, !tbaa !7
+  %0 = load i16, ptr @var_short, align 2, !tbaa !7
   %add = add i16 %0, %a
-  store i16 %add, i16* @var_short, align 2, !tbaa !7
+  store i16 %add, ptr @var_short, align 2, !tbaa !7
   ret i16 %add
 }
 
@@ -98,7 +98,7 @@ define dso_local signext i32 @test_int_one() {
 ; CHECK-NEXT:    lwzx 3, 3, var_int at tls
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* @var_int, align 4, !tbaa !9
+  %0 = load i32, ptr @var_int, align 4, !tbaa !9
   ret i32 %0
 }
 
@@ -110,7 +110,7 @@ define dso_local void @test_int_two(i32 signext %a) {
 ; CHECK-NEXT:    stwx 3, 4, var_int at tls
 ; CHECK-NEXT:    blr
 entry:
-  store i32 %a, i32* @var_int, align 4, !tbaa !9
+  store i32 %a, ptr @var_int, align 4, !tbaa !9
   ret void
 }
 
@@ -125,9 +125,9 @@ define dso_local signext i32 @test_int_three(i32 signext %a) {
 ; CHECK-NEXT:    stwx 5, 4, var_int at tls
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i32, i32* @var_int, align 4, !tbaa !9
+  %0 = load i32, ptr @var_int, align 4, !tbaa !9
   %add = add nsw i32 %0, %a
-  store i32 %add, i32* @var_int, align 4, !tbaa !9
+  store i32 %add, ptr @var_int, align 4, !tbaa !9
   ret i32 %add
 }
 
@@ -139,7 +139,7 @@ define dso_local i64 @test_longlong_one() {
 ; CHECK-NEXT:    ldx 3, 3, var_long_long at tls
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i64, i64* @var_long_long, align 8, !tbaa !11
+  %0 = load i64, ptr @var_long_long, align 8, !tbaa !11
   ret i64 %0
 }
 
@@ -152,7 +152,7 @@ define dso_local void @test_longlong_two(i32 signext %a) {
 ; CHECK-NEXT:    blr
 entry:
   %conv = sext i32 %a to i64
-  store i64 %conv, i64* @var_long_long, align 8, !tbaa !11
+  store i64 %conv, ptr @var_long_long, align 8, !tbaa !11
   ret void
 }
 
@@ -166,9 +166,9 @@ define dso_local i64 @test_longlong_three(i64 %a) {
 ; CHECK-NEXT:    stdx 3, 4, var_long_long at tls
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i64, i64* @var_long_long, align 8, !tbaa !11
+  %0 = load i64, ptr @var_long_long, align 8, !tbaa !11
   %add = add nsw i64 %0, %a
-  store i64 %add, i64* @var_long_long, align 8, !tbaa !11
+  store i64 %add, ptr @var_long_long, align 8, !tbaa !11
   ret i64 %add
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/tls-store2.ll b/llvm/test/CodeGen/PowerPC/tls-store2.ll
index d51c08891b4e..f5f9b9cb77dc 100644
--- a/llvm/test/CodeGen/PowerPC/tls-store2.ll
+++ b/llvm/test/CodeGen/PowerPC/tls-store2.ll
@@ -6,15 +6,15 @@ target triple = "powerpc64le-unknown-linux-gnu"
 ; Test back-to-back stores of TLS variables to ensure call sequences no
 ; longer overlap.
 
- at __once_callable = external thread_local global i8**
- at __once_call = external thread_local global void ()*
+ at __once_callable = external thread_local global ptr
+ at __once_call = external thread_local global ptr
 
-define i64 @call_once(i64 %flag, i8* %ptr) {
+define i64 @call_once(i64 %flag, ptr %ptr) {
 entry:
-  %var = alloca i8*, align 8
-  store i8* %ptr, i8** %var, align 8
-  store i8** %var, i8*** @__once_callable, align 8
-  store void ()* @__once_call_impl, void ()** @__once_call, align 8
+  %var = alloca ptr, align 8
+  store ptr %ptr, ptr %var, align 8
+  store ptr %var, ptr @__once_callable, align 8
+  store ptr @__once_call_impl, ptr @__once_call, align 8
   ret i64 %flag
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/tls.ll b/llvm/test/CodeGen/PowerPC/tls.ll
index e2c900639c7f..ec9ab9ae7ff0 100644
--- a/llvm/test/CodeGen/PowerPC/tls.ll
+++ b/llvm/test/CodeGen/PowerPC/tls.ll
@@ -18,7 +18,7 @@ entry:
 ;OPT1:          addis [[REG1:[1-9][0-9]*]], 13, a at tprel@ha
 ;OPT1-NEXT:     li [[REG3:[0-9]+]], 42
 ;OPT1:     stw [[REG3]], a at tprel@l([[REG1]])
-  store i32 42, i32* @a, align 4
+  store i32 42, ptr @a, align 4
   ret i32 0
 }
 
@@ -30,8 +30,8 @@ entry:
 define dso_local signext i32 @main2() nounwind {
 entry:
   %retval = alloca i32, align 4
-  store i32 0, i32* %retval
-  %0 = load i32, i32* @a2, align 4
+  store i32 0, ptr %retval
+  %0 = load i32, ptr @a2, align 4
   ret i32 %0
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/tls_get_addr_clobbers.ll b/llvm/test/CodeGen/PowerPC/tls_get_addr_clobbers.ll
index 632358ec8610..63af08c3a19c 100644
--- a/llvm/test/CodeGen/PowerPC/tls_get_addr_clobbers.ll
+++ b/llvm/test/CodeGen/PowerPC/tls_get_addr_clobbers.ll
@@ -1,8 +1,8 @@
 ; RUN: llc -verify-machineinstrs -mtriple="powerpc64le-unknown-linux-gnu" -relocation-model=pic < %s | FileCheck %s
 
- at a = thread_local global i32* null, align 8
+ at a = thread_local global ptr null, align 8
 
-define void @test_foo(i32* nocapture %x01, i32* nocapture %x02, i32* nocapture %x03, i32* nocapture %x04, i32* nocapture %x05, i32* nocapture %x06, i32* nocapture %x07, i32* nocapture %x08) #0 {
+define void @test_foo(ptr nocapture %x01, ptr nocapture %x02, ptr nocapture %x03, ptr nocapture %x04, ptr nocapture %x05, ptr nocapture %x06, ptr nocapture %x07, ptr nocapture %x08) #0 {
 entry:
 
 ; CHECK-LABEL: test_foo:
@@ -34,19 +34,19 @@ entry:
 ; CHECK-DAG: stw 3, 0([[BACKUP_10]])
 ; CHECK: blr
 
-  %0 = load i32*, i32** @a, align 8
-  %cmp = icmp eq i32* %0, null
+  %0 = load ptr, ptr @a, align 8
+  %cmp = icmp eq ptr %0, null
   br i1 %cmp, label %return, label %if.end
 
 if.end:                                           ; preds = %entry
-  store i32 0, i32* %x01, align 4
-  store i32 0, i32* %x02, align 4
-  store i32 0, i32* %x03, align 4
-  store i32 0, i32* %x04, align 4
-  store i32 0, i32* %x05, align 4
-  store i32 0, i32* %x06, align 4
-  store i32 0, i32* %x07, align 4
-  store i32 0, i32* %x08, align 4
+  store i32 0, ptr %x01, align 4
+  store i32 0, ptr %x02, align 4
+  store i32 0, ptr %x03, align 4
+  store i32 0, ptr %x04, align 4
+  store i32 0, ptr %x05, align 4
+  store i32 0, ptr %x06, align 4
+  store i32 0, ptr %x07, align 4
+  store i32 0, ptr %x08, align 4
   br label %return
 
 return:                                           ; preds = %entry, %if.end

diff  --git a/llvm/test/CodeGen/PowerPC/tls_get_addr_stackframe.ll b/llvm/test/CodeGen/PowerPC/tls_get_addr_stackframe.ll
index 70197c208329..7b2b00bea759 100644
--- a/llvm/test/CodeGen/PowerPC/tls_get_addr_stackframe.ll
+++ b/llvm/test/CodeGen/PowerPC/tls_get_addr_stackframe.ll
@@ -3,23 +3,23 @@
 ; CHECK: mflr 0
 ; CHECK: __tls_get_addr
 
-%struct1.2.41 = type { %struct2.0.39, %struct3.1.40, %struct1.2.41* }
+%struct1.2.41 = type { %struct2.0.39, %struct3.1.40, ptr }
 %struct2.0.39 = type { i64, i32, i32, i32, i32 }
 %struct3.1.40 = type { [160 x i8] }
 
- at tls_var = external thread_local global %struct1.2.41*, align 8
+ at tls_var = external thread_local global ptr, align 8
 
 define i32 @foo_test() {
-  %1 = load %struct1.2.41*, %struct1.2.41** @tls_var, align 8
+  %1 = load ptr, ptr @tls_var, align 8
 
-  %2 = getelementptr inbounds %struct1.2.41, %struct1.2.41* %1, i64 0, i32 0, i32 3
-  %3 = load i32, i32* %2, align 8
+  %2 = getelementptr inbounds %struct1.2.41, ptr %1, i64 0, i32 0, i32 3
+  %3 = load i32, ptr %2, align 8
   %4 = add nsw i32 %3, -1
   %5 = icmp eq i32 %4, 0
   br i1 %5, label %bb7, label %foo.exit
 
 bb7:                                       ; preds = %3
-  tail call void undef(%struct1.2.41* undef, %struct1.2.41* nonnull undef)
+  tail call void undef(ptr undef, ptr nonnull undef)
   br label %foo.exit
 
 foo.exit:                                         ; preds = %8, %3, %2, %0

diff  --git a/llvm/test/CodeGen/PowerPC/toc-data-const.ll b/llvm/test/CodeGen/PowerPC/toc-data-const.ll
index 2b87480d101c..5d126ca44af8 100644
--- a/llvm/test/CodeGen/PowerPC/toc-data-const.ll
+++ b/llvm/test/CodeGen/PowerPC/toc-data-const.ll
@@ -2,15 +2,15 @@
 ; RUN: llc -mtriple powerpc64-ibm-aix-xcoff < %s | FileCheck %s --check-prefix CHECK
 
 @i1 = external constant i32 #0
- at i2 = constant i32* @i1 #0
+ at i2 = constant ptr @i1 #0
 
 define i32 @read() {
-  %1  = load i32, i32* @i1, align 4
+  %1  = load i32, ptr @i1, align 4
   ret i32 %1
 }
 
-define i32** @retptr() {
-  ret i32** @i2
+define ptr @retptr() {
+  ret ptr @i2
 }
 
 ; CHECK:       .read:

diff  --git a/llvm/test/CodeGen/PowerPC/toc-data.ll b/llvm/test/CodeGen/PowerPC/toc-data.ll
index 307a543dff0d..cbf3be9fcaad 100644
--- a/llvm/test/CodeGen/PowerPC/toc-data.ll
+++ b/llvm/test/CodeGen/PowerPC/toc-data.ll
@@ -20,7 +20,7 @@
 
 define dso_local void @write_int(i32 signext %in) {
   entry:
-    store i32 %in, i32* @i, align 4
+    store i32 %in, ptr @i, align 4
     ret void
 }
 ; CHECK32: name:            write_int
@@ -47,7 +47,7 @@ define dso_local void @write_int(i32 signext %in) {
 
 define dso_local i64 @read_ll() {
   entry:
-    %0 = load i64, i64* @ll, align 8
+    %0 = load i64, ptr @ll, align 8
     ret i64 %0
 }
 ; CHECK32: name:            read_ll
@@ -73,7 +73,7 @@ define dso_local i64 @read_ll() {
 
 define dso_local float @read_float() {
   entry:
-    %0 = load float, float* @f, align 4
+    %0 = load float, ptr @f, align 4
     ret float %0
 }
 ; CHECK32: name:            read_float
@@ -99,7 +99,7 @@ define dso_local float @read_float() {
 
 define dso_local void @write_double(double %in) {
   entry:
-    store double %in, double* @d, align 8
+    store double %in, ptr @d, align 8
     ret void
 }
 ; CHECK32: name:            write_double
@@ -122,9 +122,9 @@ define dso_local void @write_double(double %in) {
 ; TEST64-NEXT:    stfd 1, 0(3)
 
 
-define dso_local nonnull i32* @addr() {
+define dso_local nonnull ptr @addr() {
   entry:
-    ret i32* @i
+    ret ptr @i
 }
 ; CHECK32: name:            addr
 ; CHECK32:       %[[SCRATCH:[0-9]+]]:gprc = ADDItoc @i, $r2

diff  --git a/llvm/test/CodeGen/PowerPC/toc-float.ll b/llvm/test/CodeGen/PowerPC/toc-float.ll
index 6b251f281fb2..c98b364fa75d 100644
--- a/llvm/test/CodeGen/PowerPC/toc-float.ll
+++ b/llvm/test/CodeGen/PowerPC/toc-float.ll
@@ -56,7 +56,7 @@ define float @floatConstantArray() local_unnamed_addr  {
 ; CHECK-P8-NEXT:    lfs 1, .LCPI2_0 at toc@l(4)
 ; CHECK-P8-NEXT:    xsaddsp 1, 0, 1
 ; CHECK-P8-NEXT:    blr
-  %1 = load float, float* getelementptr inbounds ([10 x float], [10 x float]* @FArr, i64 0, i64 3), align 4
+  %1 = load float, ptr getelementptr inbounds ([10 x float], ptr @FArr, i64 0, i64 3), align 4
   %2 = fadd float %1, 0x400B333340000000
   ret float %2
 }
@@ -98,7 +98,7 @@ define double @doubleConstantArray()  {
 ; CHECK-P8-NEXT:    lfd 1, .LCPI4_0 at toc@l(4)
 ; CHECK-P8-NEXT:    xsadddp 1, 0, 1
 ; CHECK-P8-NEXT:    blr
-  %1 = load double, double* getelementptr inbounds ([200 x double], [200 x double]* @d, i64 0, i64 3), align 8
+  %1 = load double, ptr getelementptr inbounds ([200 x double], ptr @d, i64 0, i64 3), align 8
   %2 = fadd double %1, 6.880000e+00
   ret double %2
 }
@@ -130,7 +130,7 @@ define double @doubleLargeConstantArray()  {
 ; CHECK-P8-NEXT:    lfdx 0, 3, 4
 ; CHECK-P8-NEXT:    xsadddp 1, 0, 1
 ; CHECK-P8-NEXT:    blr
-  %1 = load double, double* getelementptr inbounds ([20000 x double], [20000 x double]* @arr, i64 0, i64 4096), align 8
+  %1 = load double, ptr getelementptr inbounds ([20000 x double], ptr @arr, i64 0, i64 4096), align 8
   %2 = fadd double %1, 6.880000e+00
   ret double %2
 }
@@ -154,6 +154,6 @@ define <4 x i32> @vectorArray() #0 {
 ; CHECK-P8-NEXT:    xxswapd 34, 0
 ; CHECK-P8-NEXT:    blr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* getelementptr inbounds ([10 x <4 x i32>], [10 x <4 x i32>]* @vec_arr, i64 0, i64 2), align 16
+  %0 = load <4 x i32>, ptr getelementptr inbounds ([10 x <4 x i32>], ptr @vec_arr, i64 0, i64 2), align 16
   ret <4 x i32> %0
 }

diff  --git a/llvm/test/CodeGen/PowerPC/tocSaveInPrologue.ll b/llvm/test/CodeGen/PowerPC/tocSaveInPrologue.ll
index fbc094bcf516..13ba5ba63c16 100644
--- a/llvm/test/CodeGen/PowerPC/tocSaveInPrologue.ll
+++ b/llvm/test/CodeGen/PowerPC/tocSaveInPrologue.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names \
 ; RUN:   -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s
-define dso_local void @test(void (i32)* nocapture %fp, i32 signext %Arg, i32 signext %Len) local_unnamed_addr #0 {
+define dso_local void @test(ptr nocapture %fp, i32 signext %Arg, i32 signext %Len) local_unnamed_addr #0 {
 ; CHECK-LABEL: test:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mflr r0

diff  --git a/llvm/test/CodeGen/PowerPC/trampoline.ll b/llvm/test/CodeGen/PowerPC/trampoline.ll
index f8e896690acc..738f95e95b6d 100644
--- a/llvm/test/CodeGen/PowerPC/trampoline.ll
+++ b/llvm/test/CodeGen/PowerPC/trampoline.ll
@@ -4,165 +4,156 @@ module asm "\09.lazy_reference .objc_class_name_NSImageRep"
 module asm "\09.objc_class_name_NSBitmapImageRep=0"
 module asm "\09.globl .objc_class_name_NSBitmapImageRep"
 	%struct.CGImage = type opaque
-	%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]" = type { %struct.NSBitmapImageRep*, void (%struct.__block_1*, %struct.CGImage*)* }
+	%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]" = type { ptr, ptr }
 	%struct.NSBitmapImageRep = type { %struct.NSImageRep }
 	%struct.NSImageRep = type {  }
 	%struct.NSZone = type opaque
-	%struct.__block_1 = type { %struct.__invoke_impl, %struct.NSZone*, %struct.NSBitmapImageRep** }
+	%struct.__block_1 = type { %struct.__invoke_impl, ptr, ptr }
 	%struct.__builtin_trampoline = type { [40 x i8] }
-	%struct.__invoke_impl = type { i8*, i32, i32, i8* }
+	%struct.__invoke_impl = type { ptr, i32, i32, ptr }
 	%struct._objc__method_prototype_list = type opaque
-	%struct._objc_class = type { %struct._objc_class*, %struct._objc_class*, i8*, i32, i32, i32, %struct._objc_ivar_list*, %struct._objc_method_list*, %struct.objc_cache*, %struct._objc_protocol**, i8*, %struct._objc_class_ext* }
+	%struct._objc_class = type { ptr, ptr, ptr, i32, i32, i32, ptr, ptr, ptr, ptr, ptr, ptr }
 	%struct._objc_class_ext = type opaque
 	%struct._objc_ivar_list = type opaque
-	%struct._objc_method = type { %struct.objc_selector*, i8*, i8* }
+	%struct._objc_method = type { ptr, ptr, ptr }
 	%struct._objc_method_list = type opaque
-	%struct._objc_module = type { i32, i32, i8*, %struct._objc_symtab* }
-	%struct._objc_protocol = type { %struct._objc_protocol_extension*, i8*, %struct._objc_protocol**, %struct._objc__method_prototype_list*, %struct._objc__method_prototype_list* }
+	%struct._objc_module = type { i32, i32, ptr, ptr }
+	%struct._objc_protocol = type { ptr, ptr, ptr, ptr, ptr }
 	%struct._objc_protocol_extension = type opaque
-	%struct._objc_super = type { %struct.objc_object*, %struct._objc_class* }
-	%struct._objc_symtab = type { i32, %struct.objc_selector**, i16, i16, [1 x i8*] }
-	%struct.anon = type { %struct._objc__method_prototype_list*, i32, [1 x %struct._objc_method] }
+	%struct._objc_super = type { ptr, ptr }
+	%struct._objc_symtab = type { i32, ptr, i16, i16, [1 x ptr] }
+	%struct.anon = type { ptr, i32, [1 x %struct._objc_method] }
 	%struct.objc_cache = type opaque
 	%struct.objc_object = type opaque
 	%struct.objc_selector = type opaque
 	%struct.objc_super = type opaque
- at _NSConcreteStackBlock = external global i8*		; <i8**> [#uses=1]
-@"\01L_OBJC_SELECTOR_REFERENCES_1" = internal global %struct.objc_selector* bitcast ([34 x i8]* @"\01L_OBJC_METH_VAR_NAME_1" to %struct.objc_selector*), section "__OBJC,__message_refs,literal_pointers,no_dead_strip"		; <%struct.objc_selector**> [#uses=2]
-@"\01L_OBJC_CLASS_NSBitmapImageRep" = internal global %struct._objc_class { %struct._objc_class* @"\01L_OBJC_METACLASS_NSBitmapImageRep", %struct._objc_class* bitcast ([11 x i8]* @"\01L_OBJC_CLASS_NAME_1" to %struct._objc_class*), i8* getelementptr ([17 x i8], [17 x i8]* @"\01L_OBJC_CLASS_NAME_0", i32 0, i32 0), i32 0, i32 1, i32 0, %struct._objc_ivar_list* null, %struct._objc_method_list* bitcast ({ i8*, i32, [1 x %struct._objc_method] }* @"\01L_OBJC_INSTANCE_METHODS_NSBitmapImageRep" to %struct._objc_method_list*), %struct.objc_cache* null, %struct._objc_protocol** null, i8* null, %struct._objc_class_ext* null }, section "__OBJC,__class,regular,no_dead_strip"		; <%struct._objc_class*> [#uses=3]
-@"\01L_OBJC_SELECTOR_REFERENCES_0" = internal global %struct.objc_selector* bitcast ([14 x i8]* @"\01L_OBJC_METH_VAR_NAME_0" to %struct.objc_selector*), section "__OBJC,__message_refs,literal_pointers,no_dead_strip"		; <%struct.objc_selector**> [#uses=2]
-@"\01L_OBJC_SYMBOLS" = internal global { i32, %struct.objc_selector**, i16, i16, [1 x %struct._objc_class*] } { i32 0, %struct.objc_selector** null, i16 1, i16 0, [1 x %struct._objc_class*] [ %struct._objc_class* @"\01L_OBJC_CLASS_NSBitmapImageRep" ] }, section "__OBJC,__symbols,regular,no_dead_strip"		; <{ i32, %struct.objc_selector**, i16, i16, [1 x %struct._objc_class*] }*> [#uses=2]
-@"\01L_OBJC_METH_VAR_NAME_0" = internal global [14 x i8] c"copyWithZone:\00", section "__TEXT,__cstring,cstring_literals", align 4		; <[14 x i8]*> [#uses=2]
-@"\01L_OBJC_METH_VAR_TYPE_0" = internal global [20 x i8] c"@12 at 0:4^{_NSZone=}8\00", section "__TEXT,__cstring,cstring_literals", align 4		; <[20 x i8]*> [#uses=1]
-@"\01L_OBJC_INSTANCE_METHODS_NSBitmapImageRep" = internal global { i8*, i32, [1 x %struct._objc_method] } { i8* null, i32 1, [1 x %struct._objc_method] [ %struct._objc_method { %struct.objc_selector* bitcast ([14 x i8]* @"\01L_OBJC_METH_VAR_NAME_0" to %struct.objc_selector*), i8* getelementptr ([20 x i8], [20 x i8]* @"\01L_OBJC_METH_VAR_TYPE_0", i32 0, i32 0), i8* bitcast (%struct.objc_object* (%struct.NSBitmapImageRep*, %struct.objc_selector*, %struct.NSZone*)* @"-[NSBitmapImageRep copyWithZone:]" to i8*) } ] }, section "__OBJC,__inst_meth,regular,no_dead_strip"		; <{ i8*, i32, [1 x %struct._objc_method] }*> [#uses=2]
-@"\01L_OBJC_CLASS_NAME_0" = internal global [17 x i8] c"NSBitmapImageRep\00", section "__TEXT,__cstring,cstring_literals", align 4		; <[17 x i8]*> [#uses=1]
-@"\01L_OBJC_CLASS_NAME_1" = internal global [11 x i8] c"NSImageRep\00", section "__TEXT,__cstring,cstring_literals", align 4		; <[11 x i8]*> [#uses=2]
-@"\01L_OBJC_METACLASS_NSBitmapImageRep" = internal global %struct._objc_class { %struct._objc_class* bitcast ([11 x i8]* @"\01L_OBJC_CLASS_NAME_1" to %struct._objc_class*), %struct._objc_class* bitcast ([11 x i8]* @"\01L_OBJC_CLASS_NAME_1" to %struct._objc_class*), i8* getelementptr ([17 x i8], [17 x i8]* @"\01L_OBJC_CLASS_NAME_0", i32 0, i32 0), i32 0, i32 2, i32 48, %struct._objc_ivar_list* null, %struct._objc_method_list* null, %struct.objc_cache* null, %struct._objc_protocol** null, i8* null, %struct._objc_class_ext* null }, section "__OBJC,__meta_class,regular,no_dead_strip"		; <%struct._objc_class*> [#uses=2]
-@"\01L_OBJC_METH_VAR_NAME_1" = internal global [34 x i8] c"_performBlockUsingBackingCGImage:\00", section "__TEXT,__cstring,cstring_literals", align 4		; <[34 x i8]*> [#uses=2]
-@"\01L_OBJC_IMAGE_INFO" = internal constant [2 x i32] zeroinitializer, section "__OBJC, __image_info,regular"		; <[2 x i32]*> [#uses=1]
-@"\01L_OBJC_CLASS_NAME_2" = internal global [1 x i8] zeroinitializer, section "__TEXT,__cstring,cstring_literals", align 4		; <[1 x i8]*> [#uses=1]
-@"\01L_OBJC_MODULES" = internal global %struct._objc_module { i32 7, i32 16, i8* getelementptr ([1 x i8], [1 x i8]* @"\01L_OBJC_CLASS_NAME_2", i32 0, i32 0), %struct._objc_symtab* bitcast ({ i32, %struct.objc_selector**, i16, i16, [1 x %struct._objc_class*] }* @"\01L_OBJC_SYMBOLS" to %struct._objc_symtab*) }, section "__OBJC,__module_info,regular,no_dead_strip"		; <%struct._objc_module*> [#uses=1]
- at llvm.used = appending global [14 x i8*] [ i8* bitcast (%struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_1" to i8*), i8* bitcast (%struct._objc_class* @"\01L_OBJC_CLASS_NSBitmapImageRep" to i8*), i8* bitcast (%struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_0" to i8*), i8* bitcast ({ i32, %struct.objc_selector**, i16, i16, [1 x %struct._objc_class*] }* @"\01L_OBJC_SYMBOLS" to i8*), i8* getelementptr ([14 x i8], [14 x i8]* @"\01L_OBJC_METH_VAR_NAME_0", i32 0, i32 0), i8* getelementptr ([20 x i8], [20 x i8]* @"\01L_OBJC_METH_VAR_TYPE_0", i32 0, i32 0), i8* bitcast ({ i8*, i32, [1 x %struct._objc_method] }* @"\01L_OBJC_INSTANCE_METHODS_NSBitmapImageRep" to i8*), i8* getelementptr ([17 x i8], [17 x i8]* @"\01L_OBJC_CLASS_NAME_0", i32 0, i32 0), i8* getelementptr ([11 x i8], [11 x i8]* @"\01L_OBJC_CLASS_NAME_1", i32 0, i32 0), i8* bitcast (%struct._objc_class* @"\01L_OBJC_METACLASS_NSBitmapImageRep" to i8*), i8* getelementptr ([34 x i8], [34 x i8]* @"\01L_OBJC_METH_VAR_NAME_1", i32 0, i32 0), i8* bitcast ([2 x i32]* @"\01L_OBJC_IMAGE_INFO" to i8*), i8* getelementptr ([1 x i8], [1 x i8]* @"\01L_OBJC_CLASS_NAME_2", i32 0, i32 0), i8* bitcast (%struct._objc_module* @"\01L_OBJC_MODULES" to i8*) ], section "llvm.metadata"		; <[14 x i8*]*> [#uses=0]
+ at _NSConcreteStackBlock = external global ptr		; <ptr> [#uses=1]
+@"\01L_OBJC_SELECTOR_REFERENCES_1" = internal global ptr @"\01L_OBJC_METH_VAR_NAME_1", section "__OBJC,__message_refs,literal_pointers,no_dead_strip"		; <ptr> [#uses=2]
+@"\01L_OBJC_CLASS_NSBitmapImageRep" = internal global %struct._objc_class { ptr @"\01L_OBJC_METACLASS_NSBitmapImageRep", ptr @"\01L_OBJC_CLASS_NAME_1", ptr @"\01L_OBJC_CLASS_NAME_0", i32 0, i32 1, i32 0, ptr null, ptr @"\01L_OBJC_INSTANCE_METHODS_NSBitmapImageRep", ptr null, ptr null, ptr null, ptr null }, section "__OBJC,__class,regular,no_dead_strip"		; <ptr> [#uses=3]
+@"\01L_OBJC_SELECTOR_REFERENCES_0" = internal global ptr @"\01L_OBJC_METH_VAR_NAME_0", section "__OBJC,__message_refs,literal_pointers,no_dead_strip"		; <ptr> [#uses=2]
+@"\01L_OBJC_SYMBOLS" = internal global { i32, ptr, i16, i16, [1 x ptr] } { i32 0, ptr null, i16 1, i16 0, [1 x ptr] [ ptr @"\01L_OBJC_CLASS_NSBitmapImageRep" ] }, section "__OBJC,__symbols,regular,no_dead_strip"		; <ptr> [#uses=2]
+@"\01L_OBJC_METH_VAR_NAME_0" = internal global [14 x i8] c"copyWithZone:\00", section "__TEXT,__cstring,cstring_literals", align 4		; <ptr> [#uses=2]
+@"\01L_OBJC_METH_VAR_TYPE_0" = internal global [20 x i8] c"@12 at 0:4^{_NSZone=}8\00", section "__TEXT,__cstring,cstring_literals", align 4		; <ptr> [#uses=1]
+@"\01L_OBJC_INSTANCE_METHODS_NSBitmapImageRep" = internal global { ptr, i32, [1 x %struct._objc_method] } { ptr null, i32 1, [1 x %struct._objc_method] [ %struct._objc_method { ptr @"\01L_OBJC_METH_VAR_NAME_0", ptr @"\01L_OBJC_METH_VAR_TYPE_0", ptr @"-[NSBitmapImageRep copyWithZone:]" } ] }, section "__OBJC,__inst_meth,regular,no_dead_strip"		; <ptr> [#uses=2]
+@"\01L_OBJC_CLASS_NAME_0" = internal global [17 x i8] c"NSBitmapImageRep\00", section "__TEXT,__cstring,cstring_literals", align 4		; <ptr> [#uses=1]
+@"\01L_OBJC_CLASS_NAME_1" = internal global [11 x i8] c"NSImageRep\00", section "__TEXT,__cstring,cstring_literals", align 4		; <ptr> [#uses=2]
+@"\01L_OBJC_METACLASS_NSBitmapImageRep" = internal global %struct._objc_class { ptr @"\01L_OBJC_CLASS_NAME_1", ptr @"\01L_OBJC_CLASS_NAME_1", ptr @"\01L_OBJC_CLASS_NAME_0", i32 0, i32 2, i32 48, ptr null, ptr null, ptr null, ptr null, ptr null, ptr null }, section "__OBJC,__meta_class,regular,no_dead_strip"		; <ptr> [#uses=2]
+@"\01L_OBJC_METH_VAR_NAME_1" = internal global [34 x i8] c"_performBlockUsingBackingCGImage:\00", section "__TEXT,__cstring,cstring_literals", align 4		; <ptr> [#uses=2]
+@"\01L_OBJC_IMAGE_INFO" = internal constant [2 x i32] zeroinitializer, section "__OBJC, __image_info,regular"		; <ptr> [#uses=1]
+@"\01L_OBJC_CLASS_NAME_2" = internal global [1 x i8] zeroinitializer, section "__TEXT,__cstring,cstring_literals", align 4		; <ptr> [#uses=1]
+@"\01L_OBJC_MODULES" = internal global %struct._objc_module { i32 7, i32 16, ptr @"\01L_OBJC_CLASS_NAME_2", ptr @"\01L_OBJC_SYMBOLS" }, section "__OBJC,__module_info,regular,no_dead_strip"		; <ptr> [#uses=1]
+ at llvm.used = appending global [14 x ptr] [ ptr @"\01L_OBJC_SELECTOR_REFERENCES_1", ptr @"\01L_OBJC_CLASS_NSBitmapImageRep", ptr @"\01L_OBJC_SELECTOR_REFERENCES_0", ptr @"\01L_OBJC_SYMBOLS", ptr @"\01L_OBJC_METH_VAR_NAME_0", ptr @"\01L_OBJC_METH_VAR_TYPE_0", ptr @"\01L_OBJC_INSTANCE_METHODS_NSBitmapImageRep", ptr @"\01L_OBJC_CLASS_NAME_0", ptr @"\01L_OBJC_CLASS_NAME_1", ptr @"\01L_OBJC_METACLASS_NSBitmapImageRep", ptr @"\01L_OBJC_METH_VAR_NAME_1", ptr @"\01L_OBJC_IMAGE_INFO", ptr @"\01L_OBJC_CLASS_NAME_2", ptr @"\01L_OBJC_MODULES" ], section "llvm.metadata"		; <ptr> [#uses=0]
 
-define internal %struct.objc_object* @"-[NSBitmapImageRep copyWithZone:]"(%struct.NSBitmapImageRep* %self, %struct.objc_selector* %_cmd, %struct.NSZone* %zone) nounwind {
+define internal ptr @"-[NSBitmapImageRep copyWithZone:]"(ptr %self, ptr %_cmd, ptr %zone) nounwind {
 entry:
-	%self_addr = alloca %struct.NSBitmapImageRep*		; <%struct.NSBitmapImageRep**> [#uses=2]
-	%_cmd_addr = alloca %struct.objc_selector*		; <%struct.objc_selector**> [#uses=1]
-	%zone_addr = alloca %struct.NSZone*		; <%struct.NSZone**> [#uses=2]
-	%retval = alloca %struct.objc_object*		; <%struct.objc_object**> [#uses=1]
-	%__block_holder_tmp_1.0 = alloca %struct.__block_1		; <%struct.__block_1*> [#uses=7]
-	%new = alloca %struct.NSBitmapImageRep*		; <%struct.NSBitmapImageRep**> [#uses=2]
-	%self.1 = alloca %struct.objc_object*		; <%struct.objc_object**> [#uses=2]
-	%0 = alloca i8*		; <i8**> [#uses=2]
-	%TRAMP.9 = alloca %struct.__builtin_trampoline, align 4		; <%struct.__builtin_trampoline*> [#uses=1]
-	%1 = alloca void (%struct.__block_1*, %struct.CGImage*)*		; <void (%struct.__block_1*, %struct.CGImage*)**> [#uses=2]
-	%2 = alloca %struct.NSBitmapImageRep*		; <%struct.NSBitmapImageRep**> [#uses=2]
-	%FRAME.7 = alloca %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"		; <%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"*> [#uses=5]
+	%self_addr = alloca ptr		; <ptr> [#uses=2]
+	%_cmd_addr = alloca ptr		; <ptr> [#uses=1]
+	%zone_addr = alloca ptr		; <ptr> [#uses=2]
+	%retval = alloca ptr		; <ptr> [#uses=1]
+	%__block_holder_tmp_1.0 = alloca %struct.__block_1		; <ptr> [#uses=7]
+	%new = alloca ptr		; <ptr> [#uses=2]
+	%self.1 = alloca ptr		; <ptr> [#uses=2]
+	%0 = alloca ptr		; <ptr> [#uses=2]
+	%TRAMP.9 = alloca %struct.__builtin_trampoline, align 4		; <ptr> [#uses=1]
+	%1 = alloca ptr		; <ptr> [#uses=2]
+	%2 = alloca ptr		; <ptr> [#uses=2]
+	%FRAME.7 = alloca %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"		; <ptr> [#uses=5]
 	%"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
-	store %struct.NSBitmapImageRep* %self, %struct.NSBitmapImageRep** %self_addr
-	store %struct.objc_selector* %_cmd, %struct.objc_selector** %_cmd_addr
-	store %struct.NSZone* %zone, %struct.NSZone** %zone_addr
-	%3 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 0		; <%struct.NSBitmapImageRep**> [#uses=1]
-	%4 = load %struct.NSBitmapImageRep*, %struct.NSBitmapImageRep** %self_addr, align 4		; <%struct.NSBitmapImageRep*> [#uses=1]
-	store %struct.NSBitmapImageRep* %4, %struct.NSBitmapImageRep** %3, align 4
-	%TRAMP.91 = bitcast %struct.__builtin_trampoline* %TRAMP.9 to i8*		; <i8*> [#uses=1]
-	%FRAME.72 = bitcast %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7 to i8*		; <i8*> [#uses=1]
-	call void @llvm.init.trampoline(i8* %TRAMP.91, i8* bitcast (void (%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"*, %struct.__block_1*, %struct.CGImage*)* @__helper_1.1632 to i8*), i8* %FRAME.72)		; <i8*> [#uses=1]
-        %tramp = call i8* @llvm.adjust.trampoline(i8* %TRAMP.91)
-	store i8* %tramp, i8** %0, align 4
-	%5 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 1		; <void (%struct.__block_1*, %struct.CGImage*)**> [#uses=1]
-	%6 = load i8*, i8** %0, align 4		; <i8*> [#uses=1]
-	%7 = bitcast i8* %6 to void (%struct.__block_1*, %struct.CGImage*)*		; <void (%struct.__block_1*, %struct.CGImage*)*> [#uses=1]
-	store void (%struct.__block_1*, %struct.CGImage*)* %7, void (%struct.__block_1*, %struct.CGImage*)** %5, align 4
-	store %struct.NSBitmapImageRep* null, %struct.NSBitmapImageRep** %new, align 4
-	%8 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0		; <%struct.__invoke_impl*> [#uses=1]
-	%9 = getelementptr %struct.__invoke_impl, %struct.__invoke_impl* %8, i32 0, i32 0		; <i8**> [#uses=1]
-	store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** %9, align 4
-	%10 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0		; <%struct.__invoke_impl*> [#uses=1]
-	%11 = getelementptr %struct.__invoke_impl, %struct.__invoke_impl* %10, i32 0, i32 1		; <i32*> [#uses=1]
-	store i32 67108864, i32* %11, align 4
-	%12 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0		; <%struct.__invoke_impl*> [#uses=1]
-	%13 = getelementptr %struct.__invoke_impl, %struct.__invoke_impl* %12, i32 0, i32 2		; <i32*> [#uses=1]
-	store i32 24, i32* %13, align 4
-	%14 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 1		; <void (%struct.__block_1*, %struct.CGImage*)**> [#uses=1]
-	%15 = load void (%struct.__block_1*, %struct.CGImage*)*, void (%struct.__block_1*, %struct.CGImage*)** %14, align 4		; <void (%struct.__block_1*, %struct.CGImage*)*> [#uses=1]
-	store void (%struct.__block_1*, %struct.CGImage*)* %15, void (%struct.__block_1*, %struct.CGImage*)** %1, align 4
-	%16 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0		; <%struct.__invoke_impl*> [#uses=1]
-	%17 = getelementptr %struct.__invoke_impl, %struct.__invoke_impl* %16, i32 0, i32 3		; <i8**> [#uses=1]
-	%18 = load void (%struct.__block_1*, %struct.CGImage*)*, void (%struct.__block_1*, %struct.CGImage*)** %1, align 4		; <void (%struct.__block_1*, %struct.CGImage*)*> [#uses=1]
-	%19 = bitcast void (%struct.__block_1*, %struct.CGImage*)* %18 to i8*		; <i8*> [#uses=1]
-	store i8* %19, i8** %17, align 4
-	%20 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 1		; <%struct.NSZone**> [#uses=1]
-	%21 = load %struct.NSZone*, %struct.NSZone** %zone_addr, align 4		; <%struct.NSZone*> [#uses=1]
-	store %struct.NSZone* %21, %struct.NSZone** %20, align 4
-	%22 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 2		; <%struct.NSBitmapImageRep***> [#uses=1]
-	store %struct.NSBitmapImageRep** %new, %struct.NSBitmapImageRep*** %22, align 4
-	%23 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 0		; <%struct.NSBitmapImageRep**> [#uses=1]
-	%24 = load %struct.NSBitmapImageRep*, %struct.NSBitmapImageRep** %23, align 4		; <%struct.NSBitmapImageRep*> [#uses=1]
-	store %struct.NSBitmapImageRep* %24, %struct.NSBitmapImageRep** %2, align 4
-	%25 = load %struct.NSBitmapImageRep*, %struct.NSBitmapImageRep** %2, align 4		; <%struct.NSBitmapImageRep*> [#uses=1]
-	%26 = bitcast %struct.NSBitmapImageRep* %25 to %struct.objc_object*		; <%struct.objc_object*> [#uses=1]
-	store %struct.objc_object* %26, %struct.objc_object** %self.1, align 4
-	%27 = load %struct.objc_selector*, %struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_1", align 4		; <%struct.objc_selector*> [#uses=1]
-	%__block_holder_tmp_1.03 = bitcast %struct.__block_1* %__block_holder_tmp_1.0 to void (%struct.CGImage*)*		; <void (%struct.CGImage*)*> [#uses=1]
-	%28 = load %struct.objc_object*, %struct.objc_object** %self.1, align 4		; <%struct.objc_object*> [#uses=1]
-	%29 = call %struct.objc_object* (%struct.objc_object*, %struct.objc_selector*, ...) inttoptr (i64 4294901504 to %struct.objc_object* (%struct.objc_object*, %struct.objc_selector*, ...)*)(%struct.objc_object* %28, %struct.objc_selector* %27, void (%struct.CGImage*)* %__block_holder_tmp_1.03) nounwind		; <%struct.objc_object*> [#uses=0]
+	store ptr %self, ptr %self_addr
+	store ptr %_cmd, ptr %_cmd_addr
+	store ptr %zone, ptr %zone_addr
+	%3 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", ptr %FRAME.7, i32 0, i32 0		; <ptr> [#uses=1]
+	%4 = load ptr, ptr %self_addr, align 4		; <ptr> [#uses=1]
+	store ptr %4, ptr %3, align 4
+	call void @llvm.init.trampoline(ptr %TRAMP.9, ptr @__helper_1.1632, ptr %FRAME.7)		; <ptr> [#uses=1]
+        %tramp = call ptr @llvm.adjust.trampoline(ptr %TRAMP.9)
+	store ptr %tramp, ptr %0, align 4
+	%5 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", ptr %FRAME.7, i32 0, i32 1		; <ptr> [#uses=1]
+	%6 = load ptr, ptr %0, align 4		; <ptr> [#uses=1]
+	store ptr %6, ptr %5, align 4
+	store ptr null, ptr %new, align 4
+	%7 = getelementptr %struct.__block_1, ptr %__block_holder_tmp_1.0, i32 0, i32 0		; <ptr> [#uses=1]
+	%8 = getelementptr %struct.__invoke_impl, ptr %7, i32 0, i32 0		; <ptr> [#uses=1]
+	store ptr @_NSConcreteStackBlock, ptr %8, align 4
+	%9 = getelementptr %struct.__block_1, ptr %__block_holder_tmp_1.0, i32 0, i32 0		; <ptr> [#uses=1]
+	%10 = getelementptr %struct.__invoke_impl, ptr %9, i32 0, i32 1		; <ptr> [#uses=1]
+	store i32 67108864, ptr %10, align 4
+	%11 = getelementptr %struct.__block_1, ptr %__block_holder_tmp_1.0, i32 0, i32 0		; <ptr> [#uses=1]
+	%12 = getelementptr %struct.__invoke_impl, ptr %11, i32 0, i32 2		; <ptr> [#uses=1]
+	store i32 24, ptr %12, align 4
+	%13 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", ptr %FRAME.7, i32 0, i32 1		; <ptr> [#uses=1]
+	%14 = load ptr, ptr %13, align 4		; <ptr> [#uses=1]
+	store ptr %14, ptr %1, align 4
+	%15 = getelementptr %struct.__block_1, ptr %__block_holder_tmp_1.0, i32 0, i32 0		; <ptr> [#uses=1]
+	%16 = getelementptr %struct.__invoke_impl, ptr %15, i32 0, i32 3		; <ptr> [#uses=1]
+	%17 = load ptr, ptr %1, align 4		; <ptr> [#uses=1]
+	store ptr %17, ptr %16, align 4
+	%18 = getelementptr %struct.__block_1, ptr %__block_holder_tmp_1.0, i32 0, i32 1		; <ptr> [#uses=1]
+	%19 = load ptr, ptr %zone_addr, align 4		; <ptr> [#uses=1]
+	store ptr %19, ptr %18, align 4
+	%20 = getelementptr %struct.__block_1, ptr %__block_holder_tmp_1.0, i32 0, i32 2		; <ptr> [#uses=1]
+	store ptr %new, ptr %20, align 4
+	%21 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", ptr %FRAME.7, i32 0, i32 0		; <ptr> [#uses=1]
+	%22 = load ptr, ptr %21, align 4		; <ptr> [#uses=1]
+	store ptr %22, ptr %2, align 4
+	%23 = load ptr, ptr %2, align 4		; <ptr> [#uses=1]
+	store ptr %23, ptr %self.1, align 4
+	%24 = load ptr, ptr @"\01L_OBJC_SELECTOR_REFERENCES_1", align 4		; <ptr> [#uses=1]
+	%25 = load ptr, ptr %self.1, align 4		; <ptr> [#uses=1]
+	%26 = call ptr (ptr, ptr, ...) inttoptr (i64 4294901504 to ptr)(ptr %25, ptr %24, ptr %__block_holder_tmp_1.0) nounwind		; <ptr> [#uses=0]
 	br label %return
 
 return:		; preds = %entry
-	%retval5 = load %struct.objc_object*, %struct.objc_object** %retval		; <%struct.objc_object*> [#uses=1]
-	ret %struct.objc_object* %retval5
+	%retval5 = load ptr, ptr %retval		; <ptr> [#uses=1]
+	ret ptr %retval5
 }
 
-declare void @llvm.init.trampoline(i8*, i8*, i8*) nounwind
-declare i8* @llvm.adjust.trampoline(i8*) nounwind
+declare void @llvm.init.trampoline(ptr, ptr, ptr) nounwind
+declare ptr @llvm.adjust.trampoline(ptr) nounwind
 
-define internal void @__helper_1.1632(%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* nest %CHAIN.8, %struct.__block_1* %_self, %struct.CGImage* %cgImage) nounwind {
+define internal void @__helper_1.1632(ptr nest %CHAIN.8, ptr %_self, ptr %cgImage) nounwind {
 entry:
-	%CHAIN.8_addr = alloca %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"*		; <%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"**> [#uses=2]
-	%_self_addr = alloca %struct.__block_1*		; <%struct.__block_1**> [#uses=3]
-	%cgImage_addr = alloca %struct.CGImage*		; <%struct.CGImage**> [#uses=1]
-	%zone = alloca %struct.NSZone*		; <%struct.NSZone**> [#uses=2]
-	%objc_super = alloca %struct._objc_super		; <%struct._objc_super*> [#uses=3]
-	%new = alloca %struct.NSBitmapImageRep**		; <%struct.NSBitmapImageRep***> [#uses=2]
-	%objc_super.5 = alloca %struct.objc_super*		; <%struct.objc_super**> [#uses=2]
-	%0 = alloca %struct.NSBitmapImageRep*		; <%struct.NSBitmapImageRep**> [#uses=2]
+	%CHAIN.8_addr = alloca ptr		; <ptr> [#uses=2]
+	%_self_addr = alloca ptr		; <ptr> [#uses=3]
+	%cgImage_addr = alloca ptr		; <ptr> [#uses=1]
+	%zone = alloca ptr		; <ptr> [#uses=2]
+	%objc_super = alloca %struct._objc_super		; <ptr> [#uses=3]
+	%new = alloca ptr		; <ptr> [#uses=2]
+	%objc_super.5 = alloca ptr		; <ptr> [#uses=2]
+	%0 = alloca ptr		; <ptr> [#uses=2]
 	%"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
-	store %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %CHAIN.8, %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"** %CHAIN.8_addr
-	store %struct.__block_1* %_self, %struct.__block_1** %_self_addr
-	store %struct.CGImage* %cgImage, %struct.CGImage** %cgImage_addr
-	%1 = load %struct.__block_1*, %struct.__block_1** %_self_addr, align 4		; <%struct.__block_1*> [#uses=1]
-	%2 = getelementptr %struct.__block_1, %struct.__block_1* %1, i32 0, i32 2		; <%struct.NSBitmapImageRep***> [#uses=1]
-	%3 = load %struct.NSBitmapImageRep**, %struct.NSBitmapImageRep*** %2, align 4		; <%struct.NSBitmapImageRep**> [#uses=1]
-	store %struct.NSBitmapImageRep** %3, %struct.NSBitmapImageRep*** %new, align 4
-	%4 = load %struct.__block_1*, %struct.__block_1** %_self_addr, align 4		; <%struct.__block_1*> [#uses=1]
-	%5 = getelementptr %struct.__block_1, %struct.__block_1* %4, i32 0, i32 1		; <%struct.NSZone**> [#uses=1]
-	%6 = load %struct.NSZone*, %struct.NSZone** %5, align 4		; <%struct.NSZone*> [#uses=1]
-	store %struct.NSZone* %6, %struct.NSZone** %zone, align 4
-	%7 = load %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"*, %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"** %CHAIN.8_addr, align 4		; <%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"*> [#uses=1]
-	%8 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %7, i32 0, i32 0		; <%struct.NSBitmapImageRep**> [#uses=1]
-	%9 = load %struct.NSBitmapImageRep*, %struct.NSBitmapImageRep** %8, align 4		; <%struct.NSBitmapImageRep*> [#uses=1]
-	store %struct.NSBitmapImageRep* %9, %struct.NSBitmapImageRep** %0, align 4
-	%10 = load %struct.NSBitmapImageRep*, %struct.NSBitmapImageRep** %0, align 4		; <%struct.NSBitmapImageRep*> [#uses=1]
-	%11 = bitcast %struct.NSBitmapImageRep* %10 to %struct.objc_object*		; <%struct.objc_object*> [#uses=1]
-	%12 = getelementptr %struct._objc_super, %struct._objc_super* %objc_super, i32 0, i32 0		; <%struct.objc_object**> [#uses=1]
-	store %struct.objc_object* %11, %struct.objc_object** %12, align 4
-	%13 = load %struct._objc_class*, %struct._objc_class** getelementptr (%struct._objc_class, %struct._objc_class* @"\01L_OBJC_CLASS_NSBitmapImageRep", i32 0, i32 1), align 4		; <%struct._objc_class*> [#uses=1]
-	%14 = getelementptr %struct._objc_super, %struct._objc_super* %objc_super, i32 0, i32 1		; <%struct._objc_class**> [#uses=1]
-	store %struct._objc_class* %13, %struct._objc_class** %14, align 4
-	%objc_super1 = bitcast %struct._objc_super* %objc_super to %struct.objc_super*		; <%struct.objc_super*> [#uses=1]
-	store %struct.objc_super* %objc_super1, %struct.objc_super** %objc_super.5, align 4
-	%15 = load %struct.objc_selector*, %struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_0", align 4		; <%struct.objc_selector*> [#uses=1]
-	%16 = load %struct.objc_super*, %struct.objc_super** %objc_super.5, align 4		; <%struct.objc_super*> [#uses=1]
-	%17 = load %struct.NSZone*, %struct.NSZone** %zone, align 4		; <%struct.NSZone*> [#uses=1]
-	%18 = call %struct.objc_object* (%struct.objc_super*, %struct.objc_selector*, ...) @objc_msgSendSuper(%struct.objc_super* %16, %struct.objc_selector* %15, %struct.NSZone* %17) nounwind		; <%struct.objc_object*> [#uses=1]
-	%19 = bitcast %struct.objc_object* %18 to %struct.NSBitmapImageRep*		; <%struct.NSBitmapImageRep*> [#uses=1]
-	%20 = load %struct.NSBitmapImageRep**, %struct.NSBitmapImageRep*** %new, align 4		; <%struct.NSBitmapImageRep**> [#uses=1]
-	store %struct.NSBitmapImageRep* %19, %struct.NSBitmapImageRep** %20, align 4
+	store ptr %CHAIN.8, ptr %CHAIN.8_addr
+	store ptr %_self, ptr %_self_addr
+	store ptr %cgImage, ptr %cgImage_addr
+	%1 = load ptr, ptr %_self_addr, align 4		; <ptr> [#uses=1]
+	%2 = getelementptr %struct.__block_1, ptr %1, i32 0, i32 2		; <ptr> [#uses=1]
+	%3 = load ptr, ptr %2, align 4		; <ptr> [#uses=1]
+	store ptr %3, ptr %new, align 4
+	%4 = load ptr, ptr %_self_addr, align 4		; <ptr> [#uses=1]
+	%5 = getelementptr %struct.__block_1, ptr %4, i32 0, i32 1		; <ptr> [#uses=1]
+	%6 = load ptr, ptr %5, align 4		; <ptr> [#uses=1]
+	store ptr %6, ptr %zone, align 4
+	%7 = load ptr, ptr %CHAIN.8_addr, align 4		; <ptr> [#uses=1]
+	%8 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", ptr %7, i32 0, i32 0		; <ptr> [#uses=1]
+	%9 = load ptr, ptr %8, align 4		; <ptr> [#uses=1]
+	store ptr %9, ptr %0, align 4
+	%10 = load ptr, ptr %0, align 4		; <ptr> [#uses=1]
+	%11 = getelementptr %struct._objc_super, ptr %objc_super, i32 0, i32 0		; <ptr> [#uses=1]
+	store ptr %10, ptr %11, align 4
+	%12 = load ptr, ptr getelementptr (%struct._objc_class, ptr @"\01L_OBJC_CLASS_NSBitmapImageRep", i32 0, i32 1), align 4		; <ptr> [#uses=1]
+	%13 = getelementptr %struct._objc_super, ptr %objc_super, i32 0, i32 1		; <ptr> [#uses=1]
+	store ptr %12, ptr %13, align 4
+	store ptr %objc_super, ptr %objc_super.5, align 4
+	%14 = load ptr, ptr @"\01L_OBJC_SELECTOR_REFERENCES_0", align 4		; <ptr> [#uses=1]
+	%15 = load ptr, ptr %objc_super.5, align 4		; <ptr> [#uses=1]
+	%16 = load ptr, ptr %zone, align 4		; <ptr> [#uses=1]
+	%17 = call ptr (ptr, ptr, ...) @objc_msgSendSuper(ptr %15, ptr %14, ptr %16) nounwind		; <ptr> [#uses=1]
+	%18 = load ptr, ptr %new, align 4		; <ptr> [#uses=1]
+	store ptr %17, ptr %18, align 4
 	br label %return
 
 return:		; preds = %entry
 	ret void
 }
 
-declare %struct.objc_object* @objc_msgSendSuper(%struct.objc_super*, %struct.objc_selector*, ...)
+declare ptr @objc_msgSendSuper(ptr, ptr, ...)

diff  --git a/llvm/test/CodeGen/PowerPC/trunc-srl-load.ll b/llvm/test/CodeGen/PowerPC/trunc-srl-load.ll
index dcddbb135bae..dfc1bd46601c 100644
--- a/llvm/test/CodeGen/PowerPC/trunc-srl-load.ll
+++ b/llvm/test/CodeGen/PowerPC/trunc-srl-load.ll
@@ -10,7 +10,7 @@ define dso_local fastcc void @trunc_srl_load(i32 zeroext %AttrArgNo) {
 ; CHECK-NEXT:  # %bb.1: # %exit
 ; CHECK-NEXT:  .LBB0_2: # %cond.false
 entry:
-  %bf.load.i = load i64, i64* null, align 8
+  %bf.load.i = load i64, ptr null, align 8
   %bf.lshr.i = lshr i64 %bf.load.i, 32
   %0 = trunc i64 %bf.lshr.i to i32
   %bf.cast.i = and i32 %0, 65535

diff  --git a/llvm/test/CodeGen/PowerPC/uint-to-ppcfp128-crash.ll b/llvm/test/CodeGen/PowerPC/uint-to-ppcfp128-crash.ll
index 04e4c0f21262..bf18eeadb4d0 100644
--- a/llvm/test/CodeGen/PowerPC/uint-to-ppcfp128-crash.ll
+++ b/llvm/test/CodeGen/PowerPC/uint-to-ppcfp128-crash.ll
@@ -4,7 +4,7 @@
 
 ; Ensure we don't crash by trying to convert directly from a subword load
 ; to a ppc_fp128 as we do for conversions to f32/f64.
-define ppc_fp128 @test(i16* nocapture readonly %Ptr) {
+define ppc_fp128 @test(ptr nocapture readonly %Ptr) {
 ; CHECK-LABEL: test:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lhz 3, 0(3)
@@ -15,7 +15,7 @@ define ppc_fp128 @test(i16* nocapture readonly %Ptr) {
 ; CHECK-NEXT:    xscvuxddp 1, 0
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i16, i16* %Ptr, align 2
+  %0 = load i16, ptr %Ptr, align 2
   %conv = uitofp i16 %0 to ppc_fp128
   ret ppc_fp128 %conv
 }

diff  --git a/llvm/test/CodeGen/PowerPC/unal-altivec-wint.ll b/llvm/test/CodeGen/PowerPC/unal-altivec-wint.ll
index d8444329a8fd..d6244cd828e5 100644
--- a/llvm/test/CodeGen/PowerPC/unal-altivec-wint.ll
+++ b/llvm/test/CodeGen/PowerPC/unal-altivec-wint.ll
@@ -2,15 +2,14 @@
 target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
-declare <4 x i32> @llvm.ppc.altivec.lvx(i8*) #1
+declare <4 x i32> @llvm.ppc.altivec.lvx(ptr) #1
 
-define <4 x i32> @test1(<4 x i32>* %h) #0 {
+define <4 x i32> @test1(ptr %h) #0 {
 entry:
-  %h1 = getelementptr <4 x i32>, <4 x i32>* %h, i64 1
-  %hv = bitcast <4 x i32>* %h1 to i8*
-  %vl = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %hv)
+  %h1 = getelementptr <4 x i32>, ptr %h, i64 1
+  %vl = call <4 x i32> @llvm.ppc.altivec.lvx(ptr %h1)
 
-  %v0 = load <4 x i32>, <4 x i32>* %h, align 8
+  %v0 = load <4 x i32>, ptr %h, align 8
 
   %a = add <4 x i32> %v0, %vl
   ret <4 x i32> %a
@@ -23,15 +22,14 @@ entry:
 ; CHECK: blr
 }
 
-declare void @llvm.ppc.altivec.stvx(<4 x i32>, i8*) #0
+declare void @llvm.ppc.altivec.stvx(<4 x i32>, ptr) #0
 
-define <4 x i32> @test2(<4 x i32>* %h, <4 x i32> %d) #0 {
+define <4 x i32> @test2(ptr %h, <4 x i32> %d) #0 {
 entry:
-  %h1 = getelementptr <4 x i32>, <4 x i32>* %h, i64 1
-  %hv = bitcast <4 x i32>* %h1 to i8*
-  call void @llvm.ppc.altivec.stvx(<4 x i32> %d, i8* %hv)
+  %h1 = getelementptr <4 x i32>, ptr %h, i64 1
+  call void @llvm.ppc.altivec.stvx(<4 x i32> %d, ptr %h1)
 
-  %v0 = load <4 x i32>, <4 x i32>* %h, align 8
+  %v0 = load <4 x i32>, ptr %h, align 8
 
   ret <4 x i32> %v0
 

diff  --git a/llvm/test/CodeGen/PowerPC/unal-altivec.ll b/llvm/test/CodeGen/PowerPC/unal-altivec.ll
index a804b35052be..e5288a026f95 100644
--- a/llvm/test/CodeGen/PowerPC/unal-altivec.ll
+++ b/llvm/test/CodeGen/PowerPC/unal-altivec.ll
@@ -2,31 +2,27 @@
 target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
-define void @foo(float* noalias nocapture %a, float* noalias nocapture %b) #0 {
+define void @foo(ptr noalias nocapture %a, ptr noalias nocapture %b) #0 {
 vector.ph:
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds float, float* %b, i64 %index
-  %1 = bitcast float* %0 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %1, align 4
+  %0 = getelementptr inbounds float, ptr %b, i64 %index
+  %wide.load = load <4 x float>, ptr %0, align 4
   %.sum11 = or i64 %index, 4
-  %2 = getelementptr float, float* %b, i64 %.sum11
-  %3 = bitcast float* %2 to <4 x float>*
-  %wide.load8 = load <4 x float>, <4 x float>* %3, align 4
-  %4 = fadd <4 x float> %wide.load, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
-  %5 = fadd <4 x float> %wide.load8, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
-  %6 = getelementptr inbounds float, float* %a, i64 %index
-  %7 = bitcast float* %6 to <4 x float>*
-  store <4 x float> %4, <4 x float>* %7, align 4
+  %1 = getelementptr float, ptr %b, i64 %.sum11
+  %wide.load8 = load <4 x float>, ptr %1, align 4
+  %2 = fadd <4 x float> %wide.load, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
+  %3 = fadd <4 x float> %wide.load8, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
+  %4 = getelementptr inbounds float, ptr %a, i64 %index
+  store <4 x float> %2, ptr %4, align 4
   %.sum12 = or i64 %index, 4
-  %8 = getelementptr float, float* %a, i64 %.sum12
-  %9 = bitcast float* %8 to <4 x float>*
-  store <4 x float> %5, <4 x float>* %9, align 4
+  %5 = getelementptr float, ptr %a, i64 %.sum12
+  store <4 x float> %3, ptr %5, align 4
   %index.next = add i64 %index, 8
-  %10 = icmp eq i64 %index.next, 16000
-  br i1 %10, label %for.end, label %vector.body
+  %6 = icmp eq i64 %index.next, 16000
+  br i1 %6, label %for.end, label %vector.body
 
 ; CHECK: @foo
 ; CHECK-DAG: li [[C0:[0-9]+]], 0

diff  --git a/llvm/test/CodeGen/PowerPC/unal-altivec2.ll b/llvm/test/CodeGen/PowerPC/unal-altivec2.ll
index 7e0bb5810971..fafcab8468eb 100644
--- a/llvm/test/CodeGen/PowerPC/unal-altivec2.ll
+++ b/llvm/test/CodeGen/PowerPC/unal-altivec2.ll
@@ -3,7 +3,7 @@ target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
 target triple = "powerpc64-unknown-linux-gnu"
 
 ; Function Attrs: nounwind
-define void @foo(float* noalias nocapture %x, float* noalias nocapture readonly %y) #0 {
+define void @foo(ptr noalias nocapture %x, ptr noalias nocapture readonly %y) #0 {
 entry:
   br label %vector.body
 
@@ -12,136 +12,104 @@ vector.body:                                      ; preds = %vector.body, %entry
 ; CHECK: lvsl
 ; CHECK: blr
   %index = phi i64 [ 0, %entry ], [ %index.next.15, %vector.body ]
-  %0 = getelementptr inbounds float, float* %y, i64 %index
-  %1 = bitcast float* %0 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %1, align 4
-  %2 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load)
-  %3 = getelementptr inbounds float, float* %x, i64 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  store <4 x float> %2, <4 x float>* %4, align 4
+  %0 = getelementptr inbounds float, ptr %y, i64 %index
+  %wide.load = load <4 x float>, ptr %0, align 4
+  %1 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load)
+  %2 = getelementptr inbounds float, ptr %x, i64 %index
+  store <4 x float> %1, ptr %2, align 4
   %index.next = add i64 %index, 4
-  %5 = getelementptr inbounds float, float* %y, i64 %index.next
-  %6 = bitcast float* %5 to <4 x float>*
-  %wide.load.1 = load <4 x float>, <4 x float>* %6, align 4
-  %7 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.1)
-  %8 = getelementptr inbounds float, float* %x, i64 %index.next
-  %9 = bitcast float* %8 to <4 x float>*
-  store <4 x float> %7, <4 x float>* %9, align 4
+  %3 = getelementptr inbounds float, ptr %y, i64 %index.next
+  %wide.load.1 = load <4 x float>, ptr %3, align 4
+  %4 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.1)
+  %5 = getelementptr inbounds float, ptr %x, i64 %index.next
+  store <4 x float> %4, ptr %5, align 4
   %index.next.1 = add i64 %index.next, 4
-  %10 = getelementptr inbounds float, float* %y, i64 %index.next.1
-  %11 = bitcast float* %10 to <4 x float>*
-  %wide.load.2 = load <4 x float>, <4 x float>* %11, align 4
-  %12 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.2)
-  %13 = getelementptr inbounds float, float* %x, i64 %index.next.1
-  %14 = bitcast float* %13 to <4 x float>*
-  store <4 x float> %12, <4 x float>* %14, align 4
+  %6 = getelementptr inbounds float, ptr %y, i64 %index.next.1
+  %wide.load.2 = load <4 x float>, ptr %6, align 4
+  %7 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.2)
+  %8 = getelementptr inbounds float, ptr %x, i64 %index.next.1
+  store <4 x float> %7, ptr %8, align 4
   %index.next.2 = add i64 %index.next.1, 4
-  %15 = getelementptr inbounds float, float* %y, i64 %index.next.2
-  %16 = bitcast float* %15 to <4 x float>*
-  %wide.load.3 = load <4 x float>, <4 x float>* %16, align 4
-  %17 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.3)
-  %18 = getelementptr inbounds float, float* %x, i64 %index.next.2
-  %19 = bitcast float* %18 to <4 x float>*
-  store <4 x float> %17, <4 x float>* %19, align 4
+  %9 = getelementptr inbounds float, ptr %y, i64 %index.next.2
+  %wide.load.3 = load <4 x float>, ptr %9, align 4
+  %10 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.3)
+  %11 = getelementptr inbounds float, ptr %x, i64 %index.next.2
+  store <4 x float> %10, ptr %11, align 4
   %index.next.3 = add i64 %index.next.2, 4
-  %20 = getelementptr inbounds float, float* %y, i64 %index.next.3
-  %21 = bitcast float* %20 to <4 x float>*
-  %wide.load.4 = load <4 x float>, <4 x float>* %21, align 4
-  %22 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.4)
-  %23 = getelementptr inbounds float, float* %x, i64 %index.next.3
-  %24 = bitcast float* %23 to <4 x float>*
-  store <4 x float> %22, <4 x float>* %24, align 4
+  %12 = getelementptr inbounds float, ptr %y, i64 %index.next.3
+  %wide.load.4 = load <4 x float>, ptr %12, align 4
+  %13 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.4)
+  %14 = getelementptr inbounds float, ptr %x, i64 %index.next.3
+  store <4 x float> %13, ptr %14, align 4
   %index.next.4 = add i64 %index.next.3, 4
-  %25 = getelementptr inbounds float, float* %y, i64 %index.next.4
-  %26 = bitcast float* %25 to <4 x float>*
-  %wide.load.5 = load <4 x float>, <4 x float>* %26, align 4
-  %27 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.5)
-  %28 = getelementptr inbounds float, float* %x, i64 %index.next.4
-  %29 = bitcast float* %28 to <4 x float>*
-  store <4 x float> %27, <4 x float>* %29, align 4
+  %15 = getelementptr inbounds float, ptr %y, i64 %index.next.4
+  %wide.load.5 = load <4 x float>, ptr %15, align 4
+  %16 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.5)
+  %17 = getelementptr inbounds float, ptr %x, i64 %index.next.4
+  store <4 x float> %16, ptr %17, align 4
   %index.next.5 = add i64 %index.next.4, 4
-  %30 = getelementptr inbounds float, float* %y, i64 %index.next.5
-  %31 = bitcast float* %30 to <4 x float>*
-  %wide.load.6 = load <4 x float>, <4 x float>* %31, align 4
-  %32 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.6)
-  %33 = getelementptr inbounds float, float* %x, i64 %index.next.5
-  %34 = bitcast float* %33 to <4 x float>*
-  store <4 x float> %32, <4 x float>* %34, align 4
+  %18 = getelementptr inbounds float, ptr %y, i64 %index.next.5
+  %wide.load.6 = load <4 x float>, ptr %18, align 4
+  %19 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.6)
+  %20 = getelementptr inbounds float, ptr %x, i64 %index.next.5
+  store <4 x float> %19, ptr %20, align 4
   %index.next.6 = add i64 %index.next.5, 4
-  %35 = getelementptr inbounds float, float* %y, i64 %index.next.6
-  %36 = bitcast float* %35 to <4 x float>*
-  %wide.load.7 = load <4 x float>, <4 x float>* %36, align 4
-  %37 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.7)
-  %38 = getelementptr inbounds float, float* %x, i64 %index.next.6
-  %39 = bitcast float* %38 to <4 x float>*
-  store <4 x float> %37, <4 x float>* %39, align 4
+  %21 = getelementptr inbounds float, ptr %y, i64 %index.next.6
+  %wide.load.7 = load <4 x float>, ptr %21, align 4
+  %22 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.7)
+  %23 = getelementptr inbounds float, ptr %x, i64 %index.next.6
+  store <4 x float> %22, ptr %23, align 4
   %index.next.7 = add i64 %index.next.6, 4
-  %40 = getelementptr inbounds float, float* %y, i64 %index.next.7
-  %41 = bitcast float* %40 to <4 x float>*
-  %wide.load.8 = load <4 x float>, <4 x float>* %41, align 4
-  %42 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.8)
-  %43 = getelementptr inbounds float, float* %x, i64 %index.next.7
-  %44 = bitcast float* %43 to <4 x float>*
-  store <4 x float> %42, <4 x float>* %44, align 4
+  %24 = getelementptr inbounds float, ptr %y, i64 %index.next.7
+  %wide.load.8 = load <4 x float>, ptr %24, align 4
+  %25 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.8)
+  %26 = getelementptr inbounds float, ptr %x, i64 %index.next.7
+  store <4 x float> %25, ptr %26, align 4
   %index.next.8 = add i64 %index.next.7, 4
-  %45 = getelementptr inbounds float, float* %y, i64 %index.next.8
-  %46 = bitcast float* %45 to <4 x float>*
-  %wide.load.9 = load <4 x float>, <4 x float>* %46, align 4
-  %47 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.9)
-  %48 = getelementptr inbounds float, float* %x, i64 %index.next.8
-  %49 = bitcast float* %48 to <4 x float>*
-  store <4 x float> %47, <4 x float>* %49, align 4
+  %27 = getelementptr inbounds float, ptr %y, i64 %index.next.8
+  %wide.load.9 = load <4 x float>, ptr %27, align 4
+  %28 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.9)
+  %29 = getelementptr inbounds float, ptr %x, i64 %index.next.8
+  store <4 x float> %28, ptr %29, align 4
   %index.next.9 = add i64 %index.next.8, 4
-  %50 = getelementptr inbounds float, float* %y, i64 %index.next.9
-  %51 = bitcast float* %50 to <4 x float>*
-  %wide.load.10 = load <4 x float>, <4 x float>* %51, align 4
-  %52 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.10)
-  %53 = getelementptr inbounds float, float* %x, i64 %index.next.9
-  %54 = bitcast float* %53 to <4 x float>*
-  store <4 x float> %52, <4 x float>* %54, align 4
+  %30 = getelementptr inbounds float, ptr %y, i64 %index.next.9
+  %wide.load.10 = load <4 x float>, ptr %30, align 4
+  %31 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.10)
+  %32 = getelementptr inbounds float, ptr %x, i64 %index.next.9
+  store <4 x float> %31, ptr %32, align 4
   %index.next.10 = add i64 %index.next.9, 4
-  %55 = getelementptr inbounds float, float* %y, i64 %index.next.10
-  %56 = bitcast float* %55 to <4 x float>*
-  %wide.load.11 = load <4 x float>, <4 x float>* %56, align 4
-  %57 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.11)
-  %58 = getelementptr inbounds float, float* %x, i64 %index.next.10
-  %59 = bitcast float* %58 to <4 x float>*
-  store <4 x float> %57, <4 x float>* %59, align 4
+  %33 = getelementptr inbounds float, ptr %y, i64 %index.next.10
+  %wide.load.11 = load <4 x float>, ptr %33, align 4
+  %34 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.11)
+  %35 = getelementptr inbounds float, ptr %x, i64 %index.next.10
+  store <4 x float> %34, ptr %35, align 4
   %index.next.11 = add i64 %index.next.10, 4
-  %60 = getelementptr inbounds float, float* %y, i64 %index.next.11
-  %61 = bitcast float* %60 to <4 x float>*
-  %wide.load.12 = load <4 x float>, <4 x float>* %61, align 4
-  %62 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.12)
-  %63 = getelementptr inbounds float, float* %x, i64 %index.next.11
-  %64 = bitcast float* %63 to <4 x float>*
-  store <4 x float> %62, <4 x float>* %64, align 4
+  %36 = getelementptr inbounds float, ptr %y, i64 %index.next.11
+  %wide.load.12 = load <4 x float>, ptr %36, align 4
+  %37 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.12)
+  %38 = getelementptr inbounds float, ptr %x, i64 %index.next.11
+  store <4 x float> %37, ptr %38, align 4
   %index.next.12 = add i64 %index.next.11, 4
-  %65 = getelementptr inbounds float, float* %y, i64 %index.next.12
-  %66 = bitcast float* %65 to <4 x float>*
-  %wide.load.13 = load <4 x float>, <4 x float>* %66, align 4
-  %67 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.13)
-  %68 = getelementptr inbounds float, float* %x, i64 %index.next.12
-  %69 = bitcast float* %68 to <4 x float>*
-  store <4 x float> %67, <4 x float>* %69, align 4
+  %39 = getelementptr inbounds float, ptr %y, i64 %index.next.12
+  %wide.load.13 = load <4 x float>, ptr %39, align 4
+  %40 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.13)
+  %41 = getelementptr inbounds float, ptr %x, i64 %index.next.12
+  store <4 x float> %40, ptr %41, align 4
   %index.next.13 = add i64 %index.next.12, 4
-  %70 = getelementptr inbounds float, float* %y, i64 %index.next.13
-  %71 = bitcast float* %70 to <4 x float>*
-  %wide.load.14 = load <4 x float>, <4 x float>* %71, align 4
-  %72 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.14)
-  %73 = getelementptr inbounds float, float* %x, i64 %index.next.13
-  %74 = bitcast float* %73 to <4 x float>*
-  store <4 x float> %72, <4 x float>* %74, align 4
+  %42 = getelementptr inbounds float, ptr %y, i64 %index.next.13
+  %wide.load.14 = load <4 x float>, ptr %42, align 4
+  %43 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.14)
+  %44 = getelementptr inbounds float, ptr %x, i64 %index.next.13
+  store <4 x float> %43, ptr %44, align 4
   %index.next.14 = add i64 %index.next.13, 4
-  %75 = getelementptr inbounds float, float* %y, i64 %index.next.14
-  %76 = bitcast float* %75 to <4 x float>*
-  %wide.load.15 = load <4 x float>, <4 x float>* %76, align 4
-  %77 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.15)
-  %78 = getelementptr inbounds float, float* %x, i64 %index.next.14
-  %79 = bitcast float* %78 to <4 x float>*
-  store <4 x float> %77, <4 x float>* %79, align 4
+  %45 = getelementptr inbounds float, ptr %y, i64 %index.next.14
+  %wide.load.15 = load <4 x float>, ptr %45, align 4
+  %46 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.15)
+  %47 = getelementptr inbounds float, ptr %x, i64 %index.next.14
+  store <4 x float> %46, ptr %47, align 4
   %index.next.15 = add i64 %index.next.14, 4
-  %80 = icmp eq i64 %index.next.15, 2048
-  br i1 %80, label %for.end, label %vector.body
+  %48 = icmp eq i64 %index.next.15, 2048
+  br i1 %48, label %for.end, label %vector.body
 
 for.end:                                          ; preds = %vector.body
   ret void
@@ -150,10 +118,9 @@ for.end:                                          ; preds = %vector.body
 ; Function Attrs: nounwind readonly
 declare <4 x float> @llvm_cos_v4f32(<4 x float>) #1
 
-define <2 x double> @bar(double* %x) {
+define <2 x double> @bar(ptr %x) {
 entry:
-  %p = bitcast double* %x to <2 x double>*
-  %r = load <2 x double>, <2 x double>* %p, align 8
+  %r = load <2 x double>, ptr %x, align 8
 
 ; CHECK-LABEL: @bar
 ; CHECK-NOT: lvsl

diff  --git a/llvm/test/CodeGen/PowerPC/unal-vec-ldst.ll b/llvm/test/CodeGen/PowerPC/unal-vec-ldst.ll
index 58c9eafe936e..4791971e5b39 100644
--- a/llvm/test/CodeGen/PowerPC/unal-vec-ldst.ll
+++ b/llvm/test/CodeGen/PowerPC/unal-vec-ldst.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple powerpc64-unknown-linux-gnu -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple powerpc64-ibm-aix-xcoff -verify-machineinstrs -vec-extabi < %s | FileCheck %s
 
-define <16 x i8> @test_l_v16i8(<16 x i8>* %p) #0 {
+define <16 x i8> @test_l_v16i8(ptr %p) #0 {
 ; CHECK-LABEL: test_l_v16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li 4, 15
@@ -12,12 +12,12 @@ define <16 x i8> @test_l_v16i8(<16 x i8>* %p) #0 {
 ; CHECK-NEXT:    vperm 2, 4, 2, 3
 ; CHECK-NEXT:    blr
 entry:
-  %r = load <16 x i8>, <16 x i8>* %p, align 1
+  %r = load <16 x i8>, ptr %p, align 1
   ret <16 x i8> %r
 
 }
 
-define <32 x i8> @test_l_v32i8(<32 x i8>* %p) #0 {
+define <32 x i8> @test_l_v32i8(ptr %p) #0 {
 ; CHECK-LABEL: test_l_v32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li 4, 31
@@ -30,12 +30,12 @@ define <32 x i8> @test_l_v32i8(<32 x i8>* %p) #0 {
 ; CHECK-NEXT:    vperm 2, 0, 4, 5
 ; CHECK-NEXT:    blr
 entry:
-  %r = load <32 x i8>, <32 x i8>* %p, align 1
+  %r = load <32 x i8>, ptr %p, align 1
   ret <32 x i8> %r
 
 }
 
-define <8 x i16> @test_l_v8i16(<8 x i16>* %p) #0 {
+define <8 x i16> @test_l_v8i16(ptr %p) #0 {
 ; CHECK-LABEL: test_l_v8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li 4, 15
@@ -45,12 +45,12 @@ define <8 x i16> @test_l_v8i16(<8 x i16>* %p) #0 {
 ; CHECK-NEXT:    vperm 2, 4, 2, 3
 ; CHECK-NEXT:    blr
 entry:
-  %r = load <8 x i16>, <8 x i16>* %p, align 2
+  %r = load <8 x i16>, ptr %p, align 2
   ret <8 x i16> %r
 
 }
 
-define <16 x i16> @test_l_v16i16(<16 x i16>* %p) #0 {
+define <16 x i16> @test_l_v16i16(ptr %p) #0 {
 ; CHECK-LABEL: test_l_v16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li 4, 31
@@ -63,12 +63,12 @@ define <16 x i16> @test_l_v16i16(<16 x i16>* %p) #0 {
 ; CHECK-NEXT:    vperm 2, 0, 4, 5
 ; CHECK-NEXT:    blr
 entry:
-  %r = load <16 x i16>, <16 x i16>* %p, align 2
+  %r = load <16 x i16>, ptr %p, align 2
   ret <16 x i16> %r
 
 }
 
-define <4 x i32> @test_l_v4i32(<4 x i32>* %p) #0 {
+define <4 x i32> @test_l_v4i32(ptr %p) #0 {
 ; CHECK-LABEL: test_l_v4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li 4, 15
@@ -78,12 +78,12 @@ define <4 x i32> @test_l_v4i32(<4 x i32>* %p) #0 {
 ; CHECK-NEXT:    vperm 2, 4, 2, 3
 ; CHECK-NEXT:    blr
 entry:
-  %r = load <4 x i32>, <4 x i32>* %p, align 4
+  %r = load <4 x i32>, ptr %p, align 4
   ret <4 x i32> %r
 
 }
 
-define <8 x i32> @test_l_v8i32(<8 x i32>* %p) #0 {
+define <8 x i32> @test_l_v8i32(ptr %p) #0 {
 ; CHECK-LABEL: test_l_v8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li 4, 31
@@ -96,23 +96,23 @@ define <8 x i32> @test_l_v8i32(<8 x i32>* %p) #0 {
 ; CHECK-NEXT:    vperm 2, 0, 4, 5
 ; CHECK-NEXT:    blr
 entry:
-  %r = load <8 x i32>, <8 x i32>* %p, align 4
+  %r = load <8 x i32>, ptr %p, align 4
   ret <8 x i32> %r
 
 }
 
-define <2 x i64> @test_l_v2i64(<2 x i64>* %p) #0 {
+define <2 x i64> @test_l_v2i64(ptr %p) #0 {
 ; CHECK-LABEL: test_l_v2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvd2x 34, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-  %r = load <2 x i64>, <2 x i64>* %p, align 8
+  %r = load <2 x i64>, ptr %p, align 8
   ret <2 x i64> %r
 
 }
 
-define <4 x i64> @test_l_v4i64(<4 x i64>* %p) #0 {
+define <4 x i64> @test_l_v4i64(ptr %p) #0 {
 ; CHECK-LABEL: test_l_v4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li 4, 16
@@ -120,12 +120,12 @@ define <4 x i64> @test_l_v4i64(<4 x i64>* %p) #0 {
 ; CHECK-NEXT:    lxvd2x 35, 3, 4
 ; CHECK-NEXT:    blr
 entry:
-  %r = load <4 x i64>, <4 x i64>* %p, align 8
+  %r = load <4 x i64>, ptr %p, align 8
   ret <4 x i64> %r
 
 }
 
-define <4 x float> @test_l_v4float(<4 x float>* %p) #0 {
+define <4 x float> @test_l_v4float(ptr %p) #0 {
 ; CHECK-LABEL: test_l_v4float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li 4, 15
@@ -135,12 +135,12 @@ define <4 x float> @test_l_v4float(<4 x float>* %p) #0 {
 ; CHECK-NEXT:    vperm 2, 4, 2, 3
 ; CHECK-NEXT:    blr
 entry:
-  %r = load <4 x float>, <4 x float>* %p, align 4
+  %r = load <4 x float>, ptr %p, align 4
   ret <4 x float> %r
 
 }
 
-define <8 x float> @test_l_v8float(<8 x float>* %p) #0 {
+define <8 x float> @test_l_v8float(ptr %p) #0 {
 ; CHECK-LABEL: test_l_v8float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li 4, 31
@@ -153,23 +153,23 @@ define <8 x float> @test_l_v8float(<8 x float>* %p) #0 {
 ; CHECK-NEXT:    vperm 2, 0, 4, 5
 ; CHECK-NEXT:    blr
 entry:
-  %r = load <8 x float>, <8 x float>* %p, align 4
+  %r = load <8 x float>, ptr %p, align 4
   ret <8 x float> %r
 
 }
 
-define <2 x double> @test_l_v2double(<2 x double>* %p) #0 {
+define <2 x double> @test_l_v2double(ptr %p) #0 {
 ; CHECK-LABEL: test_l_v2double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvd2x 34, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-  %r = load <2 x double>, <2 x double>* %p, align 8
+  %r = load <2 x double>, ptr %p, align 8
   ret <2 x double> %r
 
 }
 
-define <4 x double> @test_l_v4double(<4 x double>* %p) #0 {
+define <4 x double> @test_l_v4double(ptr %p) #0 {
 ; CHECK-LABEL: test_l_v4double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li 4, 16
@@ -177,23 +177,23 @@ define <4 x double> @test_l_v4double(<4 x double>* %p) #0 {
 ; CHECK-NEXT:    lxvd2x 35, 3, 4
 ; CHECK-NEXT:    blr
 entry:
-  %r = load <4 x double>, <4 x double>* %p, align 8
+  %r = load <4 x double>, ptr %p, align 8
   ret <4 x double> %r
 
 }
 
-define <16 x i8> @test_l_p8v16i8(<16 x i8>* %p) #2 {
+define <16 x i8> @test_l_p8v16i8(ptr %p) #2 {
 ; CHECK-LABEL: test_l_p8v16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvw4x 34, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-  %r = load <16 x i8>, <16 x i8>* %p, align 1
+  %r = load <16 x i8>, ptr %p, align 1
   ret <16 x i8> %r
 
 }
 
-define <32 x i8> @test_l_p8v32i8(<32 x i8>* %p) #2 {
+define <32 x i8> @test_l_p8v32i8(ptr %p) #2 {
 ; CHECK-LABEL: test_l_p8v32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li 4, 16
@@ -201,23 +201,23 @@ define <32 x i8> @test_l_p8v32i8(<32 x i8>* %p) #2 {
 ; CHECK-NEXT:    lxvw4x 35, 3, 4
 ; CHECK-NEXT:    blr
 entry:
-  %r = load <32 x i8>, <32 x i8>* %p, align 1
+  %r = load <32 x i8>, ptr %p, align 1
   ret <32 x i8> %r
 
 }
 
-define <8 x i16> @test_l_p8v8i16(<8 x i16>* %p) #2 {
+define <8 x i16> @test_l_p8v8i16(ptr %p) #2 {
 ; CHECK-LABEL: test_l_p8v8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvw4x 34, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-  %r = load <8 x i16>, <8 x i16>* %p, align 2
+  %r = load <8 x i16>, ptr %p, align 2
   ret <8 x i16> %r
 
 }
 
-define <16 x i16> @test_l_p8v16i16(<16 x i16>* %p) #2 {
+define <16 x i16> @test_l_p8v16i16(ptr %p) #2 {
 ; CHECK-LABEL: test_l_p8v16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li 4, 16
@@ -225,23 +225,23 @@ define <16 x i16> @test_l_p8v16i16(<16 x i16>* %p) #2 {
 ; CHECK-NEXT:    lxvw4x 35, 3, 4
 ; CHECK-NEXT:    blr
 entry:
-  %r = load <16 x i16>, <16 x i16>* %p, align 2
+  %r = load <16 x i16>, ptr %p, align 2
   ret <16 x i16> %r
 
 }
 
-define <4 x i32> @test_l_p8v4i32(<4 x i32>* %p) #2 {
+define <4 x i32> @test_l_p8v4i32(ptr %p) #2 {
 ; CHECK-LABEL: test_l_p8v4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvw4x 34, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-  %r = load <4 x i32>, <4 x i32>* %p, align 4
+  %r = load <4 x i32>, ptr %p, align 4
   ret <4 x i32> %r
 
 }
 
-define <8 x i32> @test_l_p8v8i32(<8 x i32>* %p) #2 {
+define <8 x i32> @test_l_p8v8i32(ptr %p) #2 {
 ; CHECK-LABEL: test_l_p8v8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li 4, 16
@@ -249,23 +249,23 @@ define <8 x i32> @test_l_p8v8i32(<8 x i32>* %p) #2 {
 ; CHECK-NEXT:    lxvw4x 35, 3, 4
 ; CHECK-NEXT:    blr
 entry:
-  %r = load <8 x i32>, <8 x i32>* %p, align 4
+  %r = load <8 x i32>, ptr %p, align 4
   ret <8 x i32> %r
 
 }
 
-define <2 x i64> @test_l_p8v2i64(<2 x i64>* %p) #2 {
+define <2 x i64> @test_l_p8v2i64(ptr %p) #2 {
 ; CHECK-LABEL: test_l_p8v2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvd2x 34, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-  %r = load <2 x i64>, <2 x i64>* %p, align 8
+  %r = load <2 x i64>, ptr %p, align 8
   ret <2 x i64> %r
 
 }
 
-define <4 x i64> @test_l_p8v4i64(<4 x i64>* %p) #2 {
+define <4 x i64> @test_l_p8v4i64(ptr %p) #2 {
 ; CHECK-LABEL: test_l_p8v4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li 4, 16
@@ -273,23 +273,23 @@ define <4 x i64> @test_l_p8v4i64(<4 x i64>* %p) #2 {
 ; CHECK-NEXT:    lxvd2x 35, 3, 4
 ; CHECK-NEXT:    blr
 entry:
-  %r = load <4 x i64>, <4 x i64>* %p, align 8
+  %r = load <4 x i64>, ptr %p, align 8
   ret <4 x i64> %r
 
 }
 
-define <4 x float> @test_l_p8v4float(<4 x float>* %p) #2 {
+define <4 x float> @test_l_p8v4float(ptr %p) #2 {
 ; CHECK-LABEL: test_l_p8v4float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvw4x 34, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-  %r = load <4 x float>, <4 x float>* %p, align 4
+  %r = load <4 x float>, ptr %p, align 4
   ret <4 x float> %r
 
 }
 
-define <8 x float> @test_l_p8v8float(<8 x float>* %p) #2 {
+define <8 x float> @test_l_p8v8float(ptr %p) #2 {
 ; CHECK-LABEL: test_l_p8v8float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li 4, 16
@@ -297,23 +297,23 @@ define <8 x float> @test_l_p8v8float(<8 x float>* %p) #2 {
 ; CHECK-NEXT:    lxvw4x 35, 3, 4
 ; CHECK-NEXT:    blr
 entry:
-  %r = load <8 x float>, <8 x float>* %p, align 4
+  %r = load <8 x float>, ptr %p, align 4
   ret <8 x float> %r
 
 }
 
-define <2 x double> @test_l_p8v2double(<2 x double>* %p) #2 {
+define <2 x double> @test_l_p8v2double(ptr %p) #2 {
 ; CHECK-LABEL: test_l_p8v2double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvd2x 34, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-  %r = load <2 x double>, <2 x double>* %p, align 8
+  %r = load <2 x double>, ptr %p, align 8
   ret <2 x double> %r
 
 }
 
-define <4 x double> @test_l_p8v4double(<4 x double>* %p) #2 {
+define <4 x double> @test_l_p8v4double(ptr %p) #2 {
 ; CHECK-LABEL: test_l_p8v4double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li 4, 16
@@ -321,23 +321,23 @@ define <4 x double> @test_l_p8v4double(<4 x double>* %p) #2 {
 ; CHECK-NEXT:    lxvd2x 35, 3, 4
 ; CHECK-NEXT:    blr
 entry:
-  %r = load <4 x double>, <4 x double>* %p, align 8
+  %r = load <4 x double>, ptr %p, align 8
   ret <4 x double> %r
 
 }
 
-define void @test_s_v16i8(<16 x i8>* %p, <16 x i8> %v) #0 {
+define void @test_s_v16i8(ptr %p, <16 x i8> %v) #0 {
 ; CHECK-LABEL: test_s_v16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stxvw4x 34, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-  store <16 x i8> %v, <16 x i8>* %p, align 1
+  store <16 x i8> %v, ptr %p, align 1
   ret void
 
 }
 
-define void @test_s_v32i8(<32 x i8>* %p, <32 x i8> %v) #0 {
+define void @test_s_v32i8(ptr %p, <32 x i8> %v) #0 {
 ; CHECK-LABEL: test_s_v32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li 4, 16
@@ -345,23 +345,23 @@ define void @test_s_v32i8(<32 x i8>* %p, <32 x i8> %v) #0 {
 ; CHECK-NEXT:    stxvw4x 35, 3, 4
 ; CHECK-NEXT:    blr
 entry:
-  store <32 x i8> %v, <32 x i8>* %p, align 1
+  store <32 x i8> %v, ptr %p, align 1
   ret void
 
 }
 
-define void @test_s_v8i16(<8 x i16>* %p, <8 x i16> %v) #0 {
+define void @test_s_v8i16(ptr %p, <8 x i16> %v) #0 {
 ; CHECK-LABEL: test_s_v8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stxvw4x 34, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-  store <8 x i16> %v, <8 x i16>* %p, align 2
+  store <8 x i16> %v, ptr %p, align 2
   ret void
 
 }
 
-define void @test_s_v16i16(<16 x i16>* %p, <16 x i16> %v) #0 {
+define void @test_s_v16i16(ptr %p, <16 x i16> %v) #0 {
 ; CHECK-LABEL: test_s_v16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li 4, 16
@@ -369,23 +369,23 @@ define void @test_s_v16i16(<16 x i16>* %p, <16 x i16> %v) #0 {
 ; CHECK-NEXT:    stxvw4x 35, 3, 4
 ; CHECK-NEXT:    blr
 entry:
-  store <16 x i16> %v, <16 x i16>* %p, align 2
+  store <16 x i16> %v, ptr %p, align 2
   ret void
 
 }
 
-define void @test_s_v4i32(<4 x i32>* %p, <4 x i32> %v) #0 {
+define void @test_s_v4i32(ptr %p, <4 x i32> %v) #0 {
 ; CHECK-LABEL: test_s_v4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stxvw4x 34, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-  store <4 x i32> %v, <4 x i32>* %p, align 4
+  store <4 x i32> %v, ptr %p, align 4
   ret void
 
 }
 
-define void @test_s_v8i32(<8 x i32>* %p, <8 x i32> %v) #0 {
+define void @test_s_v8i32(ptr %p, <8 x i32> %v) #0 {
 ; CHECK-LABEL: test_s_v8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li 4, 16
@@ -393,23 +393,23 @@ define void @test_s_v8i32(<8 x i32>* %p, <8 x i32> %v) #0 {
 ; CHECK-NEXT:    stxvw4x 35, 3, 4
 ; CHECK-NEXT:    blr
 entry:
-  store <8 x i32> %v, <8 x i32>* %p, align 4
+  store <8 x i32> %v, ptr %p, align 4
   ret void
 
 }
 
-define void @test_s_v2i64(<2 x i64>* %p, <2 x i64> %v) #0 {
+define void @test_s_v2i64(ptr %p, <2 x i64> %v) #0 {
 ; CHECK-LABEL: test_s_v2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stxvd2x 34, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-  store <2 x i64> %v, <2 x i64>* %p, align 8
+  store <2 x i64> %v, ptr %p, align 8
   ret void
 
 }
 
-define void @test_s_v4i64(<4 x i64>* %p, <4 x i64> %v) #0 {
+define void @test_s_v4i64(ptr %p, <4 x i64> %v) #0 {
 ; CHECK-LABEL: test_s_v4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li 4, 16
@@ -417,23 +417,23 @@ define void @test_s_v4i64(<4 x i64>* %p, <4 x i64> %v) #0 {
 ; CHECK-NEXT:    stxvd2x 35, 3, 4
 ; CHECK-NEXT:    blr
 entry:
-  store <4 x i64> %v, <4 x i64>* %p, align 8
+  store <4 x i64> %v, ptr %p, align 8
   ret void
 
 }
 
-define void @test_s_v4float(<4 x float>* %p, <4 x float> %v) #0 {
+define void @test_s_v4float(ptr %p, <4 x float> %v) #0 {
 ; CHECK-LABEL: test_s_v4float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stxvw4x 34, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-  store <4 x float> %v, <4 x float>* %p, align 4
+  store <4 x float> %v, ptr %p, align 4
   ret void
 
 }
 
-define void @test_s_v8float(<8 x float>* %p, <8 x float> %v) #0 {
+define void @test_s_v8float(ptr %p, <8 x float> %v) #0 {
 ; CHECK-LABEL: test_s_v8float:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li 4, 16
@@ -441,23 +441,23 @@ define void @test_s_v8float(<8 x float>* %p, <8 x float> %v) #0 {
 ; CHECK-NEXT:    stxvw4x 35, 3, 4
 ; CHECK-NEXT:    blr
 entry:
-  store <8 x float> %v, <8 x float>* %p, align 4
+  store <8 x float> %v, ptr %p, align 4
   ret void
 
 }
 
-define void @test_s_v2double(<2 x double>* %p, <2 x double> %v) #0 {
+define void @test_s_v2double(ptr %p, <2 x double> %v) #0 {
 ; CHECK-LABEL: test_s_v2double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stxvd2x 34, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-  store <2 x double> %v, <2 x double>* %p, align 8
+  store <2 x double> %v, ptr %p, align 8
   ret void
 
 }
 
-define void @test_s_v4double(<4 x double>* %p, <4 x double> %v) #0 {
+define void @test_s_v4double(ptr %p, <4 x double> %v) #0 {
 ; CHECK-LABEL: test_s_v4double:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li 4, 16
@@ -465,7 +465,7 @@ define void @test_s_v4double(<4 x double>* %p, <4 x double> %v) #0 {
 ; CHECK-NEXT:    stxvd2x 35, 3, 4
 ; CHECK-NEXT:    blr
 entry:
-  store <4 x double> %v, <4 x double>* %p, align 8
+  store <4 x double> %v, ptr %p, align 8
   ret void
 
 }

diff --git a/llvm/test/CodeGen/PowerPC/unal-vec-negarith.ll b/llvm/test/CodeGen/PowerPC/unal-vec-negarith.ll
index 7b2a4528c0b3..6f8b9c397124 100644
--- a/llvm/test/CodeGen/PowerPC/unal-vec-negarith.ll
+++ b/llvm/test/CodeGen/PowerPC/unal-vec-negarith.ll
@@ -4,9 +4,9 @@
 target datalayout = "E-m:e-i64:64-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
-define <16 x i8> @test_l_v16i8(<16 x i8>* %p) #0 {
+define <16 x i8> @test_l_v16i8(ptr %p) #0 {
 entry:
-  %r = load <16 x i8>, <16 x i8>* %p, align 1
+  %r = load <16 x i8>, ptr %p, align 1
   ret <16 x i8> %r
 
 ; CHECK-NOT: v4i32,ch = llvm.ppc.altivec.lvx{{.*}}<(load (s248) from %ir.p + 4294967281, align 1)>

diff --git a/llvm/test/CodeGen/PowerPC/unal4-std.ll b/llvm/test/CodeGen/PowerPC/unal4-std.ll
index f8559f426dce..45664ff9d13f 100644
--- a/llvm/test/CodeGen/PowerPC/unal4-std.ll
+++ b/llvm/test/CodeGen/PowerPC/unal4-std.ll
@@ -4,7 +4,7 @@
 target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
-define void @copy_to_conceal(<8 x i16>* %inp) #0 {
+define void @copy_to_conceal(ptr %inp) #0 {
 ; CHECK-LABEL: copy_to_conceal:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vxor 2, 2, 2
@@ -22,7 +22,7 @@ define void @copy_to_conceal(<8 x i16>* %inp) #0 {
 ; CHECK-VSX-NEXT:    stxvw4x 0, 0, 3
 ; CHECK-VSX-NEXT:    blr
 entry:
-  store <8 x i16> zeroinitializer, <8 x i16>* %inp, align 2
+  store <8 x i16> zeroinitializer, ptr %inp, align 2
   br label %if.end210
 
 if.end210:                                        ; preds = %entry

diff --git a/llvm/test/CodeGen/PowerPC/unaligned-addressing-mode.ll b/llvm/test/CodeGen/PowerPC/unaligned-addressing-mode.ll
index 02039a604dff..dbd0f815edab 100644
--- a/llvm/test/CodeGen/PowerPC/unaligned-addressing-mode.ll
+++ b/llvm/test/CodeGen/PowerPC/unaligned-addressing-mode.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr9 -ppc-convert-rr-to-ri=false -ppc-asm-full-reg-names < %s | FileCheck %s
 
 ; ISEL matches address mode xaddr.
-define i8 @test_xaddr(i8* %p) {
+define i8 @test_xaddr(ptr %p) {
 ; CHECK-LABEL: test_xaddr:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li r4, 0
@@ -11,16 +11,16 @@ define i8 @test_xaddr(i8* %p) {
 ; CHECK-NEXT:    lbzx r3, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %p.addr = alloca i8*, align 8
-  store i8* %p, i8** %p.addr, align 8
-  %0 = load i8*, i8** %p.addr, align 8
-  %add.ptr = getelementptr inbounds i8, i8* %0, i64 40000
-  %1 = load i8, i8* %add.ptr, align 1
+  %p.addr = alloca ptr, align 8
+  store ptr %p, ptr %p.addr, align 8
+  %0 = load ptr, ptr %p.addr, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %0, i64 40000
+  %1 = load i8, ptr %add.ptr, align 1
   ret i8 %1
 }
 
 ; ISEL matches address mode xaddrX4.
-define i64 @test_xaddrX4(i8* %p) {
+define i64 @test_xaddrX4(ptr %p) {
 ; CHECK-LABEL: test_xaddrX4:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li r4, 3
@@ -28,31 +28,29 @@ define i64 @test_xaddrX4(i8* %p) {
 ; CHECK-NEXT:    ldx r3, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %p.addr = alloca i8*, align 8
-  store i8* %p, i8** %p.addr, align 8
-  %0 = load i8*, i8** %p.addr, align 8
-  %add.ptr = getelementptr inbounds i8, i8* %0, i64 3
-  %1 = bitcast i8* %add.ptr to i64*
-  %2 = load i64, i64* %1, align 8
-  ret i64 %2
+  %p.addr = alloca ptr, align 8
+  store ptr %p, ptr %p.addr, align 8
+  %0 = load ptr, ptr %p.addr, align 8
+  %add.ptr = getelementptr inbounds i8, ptr %0, i64 3
+  %1 = load i64, ptr %add.ptr, align 8
+  ret i64 %1
 }
 
 ; ISEL matches address mode xaddrX16.
-define <2 x double> @test_xaddrX16(double* %arr) {
+define <2 x double> @test_xaddrX16(ptr %arr) {
 ; CHECK-LABEL: test_xaddrX16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li r4, 40
 ; CHECK-NEXT:    lxvx vs34, r3, r4
 ; CHECK-NEXT:    blr
 entry:
-  %arrayidx1 = getelementptr inbounds double, double* %arr, i64 5
-  %0 = bitcast double* %arrayidx1 to <2 x double>*
-  %1 = load <2 x double>, <2 x double>* %0, align 16
-  ret <2 x double> %1
+  %arrayidx1 = getelementptr inbounds double, ptr %arr, i64 5
+  %0 = load <2 x double>, ptr %arrayidx1, align 16
+  ret <2 x double> %0
 }
 
 ; ISEL matches address mode xoaddr.
-define void @test_xoaddr(i32* %arr, i32* %arrTo) {
+define void @test_xoaddr(ptr %arr, ptr %arrTo) {
 ; CHECK-LABEL: test_xoaddr:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li r5, 8
@@ -61,18 +59,16 @@ define void @test_xoaddr(i32* %arr, i32* %arrTo) {
 ; CHECK-NEXT:    stxvx vs0, r4, r3
 ; CHECK-NEXT:    blr
 entry:
-  %arrayidx = getelementptr inbounds i32, i32* %arrTo, i64 1
-  %0 = bitcast i32* %arrayidx to <4 x i32>*
-  %arrayidx1 = getelementptr inbounds i32, i32* %arr, i64 2
-  %1 = bitcast i32* %arrayidx1 to <4 x i32>*
-  %2 = load <4 x i32>, <4 x i32>* %1, align 8
-  store <4 x i32> %2, <4 x i32>* %0, align 8
+  %arrayidx = getelementptr inbounds i32, ptr %arrTo, i64 1
+  %arrayidx1 = getelementptr inbounds i32, ptr %arr, i64 2
+  %0 = load <4 x i32>, ptr %arrayidx1, align 8
+  store <4 x i32> %0, ptr %arrayidx, align 8
   ret void
 }
 
 ; ISEL matches address mode xaddrX4 and generates LI which can be moved outside of
 ; loop.
-define i64 @test_xaddrX4_loop(i8* %p) {
+define i64 @test_xaddrX4_loop(ptr %p) {
 ; CHECK-LABEL: test_xaddrX4_loop:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addi r4, r3, -8
@@ -97,13 +93,11 @@ for.body:                                         ; preds = %for.body, %entry
   %i.015 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
   %res.014 = phi i64 [ 0, %entry ], [ %add, %for.body ]
   %mul = shl i64 %i.015, 3
-  %add.ptr = getelementptr inbounds i8, i8* %p, i64 %mul
-  %0 = bitcast i8* %add.ptr to i64*
-  %1 = load i64, i64* %0, align 8
-  %add.ptr3 = getelementptr inbounds i8, i8* %add.ptr, i64 3
-  %2 = bitcast i8* %add.ptr3 to i64*
-  %3 = load i64, i64* %2, align 8
-  %mul4 = mul i64 %3, %1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i64 %mul
+  %0 = load i64, ptr %add.ptr, align 8
+  %add.ptr3 = getelementptr inbounds i8, ptr %add.ptr, i64 3
+  %1 = load i64, ptr %add.ptr3, align 8
+  %mul4 = mul i64 %1, %0
   %add = add i64 %mul4, %res.014
   %inc = add nuw nsw i64 %i.015, 1
   %exitcond = icmp eq i64 %inc, 8

diff --git a/llvm/test/CodeGen/PowerPC/unaligned-dqform-ld.ll b/llvm/test/CodeGen/PowerPC/unaligned-dqform-ld.ll
index 3daf77d917ef..b9285f438a36 100644
--- a/llvm/test/CodeGen/PowerPC/unaligned-dqform-ld.ll
+++ b/llvm/test/CodeGen/PowerPC/unaligned-dqform-ld.ll
@@ -11,17 +11,17 @@
 %2 = type { %3 }
 %3 = type { %4 }
 %4 = type { %5 }
-%5 = type { i8*, i32, i32 }
+%5 = type { ptr, i32, i32 }
 %6 = type { [160 x i8] }
 %7 = type { %8, i32, %8 }
-%8 = type { i8*, i64 }
+%8 = type { ptr, i64 }
 
 $abc = comdat any
 
 ; This test checks that X-Form load, lxvx, is being produced here instead of
 ; the DQ-Form, lxv. We should not be producing lxv here as the frame index
 ; value is unaligned (not a multiple of 16).
-define void @abc(%0* %arg, [5 x i64] %arg1, [5 x i64] %arg2, [5 x i64] %arg3, [5 x i64] %arg4) local_unnamed_addr #0 comdat {
+define void @abc(ptr %arg, [5 x i64] %arg1, [5 x i64] %arg2, [5 x i64] %arg3, [5 x i64] %arg4) local_unnamed_addr #0 comdat {
 ; CHECK-P9-LE-LABEL: abc:
 ; CHECK-P9-LE:       # %bb.0: # %bb
 ; CHECK-P9-LE-NEXT:    addi r6, r1, 120
@@ -91,31 +91,27 @@ bb:
   %i16 = extractvalue [5 x i64] %arg3, 3
   %i17 = extractvalue [5 x i64] %arg4, 0
   %i18 = extractvalue [5 x i64] %arg4, 4
-  store i64 %i5, i64* undef, align 8
-  store i64 %i6, i64* null, align 8
-  %i19 = getelementptr inbounds [4 x %7], [4 x %7]* %i, i64 0, i64 0, i32 2
-  %i20 = bitcast %8* %i19 to i64*
-  store i64 %i7, i64* %i20, align 8
-  store i64 %i8, i64* undef, align 8
-  store i64 %i9, i64* null, align 8
-  store i64 %i10, i64* undef, align 8
-  %i21 = getelementptr inbounds [4 x %7], [4 x %7]* %i, i64 0, i64 1, i32 2
-  %i22 = bitcast %8* %i21 to i64*
-  store i64 %i11, i64* %i22, align 8
-  %i23 = getelementptr inbounds [4 x %7], [4 x %7]* %i, i64 0, i64 1, i32 2, i32 1
-  store i64 %i12, i64* %i23, align 8
-  %i24 = getelementptr inbounds [4 x %7], [4 x %7]* %i, i64 0, i64 2
-  %i25 = bitcast %7* %i24 to i64*
-  store i64 %i13, i64* %i25, align 8
-  %i26 = getelementptr inbounds [4 x %7], [4 x %7]* %i, i64 0, i64 2, i32 0, i32 1
-  store i64 %i14, i64* %i26, align 8
-  store i64 %i15, i64* undef, align 8
-  store i64 %i16, i64* null, align 8
-  store i64 %i17, i64* undef, align 8
-  store i64 undef, i64* null, align 8
-  store i64 %i18, i64* undef, align 8
-  %i27 = getelementptr inbounds %0, %0* %arg, i64 0, i32 0, i32 0, i32 0
-  %i28 = getelementptr inbounds %3, %3* %i27, i64 1, i32 0
-  store %4* %i28, %4** undef, align 8
+  store i64 %i5, ptr undef, align 8
+  store i64 %i6, ptr null, align 8
+  %i19 = getelementptr inbounds [4 x %7], ptr %i, i64 0, i64 0, i32 2
+  store i64 %i7, ptr %i19, align 8
+  store i64 %i8, ptr undef, align 8
+  store i64 %i9, ptr null, align 8
+  store i64 %i10, ptr undef, align 8
+  %i21 = getelementptr inbounds [4 x %7], ptr %i, i64 0, i64 1, i32 2
+  store i64 %i11, ptr %i21, align 8
+  %i23 = getelementptr inbounds [4 x %7], ptr %i, i64 0, i64 1, i32 2, i32 1
+  store i64 %i12, ptr %i23, align 8
+  %i24 = getelementptr inbounds [4 x %7], ptr %i, i64 0, i64 2
+  store i64 %i13, ptr %i24, align 8
+  %i26 = getelementptr inbounds [4 x %7], ptr %i, i64 0, i64 2, i32 0, i32 1
+  store i64 %i14, ptr %i26, align 8
+  store i64 %i15, ptr undef, align 8
+  store i64 %i16, ptr null, align 8
+  store i64 %i17, ptr undef, align 8
+  store i64 undef, ptr null, align 8
+  store i64 %i18, ptr undef, align 8
+  %i28 = getelementptr inbounds %3, ptr %arg, i64 1, i32 0
+  store ptr %i28, ptr undef, align 8
   ret void
 }

diff --git a/llvm/test/CodeGen/PowerPC/unaligned-floats.ll b/llvm/test/CodeGen/PowerPC/unaligned-floats.ll
index af7d36ac1d09..61a8738f9ccf 100644
--- a/llvm/test/CodeGen/PowerPC/unaligned-floats.ll
+++ b/llvm/test/CodeGen/PowerPC/unaligned-floats.ll
@@ -20,24 +20,24 @@ define dso_local i32 @main() local_unnamed_addr {
 ; UNALIGN:       lfs f1, 12(r1)
 ; UNALIGN:       blr
 entry:
-  %0 = load i8, i8* getelementptr inbounds (%struct.anon, %struct.anon* @s, i32 0, i32 1, i32 1), align 1
+  %0 = load i8, ptr getelementptr inbounds (%struct.anon, ptr @s, i32 0, i32 1, i32 1), align 1
   %conv = zext i8 %0 to i32
   %shl = shl nuw i32 %conv, 24
-  %1 = load i8, i8* getelementptr inbounds (%struct.anon, %struct.anon* @s, i32 0, i32 1, i32 2), align 2
+  %1 = load i8, ptr getelementptr inbounds (%struct.anon, ptr @s, i32 0, i32 1, i32 2), align 2
   %conv1 = zext i8 %1 to i32
   %shl2 = shl nuw nsw i32 %conv1, 16
   %add = or i32 %shl2, %shl
-  %2 = load i8, i8* getelementptr inbounds (%struct.anon, %struct.anon* @s, i32 0, i32 1, i32 3), align 1
+  %2 = load i8, ptr getelementptr inbounds (%struct.anon, ptr @s, i32 0, i32 1, i32 3), align 1
   %conv3 = zext i8 %2 to i32
   %shl4 = shl nuw nsw i32 %conv3, 8
   %add5 = or i32 %add, %shl4
-  %3 = load i8, i8* getelementptr inbounds (%struct.anon, %struct.anon* @s, i32 0, i32 1, i32 4), align 4
+  %3 = load i8, ptr getelementptr inbounds (%struct.anon, ptr @s, i32 0, i32 1, i32 4), align 4
   %conv6 = zext i8 %3 to i32
   %add7 = or i32 %add5, %conv6
   %4 = bitcast i32 %add7 to float
   %conv8 = fpext float %4 to double
-  %call = tail call i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), double %conv8)
+  %call = tail call i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str, double %conv8)
   ret i32 0
 }
 ; Function Attrs: nofree nounwind
-declare i32 @printf(i8* nocapture readonly, ...) local_unnamed_addr
+declare i32 @printf(ptr nocapture readonly, ...) local_unnamed_addr

diff --git a/llvm/test/CodeGen/PowerPC/unaligned.ll b/llvm/test/CodeGen/PowerPC/unaligned.ll
index f7577c6c97f4..481ed7230a2a 100644
--- a/llvm/test/CodeGen/PowerPC/unaligned.ll
+++ b/llvm/test/CodeGen/PowerPC/unaligned.ll
@@ -3,7 +3,7 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=+vsx | FileCheck -check-prefix=CHECK-VSX %s
 target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128-n32"
 
-define void @foo1(i16* %p, i16* %r) nounwind {
+define void @foo1(ptr %p, ptr %r) nounwind {
 ; CHECK-LABEL: foo1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lhz 3, 0(3)
@@ -16,14 +16,14 @@ define void @foo1(i16* %p, i16* %r) nounwind {
 ; CHECK-VSX-NEXT:    sth 3, 0(4)
 ; CHECK-VSX-NEXT:    blr
 entry:
-  %v = load i16, i16* %p, align 1
-  store i16 %v, i16* %r, align 1
+  %v = load i16, ptr %p, align 1
+  store i16 %v, ptr %r, align 1
   ret void
 
 
 }
 
-define void @foo2(i32* %p, i32* %r) nounwind {
+define void @foo2(ptr %p, ptr %r) nounwind {
 ; CHECK-LABEL: foo2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lwz 3, 0(3)
@@ -36,14 +36,14 @@ define void @foo2(i32* %p, i32* %r) nounwind {
 ; CHECK-VSX-NEXT:    stw 3, 0(4)
 ; CHECK-VSX-NEXT:    blr
 entry:
-  %v = load i32, i32* %p, align 1
-  store i32 %v, i32* %r, align 1
+  %v = load i32, ptr %p, align 1
+  store i32 %v, ptr %r, align 1
   ret void
 
 
 }
 
-define void @foo3(i64* %p, i64* %r) nounwind {
+define void @foo3(ptr %p, ptr %r) nounwind {
 ; CHECK-LABEL: foo3:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    ld 3, 0(3)
@@ -56,14 +56,14 @@ define void @foo3(i64* %p, i64* %r) nounwind {
 ; CHECK-VSX-NEXT:    std 3, 0(4)
 ; CHECK-VSX-NEXT:    blr
 entry:
-  %v = load i64, i64* %p, align 1
-  store i64 %v, i64* %r, align 1
+  %v = load i64, ptr %p, align 1
+  store i64 %v, ptr %r, align 1
   ret void
 
 
 }
 
-define void @foo4(float* %p, float* %r) nounwind {
+define void @foo4(ptr %p, ptr %r) nounwind {
 ; CHECK-LABEL: foo4:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lwz 3, 0(3)
@@ -76,14 +76,14 @@ define void @foo4(float* %p, float* %r) nounwind {
 ; CHECK-VSX-NEXT:    stw 3, 0(4)
 ; CHECK-VSX-NEXT:    blr
 entry:
-  %v = load float, float* %p, align 1
-  store float %v, float* %r, align 1
+  %v = load float, ptr %p, align 1
+  store float %v, ptr %r, align 1
   ret void
 
 
 }
 
-define void @foo5(double* %p, double* %r) nounwind {
+define void @foo5(ptr %p, ptr %r) nounwind {
 ; CHECK-LABEL: foo5:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    ld 3, 0(3)
@@ -96,14 +96,14 @@ define void @foo5(double* %p, double* %r) nounwind {
 ; CHECK-VSX-NEXT:    std 3, 0(4)
 ; CHECK-VSX-NEXT:    blr
 entry:
-  %v = load double, double* %p, align 1
-  store double %v, double* %r, align 1
+  %v = load double, ptr %p, align 1
+  store double %v, ptr %r, align 1
   ret void
 
 
 }
 
-define void @foo6(<4 x float>* %p, <4 x float>* %r) nounwind {
+define void @foo6(ptr %p, ptr %r) nounwind {
 ; These loads and stores are legalized into aligned loads and stores
 ; using aligned stack slots.
 ; CHECK-LABEL: foo6:
@@ -134,8 +134,8 @@ define void @foo6(<4 x float>* %p, <4 x float>* %r) nounwind {
 ; stack slots, but lvsl/vperm is better still.  (On P8 lxvw4x is preferable.)
 ; Using unaligned stxvw4x is preferable on both machines.
 entry:
-  %v = load <4 x float>, <4 x float>* %p, align 1
-  store <4 x float> %v, <4 x float>* %r, align 1
+  %v = load <4 x float>, ptr %p, align 1
+  store <4 x float> %v, ptr %r, align 1
   ret void
 }
 

diff --git a/llvm/test/CodeGen/PowerPC/uwtables.ll b/llvm/test/CodeGen/PowerPC/uwtables.ll
index e302934ab8d6..f9f35d95151d 100644
--- a/llvm/test/CodeGen/PowerPC/uwtables.ll
+++ b/llvm/test/CodeGen/PowerPC/uwtables.ll
@@ -9,25 +9,24 @@
 ; RUN:   -ppc-vsr-nums-as-vr < %s | FileCheck %s
 
 
-@_ZTIi = external constant i8*
+@_ZTIi = external constant ptr
 
 ; Function is marked as nounwind but it still throws with __cxa_throw and
 ; calls __cxa_call_unexpected.
 ; Need to make sure that we do not only have a debug frame.
 ; Function Attrs: noreturn nounwind
-define void @_Z4funcv() local_unnamed_addr #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @_Z4funcv() local_unnamed_addr #0 personality ptr @__gxx_personality_v0 {
 entry:
-  %exception = tail call i8* @__cxa_allocate_exception(i64 4)
-  %0 = bitcast i8* %exception to i32*
-  store i32 100, i32* %0, align 16
-  invoke void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null)
+  %exception = tail call ptr @__cxa_allocate_exception(i64 4)
+  store i32 100, ptr %exception, align 16
+  invoke void @__cxa_throw(ptr %exception, ptr @_ZTIi, ptr null)
           to label %unreachable unwind label %lpad
 
 lpad:                                             ; preds = %entry
-  %1 = landingpad { i8*, i32 }
-          filter [0 x i8*] zeroinitializer
-  %2 = extractvalue { i8*, i32 } %1, 0
-  tail call void @__cxa_call_unexpected(i8* %2)
+  %0 = landingpad { ptr, i32 }
+          filter [0 x ptr] zeroinitializer
+  %1 = extractvalue { ptr, i32 } %0, 0
+  tail call void @__cxa_call_unexpected(ptr %1)
   unreachable
 
 unreachable:                                      ; preds = %entry
@@ -38,13 +37,13 @@ unreachable:                                      ; preds = %entry
 ; CHECK: .cfi_endproc
 }
 
-declare i8* @__cxa_allocate_exception(i64) local_unnamed_addr
+declare ptr @__cxa_allocate_exception(i64) local_unnamed_addr
 
-declare void @__cxa_throw(i8*, i8*, i8*) local_unnamed_addr
+declare void @__cxa_throw(ptr, ptr, ptr) local_unnamed_addr
 
 declare i32 @__gxx_personality_v0(...)
 
-declare void @__cxa_call_unexpected(i8*) local_unnamed_addr
+declare void @__cxa_call_unexpected(ptr) local_unnamed_addr
 
 
 attributes #0 = { noreturn nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64le" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+htm,+power8-vector,+vsx,-power9-vector" "unsafe-fp-math"="false" "use-soft-float"="false" }

diff --git a/llvm/test/CodeGen/PowerPC/vaddsplat.ll b/llvm/test/CodeGen/PowerPC/vaddsplat.ll
index 9a3bbc9d90bb..0308a698ffbf 100644
--- a/llvm/test/CodeGen/PowerPC/vaddsplat.ll
+++ b/llvm/test/CodeGen/PowerPC/vaddsplat.ll
@@ -9,10 +9,10 @@ target triple = "powerpc64-unknown-linux-gnu"
 %v8i16 = type <8 x i16>
 %v16i8 = type <16 x i8>
 
-define void @test_v4i32_pos_even(%v4i32* %P, %v4i32* %S) {
-       %p = load %v4i32, %v4i32* %P
+define void @test_v4i32_pos_even(ptr %P, ptr %S) {
+       %p = load %v4i32, ptr %P
        %r = add %v4i32 %p, < i32 18, i32 18, i32 18, i32 18 >
-       store %v4i32 %r, %v4i32* %S
+       store %v4i32 %r, ptr %S
        ret void
 }
 
@@ -20,10 +20,10 @@ define void @test_v4i32_pos_even(%v4i32* %P, %v4i32* %S) {
 ; CHECK: vspltisw [[REG1:[0-9]+]], 9
 ; CHECK: vadduwm {{[0-9]+}}, [[REG1]], [[REG1]]
 
-define void @test_v4i32_neg_even(%v4i32* %P, %v4i32* %S) {
-       %p = load %v4i32, %v4i32* %P
+define void @test_v4i32_neg_even(ptr %P, ptr %S) {
+       %p = load %v4i32, ptr %P
        %r = add %v4i32 %p, < i32 -28, i32 -28, i32 -28, i32 -28 >
-       store %v4i32 %r, %v4i32* %S
+       store %v4i32 %r, ptr %S
        ret void
 }
 
@@ -31,10 +31,10 @@ define void @test_v4i32_neg_even(%v4i32* %P, %v4i32* %S) {
 ; CHECK: vspltisw [[REG1:[0-9]+]], -14
 ; CHECK: vadduwm {{[0-9]+}}, [[REG1]], [[REG1]]
 
-define void @test_v8i16_pos_even(%v8i16* %P, %v8i16* %S) {
-       %p = load %v8i16, %v8i16* %P
+define void @test_v8i16_pos_even(ptr %P, ptr %S) {
+       %p = load %v8i16, ptr %P
        %r = add %v8i16 %p, < i16 30, i16 30, i16 30, i16 30, i16 30, i16 30, i16 30, i16 30 >
-       store %v8i16 %r, %v8i16* %S
+       store %v8i16 %r, ptr %S
        ret void
 }
 
@@ -42,10 +42,10 @@ define void @test_v8i16_pos_even(%v8i16* %P, %v8i16* %S) {
 ; CHECK: vspltish [[REG1:[0-9]+]], 15
 ; CHECK: vadduhm {{[0-9]+}}, [[REG1]], [[REG1]]
 
-define void @test_v8i16_neg_even(%v8i16* %P, %v8i16* %S) {
-       %p = load %v8i16, %v8i16* %P
+define void @test_v8i16_neg_even(ptr %P, ptr %S) {
+       %p = load %v8i16, ptr %P
        %r = add %v8i16 %p, < i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32 >
-       store %v8i16 %r, %v8i16* %S
+       store %v8i16 %r, ptr %S
        ret void
 }
 
@@ -53,10 +53,10 @@ define void @test_v8i16_neg_even(%v8i16* %P, %v8i16* %S) {
 ; CHECK: vspltish [[REG1:[0-9]+]], -16
 ; CHECK: vadduhm {{[0-9]+}}, [[REG1]], [[REG1]]
 
-define void @test_v16i8_pos_even(%v16i8* %P, %v16i8* %S) {
-       %p = load %v16i8, %v16i8* %P
+define void @test_v16i8_pos_even(ptr %P, ptr %S) {
+       %p = load %v16i8, ptr %P
        %r = add %v16i8 %p, < i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16 >
-       store %v16i8 %r, %v16i8* %S
+       store %v16i8 %r, ptr %S
        ret void
 }
 
@@ -64,10 +64,10 @@ define void @test_v16i8_pos_even(%v16i8* %P, %v16i8* %S) {
 ; CHECK: vspltisb [[REG1:[0-9]+]], 8
 ; CHECK: vaddubm {{[0-9]+}}, [[REG1]], [[REG1]]
 
-define void @test_v16i8_neg_even(%v16i8* %P, %v16i8* %S) {
-       %p = load %v16i8, %v16i8* %P
+define void @test_v16i8_neg_even(ptr %P, ptr %S) {
+       %p = load %v16i8, ptr %P
        %r = add %v16i8 %p, < i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18 >
-       store %v16i8 %r, %v16i8* %S
+       store %v16i8 %r, ptr %S
        ret void
 }
 
@@ -75,10 +75,10 @@ define void @test_v16i8_neg_even(%v16i8* %P, %v16i8* %S) {
 ; CHECK: vspltisb [[REG1:[0-9]+]], -9
 ; CHECK: vaddubm {{[0-9]+}}, [[REG1]], [[REG1]]
 
-define void @test_v4i32_pos_odd(%v4i32* %P, %v4i32* %S) {
-       %p = load %v4i32, %v4i32* %P
+define void @test_v4i32_pos_odd(ptr %P, ptr %S) {
+       %p = load %v4i32, ptr %P
        %r = add %v4i32 %p, < i32 27, i32 27, i32 27, i32 27 >
-       store %v4i32 %r, %v4i32* %S
+       store %v4i32 %r, ptr %S
        ret void
 }
 
@@ -87,10 +87,10 @@ define void @test_v4i32_pos_odd(%v4i32* %P, %v4i32* %S) {
 ; CHECK: vspltisw [[REG1:[0-9]+]], 11
 ; CHECK: vsubuwm {{[0-9]+}}, [[REG1]], [[REG2]]
 
-define void @test_v4i32_neg_odd(%v4i32* %P, %v4i32* %S) {
-       %p = load %v4i32, %v4i32* %P
+define void @test_v4i32_neg_odd(ptr %P, ptr %S) {
+       %p = load %v4i32, ptr %P
        %r = add %v4i32 %p, < i32 -27, i32 -27, i32 -27, i32 -27 >
-       store %v4i32 %r, %v4i32* %S
+       store %v4i32 %r, ptr %S
        ret void
 }
 
@@ -99,10 +99,10 @@ define void @test_v4i32_neg_odd(%v4i32* %P, %v4i32* %S) {
 ; CHECK: vspltisw [[REG1:[0-9]+]], -11
 ; CHECK: vadduwm {{[0-9]+}}, [[REG1]], [[REG2]]
 
-define void @test_v8i16_pos_odd(%v8i16* %P, %v8i16* %S) {
-       %p = load %v8i16, %v8i16* %P
+define void @test_v8i16_pos_odd(ptr %P, ptr %S) {
+       %p = load %v8i16, ptr %P
        %r = add %v8i16 %p, < i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31 >
-       store %v8i16 %r, %v8i16* %S
+       store %v8i16 %r, ptr %S
        ret void
 }
 
@@ -111,10 +111,10 @@ define void @test_v8i16_pos_odd(%v8i16* %P, %v8i16* %S) {
 ; CHECK: vspltish [[REG1:[0-9]+]], 15
 ; CHECK: vsubuhm {{[0-9]+}}, [[REG1]], [[REG2]]
 
-define void @test_v8i16_neg_odd(%v8i16* %P, %v8i16* %S) {
-       %p = load %v8i16, %v8i16* %P
+define void @test_v8i16_neg_odd(ptr %P, ptr %S) {
+       %p = load %v8i16, ptr %P
        %r = add %v8i16 %p, < i16 -31, i16 -31, i16 -31, i16 -31, i16 -31, i16 -31, i16 -31, i16 -31 >
-       store %v8i16 %r, %v8i16* %S
+       store %v8i16 %r, ptr %S
        ret void
 }
 
@@ -123,10 +123,10 @@ define void @test_v8i16_neg_odd(%v8i16* %P, %v8i16* %S) {
 ; CHECK: vspltish [[REG1:[0-9]+]], -15
 ; CHECK: vadduhm {{[0-9]+}}, [[REG1]], [[REG2]]
 
-define void @test_v16i8_pos_odd(%v16i8* %P, %v16i8* %S) {
-       %p = load %v16i8, %v16i8* %P
+define void @test_v16i8_pos_odd(ptr %P, ptr %S) {
+       %p = load %v16i8, ptr %P
        %r = add %v16i8 %p, < i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17 >
-       store %v16i8 %r, %v16i8* %S
+       store %v16i8 %r, ptr %S
        ret void
 }
 
@@ -135,10 +135,10 @@ define void @test_v16i8_pos_odd(%v16i8* %P, %v16i8* %S) {
 ; CHECK: vspltisb [[REG1:[0-9]+]], 1
 ; CHECK: vsububm {{[0-9]+}}, [[REG1]], [[REG2]]
 
-define void @test_v16i8_neg_odd(%v16i8* %P, %v16i8* %S) {
-       %p = load %v16i8, %v16i8* %P
+define void @test_v16i8_neg_odd(ptr %P, ptr %S) {
+       %p = load %v16i8, ptr %P
        %r = add %v16i8 %p, < i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17 >
-       store %v16i8 %r, %v16i8* %S
+       store %v16i8 %r, ptr %S
        ret void
 }
 

diff --git a/llvm/test/CodeGen/PowerPC/varargs-struct-float.ll b/llvm/test/CodeGen/PowerPC/varargs-struct-float.ll
index fae7ba371201..37ef8ec8d7ad 100644
--- a/llvm/test/CodeGen/PowerPC/varargs-struct-float.ll
+++ b/llvm/test/CodeGen/PowerPC/varargs-struct-float.ll
@@ -8,10 +8,8 @@ target triple = "powerpc64-unknown-linux-gnu"
 define void @foo(float inreg %s.coerce) nounwind {
 entry:
   %s = alloca %struct.Sf1, align 4
-  %coerce.dive = getelementptr %struct.Sf1, %struct.Sf1* %s, i32 0, i32 0
-  store float %s.coerce, float* %coerce.dive, align 1
-  %coerce.dive1 = getelementptr %struct.Sf1, %struct.Sf1* %s, i32 0, i32 0
-  %0 = load float, float* %coerce.dive1, align 1
+  store float %s.coerce, ptr %s, align 1
+  %0 = load float, ptr %s, align 1
   call void (i32, ...) @testvaSf1(i32 1, float inreg %0)
   ret void
 }

diff --git a/llvm/test/CodeGen/PowerPC/varargs.ll b/llvm/test/CodeGen/PowerPC/varargs.ll
index a163a5feef2a..2b686bf0d38d 100644
--- a/llvm/test/CodeGen/PowerPC/varargs.ll
+++ b/llvm/test/CodeGen/PowerPC/varargs.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names < %s -mtriple=powerpc64le-unknown-linux-gnu | FileCheck -check-prefix=P64 %s
 
 ; PR8327
-define i8* @test1(i8** %foo) nounwind {
+define ptr @test1(ptr %foo) nounwind {
 ; P32-LABEL: test1:
 ; P32:       # %bb.0:
 ; P32-NEXT:    lbz r4, 0(r3)
@@ -41,8 +41,8 @@ define i8* @test1(i8** %foo) nounwind {
 ; P64-NEXT:    std r5, 0(r3)
 ; P64-NEXT:    ld r3, 0(r4)
 ; P64-NEXT:    blr
-  %A = va_arg i8** %foo, i8*
-  ret i8* %A
+  %A = va_arg ptr %foo, ptr
+  ret ptr %A
 }
 
 

diff --git a/llvm/test/CodeGen/PowerPC/vcmp-fold.ll b/llvm/test/CodeGen/PowerPC/vcmp-fold.ll
index 416fd6cadea0..45ba9ff52bd7 100644
--- a/llvm/test/CodeGen/PowerPC/vcmp-fold.ll
+++ b/llvm/test/CodeGen/PowerPC/vcmp-fold.ll
@@ -3,17 +3,17 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- -mcpu=g5 | grep vcmpbfp | count 1
 
 
-define void @test(<4 x float>* %x, <4 x float>* %y, i32* %P) {
+define void @test(ptr %x, ptr %y, ptr %P) {
 entry:
-	%tmp = load <4 x float>, <4 x float>* %x		; <<4 x float>> [#uses=1]
-	%tmp2 = load <4 x float>, <4 x float>* %y		; <<4 x float>> [#uses=1]
+	%tmp = load <4 x float>, ptr %x		; <<4 x float>> [#uses=1]
+	%tmp2 = load <4 x float>, ptr %y		; <<4 x float>> [#uses=1]
 	%tmp.upgrd.1 = call i32 @llvm.ppc.altivec.vcmpbfp.p( i32 1, <4 x float> %tmp, <4 x float> %tmp2 )		; <i32> [#uses=1]
-	%tmp4 = load <4 x float>, <4 x float>* %x		; <<4 x float>> [#uses=1]
-	%tmp6 = load <4 x float>, <4 x float>* %y		; <<4 x float>> [#uses=1]
+	%tmp4 = load <4 x float>, ptr %x		; <<4 x float>> [#uses=1]
+	%tmp6 = load <4 x float>, ptr %y		; <<4 x float>> [#uses=1]
 	%tmp.upgrd.2 = call <4 x i32> @llvm.ppc.altivec.vcmpbfp( <4 x float> %tmp4, <4 x float> %tmp6 )		; <<4 x i32>> [#uses=1]
 	%tmp7 = bitcast <4 x i32> %tmp.upgrd.2 to <4 x float>		; <<4 x float>> [#uses=1]
-	store <4 x float> %tmp7, <4 x float>* %x
-	store i32 %tmp.upgrd.1, i32* %P
+	store <4 x float> %tmp7, ptr %x
+	store i32 %tmp.upgrd.1, ptr %P
 	ret void
 }
 

diff --git a/llvm/test/CodeGen/PowerPC/vec-abi-align.ll b/llvm/test/CodeGen/PowerPC/vec-abi-align.ll
index c01b55a32a6b..3050485de61a 100644
--- a/llvm/test/CodeGen/PowerPC/vec-abi-align.ll
+++ b/llvm/test/CodeGen/PowerPC/vec-abi-align.ll
@@ -11,7 +11,7 @@ target triple = "powerpc64-unknown-linux-gnu"
 ; Function Attrs: nounwind
 define void @test1(i64 %d1, i64 %d2, i64 %d3, i64 %d4, i64 %d5, i64 %d6, i64 %d7, i64 %d8, i64 %d9, <4 x float> inreg %vs.coerce) #0 {
 entry:
-  store <4 x float> %vs.coerce, <4 x float>* @ve, align 16
+  store <4 x float> %vs.coerce, ptr @ve, align 16
   ret void
 
 ; CHECK-LABEL: @test1
@@ -24,14 +24,13 @@ entry:
 }
 
 ; Function Attrs: nounwind
-define void @test2(i64 %d1, i64 %d2, i64 %d3, i64 %d4, i64 %d5, i64 %d6, i64 %d7, i64 %d8, %struct.s2* byval(%struct.s2) nocapture readonly %vs) #0 {
+define void @test2(i64 %d1, i64 %d2, i64 %d3, i64 %d4, i64 %d5, i64 %d6, i64 %d7, i64 %d8, ptr byval(%struct.s2) nocapture readonly %vs) #0 {
 entry:
-  %m = getelementptr inbounds %struct.s2, %struct.s2* %vs, i64 0, i32 0
-  %0 = load i64, i64* %m, align 8
-  store i64 %0, i64* @n, align 8
-  %v = getelementptr inbounds %struct.s2, %struct.s2* %vs, i64 0, i32 1
-  %1 = load <4 x float>, <4 x float>* %v, align 16
-  store <4 x float> %1, <4 x float>* @ve, align 16
+  %0 = load i64, ptr %vs, align 8
+  store i64 %0, ptr @n, align 8
+  %v = getelementptr inbounds %struct.s2, ptr %vs, i64 0, i32 1
+  %1 = load <4 x float>, ptr %v, align 16
+  store <4 x float> %1, ptr @ve, align 16
   ret void
 
 ; CHECK-LABEL: @test2
@@ -50,14 +49,13 @@ entry:
 }
 
 ; Function Attrs: nounwind
-define void @test3(i64 %d1, i64 %d2, i64 %d3, i64 %d4, i64 %d5, i64 %d6, i64 %d7, i64 %d8, i64 %d9, %struct.s2* byval(%struct.s2) nocapture readonly %vs) #0 {
+define void @test3(i64 %d1, i64 %d2, i64 %d3, i64 %d4, i64 %d5, i64 %d6, i64 %d7, i64 %d8, i64 %d9, ptr byval(%struct.s2) nocapture readonly %vs) #0 {
 entry:
-  %m = getelementptr inbounds %struct.s2, %struct.s2* %vs, i64 0, i32 0
-  %0 = load i64, i64* %m, align 8
-  store i64 %0, i64* @n, align 8
-  %v = getelementptr inbounds %struct.s2, %struct.s2* %vs, i64 0, i32 1
-  %1 = load <4 x float>, <4 x float>* %v, align 16
-  store <4 x float> %1, <4 x float>* @ve, align 16
+  %0 = load i64, ptr %vs, align 8
+  store i64 %0, ptr @n, align 8
+  %v = getelementptr inbounds %struct.s2, ptr %vs, i64 0, i32 1
+  %1 = load <4 x float>, ptr %v, align 16
+  store <4 x float> %1, ptr @ve, align 16
   ret void
 
 ; CHECK-LABEL: @test3

diff --git a/llvm/test/CodeGen/PowerPC/vec-bswap.ll b/llvm/test/CodeGen/PowerPC/vec-bswap.ll
index d3941feac685..27f750de96c6 100644
--- a/llvm/test/CodeGen/PowerPC/vec-bswap.ll
+++ b/llvm/test/CodeGen/PowerPC/vec-bswap.ll
@@ -8,7 +8,7 @@
 ; RUN:   -verify-machineinstrs  -vec-extabi | \
 ; RUN:   FileCheck %s --check-prefixes=AIX,AIX32
 
-define dso_local void @test(i32* %Arr, i32 signext %Len) {
+define dso_local void @test(ptr %Arr, i32 signext %Len) {
 ; CHECK-LABEL: test:
 ; CHECK:         lxv [[REG:vs[0-9]+]], 0(r{{[0-9]+}})
 ; CHECK-NOT:     [[REG]]
@@ -40,19 +40,15 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
   %0 = add i32 %index, 0
   %1 = sext i32 %0 to i64
-  %2 = getelementptr inbounds i32, i32* %Arr, i64 %1
-  %3 = getelementptr inbounds i32, i32* %2, i32 0
-  %4 = bitcast i32* %3 to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %4, align 4
-  %5 = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %wide.load)
-  %6 = sext i32 %0 to i64
-  %7 = getelementptr inbounds i32, i32* %Arr, i64 %6
-  %8 = getelementptr inbounds i32, i32* %7, i32 0
-  %9 = bitcast i32* %8 to <4 x i32>*
-  store <4 x i32> %5, <4 x i32>* %9, align 4
+  %2 = getelementptr inbounds i32, ptr %Arr, i64 %1
+  %wide.load = load <4 x i32>, ptr %2, align 4
+  %3 = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %wide.load)
+  %4 = sext i32 %0 to i64
+  %5 = getelementptr inbounds i32, ptr %Arr, i64 %4
+  store <4 x i32> %3, ptr %5, align 4
   %index.next = add i32 %index, 4
-  %10 = icmp eq i32 %index.next, %n.vec
-  br i1 %10, label %middle.block, label %vector.body
+  %6 = icmp eq i32 %index.next, %n.vec
+  br i1 %6, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %Len, %n.vec
@@ -71,12 +67,12 @@ for.cond.cleanup:                                 ; preds = %for.cond.for.cond.c
 for.body:                                         ; preds = %for.inc, %scalar.ph
   %i.02 = phi i32 [ %bc.resume.val, %scalar.ph ], [ %inc, %for.inc ]
   %idxprom = sext i32 %i.02 to i64
-  %arrayidx = getelementptr inbounds i32, i32* %Arr, i64 %idxprom
-  %11 = load i32, i32* %arrayidx, align 4
-  %12 = call i32 @llvm.bswap.i32(i32 %11)
+  %arrayidx = getelementptr inbounds i32, ptr %Arr, i64 %idxprom
+  %7 = load i32, ptr %arrayidx, align 4
+  %8 = call i32 @llvm.bswap.i32(i32 %7)
   %idxprom1 = sext i32 %i.02 to i64
-  %arrayidx2 = getelementptr inbounds i32, i32* %Arr, i64 %idxprom1
-  store i32 %12, i32* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds i32, ptr %Arr, i64 %idxprom1
+  store i32 %8, ptr %arrayidx2, align 4
   br label %for.inc
 
 for.inc:                                          ; preds = %for.body

diff  --git a/llvm/test/CodeGen/PowerPC/vec-extract-itofp.ll b/llvm/test/CodeGen/PowerPC/vec-extract-itofp.ll
index 29af49815dc7..c5874d1516bd 100644
--- a/llvm/test/CodeGen/PowerPC/vec-extract-itofp.ll
+++ b/llvm/test/CodeGen/PowerPC/vec-extract-itofp.ll
@@ -6,7 +6,7 @@
 ; RUN:   -ppc-asm-full-reg-names -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s -check-prefix=CHECK-BE
 
-define dso_local void @testutof(<8 x i16> %a, float* nocapture %ptr) local_unnamed_addr #0 {
+define dso_local void @testutof(<8 x i16> %a, ptr nocapture %ptr) local_unnamed_addr #0 {
 ; CHECK-LABEL: testutof:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vextractuh v2, v2, 14
@@ -23,11 +23,11 @@ define dso_local void @testutof(<8 x i16> %a, float* nocapture %ptr) local_unnam
 entry:
   %vecext = extractelement <8 x i16> %a, i32 0
   %conv = uitofp i16 %vecext to float
-  store float %conv, float* %ptr, align 4
+  store float %conv, ptr %ptr, align 4
   ret void
 }
 
-define dso_local void @testutod(<8 x i16> %a, double* nocapture %ptr) local_unnamed_addr #0 {
+define dso_local void @testutod(<8 x i16> %a, ptr nocapture %ptr) local_unnamed_addr #0 {
 ; CHECK-LABEL: testutod:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vextractuh v2, v2, 14
@@ -44,11 +44,11 @@ define dso_local void @testutod(<8 x i16> %a, double* nocapture %ptr) local_unna
 entry:
   %vecext = extractelement <8 x i16> %a, i32 0
   %conv = uitofp i16 %vecext to double
-  store double %conv, double* %ptr, align 8
+  store double %conv, ptr %ptr, align 8
   ret void
 }
 
-define dso_local void @teststof(<8 x i16> %a, float* nocapture %ptr) local_unnamed_addr #0 {
+define dso_local void @teststof(<8 x i16> %a, ptr nocapture %ptr) local_unnamed_addr #0 {
 ; CHECK-LABEL: teststof:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vextractuh v2, v2, 14
@@ -67,11 +67,11 @@ define dso_local void @teststof(<8 x i16> %a, float* nocapture %ptr) local_unnam
 entry:
   %vecext = extractelement <8 x i16> %a, i32 0
   %conv = sitofp i16 %vecext to float
-  store float %conv, float* %ptr, align 4
+  store float %conv, ptr %ptr, align 4
   ret void
 }
 
-define dso_local void @teststod(<8 x i16> %a, double* nocapture %ptr) local_unnamed_addr #0 {
+define dso_local void @teststod(<8 x i16> %a, ptr nocapture %ptr) local_unnamed_addr #0 {
 ; CHECK-LABEL: teststod:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vextractuh v2, v2, 14
@@ -90,11 +90,11 @@ define dso_local void @teststod(<8 x i16> %a, double* nocapture %ptr) local_unna
 entry:
   %vecext = extractelement <8 x i16> %a, i32 0
   %conv = sitofp i16 %vecext to double
-  store double %conv, double* %ptr, align 8
+  store double %conv, ptr %ptr, align 8
   ret void
 }
 
-define dso_local void @testsubtod(<16 x i8> %a, double* nocapture %ptr) local_unnamed_addr #0 {
+define dso_local void @testsubtod(<16 x i8> %a, ptr nocapture %ptr) local_unnamed_addr #0 {
 ; CHECK-LABEL: testsubtod:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vextractub v2, v2, 15
@@ -111,11 +111,11 @@ define dso_local void @testsubtod(<16 x i8> %a, double* nocapture %ptr) local_un
 entry:
   %vecext = extractelement <16 x i8> %a, i32 0
   %conv = uitofp i8 %vecext to double
-  store double %conv, double* %ptr, align 8
+  store double %conv, ptr %ptr, align 8
   ret void
 }
 
-define dso_local void @testsbtod(<16 x i8> %a, double* nocapture %ptr) local_unnamed_addr #0 {
+define dso_local void @testsbtod(<16 x i8> %a, ptr nocapture %ptr) local_unnamed_addr #0 {
 ; CHECK-LABEL: testsbtod:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vextractuh v2, v2, 15
@@ -134,11 +134,11 @@ define dso_local void @testsbtod(<16 x i8> %a, double* nocapture %ptr) local_unn
 entry:
   %vecext = extractelement <16 x i8> %a, i32 0
   %conv = sitofp i8 %vecext to double
-  store double %conv, double* %ptr, align 8
+  store double %conv, ptr %ptr, align 8
   ret void
 }
 
-define dso_local void @testsubtof(<16 x i8> %a, float* nocapture %ptr) local_unnamed_addr #0 {
+define dso_local void @testsubtof(<16 x i8> %a, ptr nocapture %ptr) local_unnamed_addr #0 {
 ; CHECK-LABEL: testsubtof:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vextractub v2, v2, 15
@@ -155,11 +155,11 @@ define dso_local void @testsubtof(<16 x i8> %a, float* nocapture %ptr) local_unn
 entry:
   %vecext = extractelement <16 x i8> %a, i32 0
   %conv = uitofp i8 %vecext to float
-  store float %conv, float* %ptr, align 8
+  store float %conv, ptr %ptr, align 8
   ret void
 }
 
-define dso_local void @testsbtof(<16 x i8> %a, float* nocapture %ptr) local_unnamed_addr #0 {
+define dso_local void @testsbtof(<16 x i8> %a, ptr nocapture %ptr) local_unnamed_addr #0 {
 ; CHECK-LABEL: testsbtof:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vextractub v2, v2, 15
@@ -178,6 +178,6 @@ define dso_local void @testsbtof(<16 x i8> %a, float* nocapture %ptr) local_unna
 entry:
   %vecext = extractelement <16 x i8> %a, i32 0
   %conv = sitofp i8 %vecext to float
-  store float %conv, float* %ptr, align 8
+  store float %conv, ptr %ptr, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec-itofp.ll b/llvm/test/CodeGen/PowerPC/vec-itofp.ll
index f28ec0e761b7..9fcc57ad78d5 100644
--- a/llvm/test/CodeGen/PowerPC/vec-itofp.ll
+++ b/llvm/test/CodeGen/PowerPC/vec-itofp.ll
@@ -9,7 +9,7 @@
 ; RUN:     -mcpu=pwr9 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \
 ; RUN: FileCheck %s --check-prefix=CHECK-BE
 
-define void @test8(<8 x double>* nocapture %Sink, <8 x i16>* nocapture readonly %SrcPtr) {
+define void @test8(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
 ; CHECK-P8-LABEL: test8:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    addis r5, r2, .LCPI0_0@toc@ha
@@ -112,13 +112,13 @@ define void @test8(<8 x double>* nocapture %Sink, <8 x i16>* nocapture readonly
 ; CHECK-BE-NEXT:    stxv vs3, 48(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %SrcPtr, align 16
+  %0 = load <8 x i16>, ptr %SrcPtr, align 16
   %1 = uitofp <8 x i16> %0 to <8 x double>
-  store <8 x double> %1, <8 x double>* %Sink, align 16
+  store <8 x double> %1, ptr %Sink, align 16
   ret void
 }
 
-define void @test4(<4 x double>* nocapture %Sink, <4 x i16>* nocapture readonly %SrcPtr) {
+define void @test4(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
 ; CHECK-P8-LABEL: test4:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    addis r5, r2, .LCPI1_0@toc@ha
@@ -179,13 +179,13 @@ define void @test4(<4 x double>* nocapture %Sink, <4 x i16>* nocapture readonly
 ; CHECK-BE-NEXT:    stxv vs1, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load <4 x i16>, <4 x i16>* %SrcPtr, align 16
+  %0 = load <4 x i16>, ptr %SrcPtr, align 16
   %1 = uitofp <4 x i16> %0 to <4 x double>
-  store <4 x double> %1, <4 x double>* %Sink, align 16
+  store <4 x double> %1, ptr %Sink, align 16
   ret void
 }
 
-define void @test2(<2 x double>* nocapture %Sink, <2 x i16>* nocapture readonly %SrcPtr) {
+define void @test2(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
 ; CHECK-P8-LABEL: test2:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    addis r5, r2, .LCPI2_0@toc@ha
@@ -225,13 +225,13 @@ define void @test2(<2 x double>* nocapture %Sink, <2 x i16>* nocapture readonly
 ; CHECK-BE-NEXT:    stxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load <2 x i16>, <2 x i16>* %SrcPtr, align 16
+  %0 = load <2 x i16>, ptr %SrcPtr, align 16
   %1 = uitofp <2 x i16> %0 to <2 x double>
-  store <2 x double> %1, <2 x double>* %Sink, align 16
+  store <2 x double> %1, ptr %Sink, align 16
   ret void
 }
 
-define void @stest8(<8 x double>* nocapture %Sink, <8 x i16>* nocapture readonly %SrcPtr) {
+define void @stest8(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
 ; CHECK-P8-LABEL: stest8:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    addis r5, r2, .LCPI3_0@toc@ha
@@ -351,13 +351,13 @@ define void @stest8(<8 x double>* nocapture %Sink, <8 x i16>* nocapture readonly
 ; CHECK-BE-NEXT:    stxv vs3, 48(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %SrcPtr, align 16
+  %0 = load <8 x i16>, ptr %SrcPtr, align 16
   %1 = sitofp <8 x i16> %0 to <8 x double>
-  store <8 x double> %1, <8 x double>* %Sink, align 16
+  store <8 x double> %1, ptr %Sink, align 16
   ret void
 }
 
-define void @stest4(<4 x double>* nocapture %Sink, <4 x i16>* nocapture readonly %SrcPtr) {
+define void @stest4(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
 ; CHECK-P8-LABEL: stest4:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    addis r5, r2, .LCPI4_0@toc@ha
@@ -427,13 +427,13 @@ define void @stest4(<4 x double>* nocapture %Sink, <4 x i16>* nocapture readonly
 ; CHECK-BE-NEXT:    stxv vs1, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load <4 x i16>, <4 x i16>* %SrcPtr, align 16
+  %0 = load <4 x i16>, ptr %SrcPtr, align 16
   %1 = sitofp <4 x i16> %0 to <4 x double>
-  store <4 x double> %1, <4 x double>* %Sink, align 16
+  store <4 x double> %1, ptr %Sink, align 16
   ret void
 }
 
-define void @stest2(<2 x double>* nocapture %Sink, <2 x i16>* nocapture readonly %SrcPtr) {
+define void @stest2(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
 ; CHECK-P8-LABEL: stest2:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    addis r5, r2, .LCPI5_0@toc@ha
@@ -478,8 +478,8 @@ define void @stest2(<2 x double>* nocapture %Sink, <2 x i16>* nocapture readonly
 ; CHECK-BE-NEXT:    stxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load <2 x i16>, <2 x i16>* %SrcPtr, align 16
+  %0 = load <2 x i16>, ptr %SrcPtr, align 16
   %1 = sitofp <2 x i16> %0 to <2 x double>
-  store <2 x double> %1, <2 x double>* %Sink, align 16
+  store <2 x double> %1, ptr %Sink, align 16
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec-trunc.ll b/llvm/test/CodeGen/PowerPC/vec-trunc.ll
index bfec8e0a7ec1..e7f4ec1b978c 100644
--- a/llvm/test/CodeGen/PowerPC/vec-trunc.ll
+++ b/llvm/test/CodeGen/PowerPC/vec-trunc.ll
@@ -6,7 +6,7 @@
 ; RUN:     -mattr=+vsx -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \
 ; RUN: FileCheck %s --check-prefix=CHECK-BE
 
-define void @test8i8(<8 x i8>* nocapture %Sink, <8 x i16>* nocapture readonly %SrcPtr) {
+define void @test8i8(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
 ; CHECK-LABEL: test8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvd2x vs0, 0, r4
@@ -26,13 +26,13 @@ define void @test8i8(<8 x i8>* nocapture %Sink, <8 x i16>* nocapture readonly %S
 ; CHECK-BE-NEXT:    std r4, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %SrcPtr, align 16
+  %0 = load <8 x i16>, ptr %SrcPtr, align 16
   %1 = trunc <8 x i16> %0 to <8 x i8>
-  store <8 x i8> %1, <8 x i8>* %Sink, align 16
+  store <8 x i8> %1, ptr %Sink, align 16
   ret void
 }
 
-define void @test4i8(<4 x i8>* nocapture %Sink, <4 x i16>* nocapture readonly %SrcPtr) {
+define void @test4i8(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
 ; CHECK-LABEL: test4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvd2x vs0, 0, r4
@@ -52,13 +52,13 @@ define void @test4i8(<4 x i8>* nocapture %Sink, <4 x i16>* nocapture readonly %S
 ; CHECK-BE-NEXT:    stw r4, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load <4 x i16>, <4 x i16>* %SrcPtr, align 16
+  %0 = load <4 x i16>, ptr %SrcPtr, align 16
   %1 = trunc <4 x i16> %0 to <4 x i8>
-  store <4 x i8> %1, <4 x i8>* %Sink, align 16
+  store <4 x i8> %1, ptr %Sink, align 16
   ret void
 }
 
-define void @test4i8w(<4 x i8>* nocapture %Sink, <4 x i32>* nocapture readonly %SrcPtr) {
+define void @test4i8w(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
 ; CHECK-LABEL: test4i8w:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addis r5, r2, .LCPI2_0@toc@ha
@@ -85,13 +85,13 @@ define void @test4i8w(<4 x i8>* nocapture %Sink, <4 x i32>* nocapture readonly %
 ; CHECK-BE-NEXT:    stw r4, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %SrcPtr, align 16
+  %0 = load <4 x i32>, ptr %SrcPtr, align 16
   %1 = trunc <4 x i32> %0 to <4 x i8>
-  store <4 x i8> %1, <4 x i8>* %Sink, align 16
+  store <4 x i8> %1, ptr %Sink, align 16
   ret void
 }
 
-define void @test2i8(<2 x i8>* nocapture %Sink, <2 x i16>* nocapture readonly %SrcPtr) {
+define void @test2i8(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
 ; CHECK-LABEL: test2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvd2x vs0, 0, r4
@@ -113,13 +113,13 @@ define void @test2i8(<2 x i8>* nocapture %Sink, <2 x i16>* nocapture readonly %S
 ; CHECK-BE-NEXT:    sth r4, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load <2 x i16>, <2 x i16>* %SrcPtr, align 16
+  %0 = load <2 x i16>, ptr %SrcPtr, align 16
   %1 = trunc <2 x i16> %0 to <2 x i8>
-  store <2 x i8> %1, <2 x i8>* %Sink, align 16
+  store <2 x i8> %1, ptr %Sink, align 16
   ret void
 }
 
-define void @test4i16(<4 x i16>* nocapture %Sink, <4 x i32>* nocapture readonly %SrcPtr) {
+define void @test4i16(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
 ; CHECK-LABEL: test4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvd2x vs0, 0, r4
@@ -139,13 +139,13 @@ define void @test4i16(<4 x i16>* nocapture %Sink, <4 x i32>* nocapture readonly
 ; CHECK-BE-NEXT:    std r4, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %SrcPtr, align 16
+  %0 = load <4 x i32>, ptr %SrcPtr, align 16
   %1 = trunc <4 x i32> %0 to <4 x i16>
-  store <4 x i16> %1, <4 x i16>* %Sink, align 16
+  store <4 x i16> %1, ptr %Sink, align 16
   ret void
 }
 
-define void @test2i16(<2 x i16>* nocapture %Sink, <2 x i32>* nocapture readonly %SrcPtr) {
+define void @test2i16(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
 ; CHECK-LABEL: test2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvd2x vs0, 0, r4
@@ -165,13 +165,13 @@ define void @test2i16(<2 x i16>* nocapture %Sink, <2 x i32>* nocapture readonly
 ; CHECK-BE-NEXT:    stw r4, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load <2 x i32>, <2 x i32>* %SrcPtr, align 16
+  %0 = load <2 x i32>, ptr %SrcPtr, align 16
   %1 = trunc <2 x i32> %0 to <2 x i16>
-  store <2 x i16> %1, <2 x i16>* %Sink, align 16
+  store <2 x i16> %1, ptr %Sink, align 16
   ret void
 }
 
-define void @test2i16d(<2 x i16>* nocapture %Sink, <2 x i64>* nocapture readonly %SrcPtr) {
+define void @test2i16d(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
 ; CHECK-LABEL: test2i16d:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addis r5, r2, .LCPI6_0@toc@ha
@@ -198,8 +198,8 @@ define void @test2i16d(<2 x i16>* nocapture %Sink, <2 x i64>* nocapture readonly
 ; CHECK-BE-NEXT:    stw r4, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %SrcPtr, align 16
+  %0 = load <2 x i64>, ptr %SrcPtr, align 16
   %1 = trunc <2 x i64> %0 to <2 x i16>
-  store <2 x i16> %1, <2 x i16>* %Sink, align 16
+  store <2 x i16> %1, ptr %Sink, align 16
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec_auto_constant.ll b/llvm/test/CodeGen/PowerPC/vec_auto_constant.ll
index 55e9c4cdf949..495b152ad7fc 100644
--- a/llvm/test/CodeGen/PowerPC/vec_auto_constant.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_auto_constant.ll
@@ -16,19 +16,19 @@
 ; CHECK: .byte  30
 ; CHECK: .byte  29
 ; CHECK: .byte  3
-@baz = common global <16 x i8> zeroinitializer    ; <<16 x i8>*> [#uses=1]
+@baz = common global <16 x i8> zeroinitializer    ; <ptr> [#uses=1]
 
 define void @foo(<16 x i8> %x) nounwind ssp {
 entry:
-  %x_addr = alloca <16 x i8>                      ; <<16 x i8>*> [#uses=2]
-  %temp = alloca <16 x i8>                        ; <<16 x i8>*> [#uses=2]
+  %x_addr = alloca <16 x i8>                      ; <ptr> [#uses=2]
+  %temp = alloca <16 x i8>                        ; <ptr> [#uses=2]
   %"alloca point" = bitcast i32 0 to i32          ; <i32> [#uses=0]
-  store <16 x i8> %x, <16 x i8>* %x_addr
-  store <16 x i8> <i8 22, i8 21, i8 20, i8 3, i8 25, i8 24, i8 23, i8 3, i8 28, i8 27, i8 26, i8 3, i8 31, i8 30, i8 29, i8 3>, <16 x i8>* %temp, align 16
-  %0 = load <16 x i8>, <16 x i8>* %x_addr, align 16          ; <<16 x i8>> [#uses=1]
-  %1 = load <16 x i8>, <16 x i8>* %temp, align 16            ; <<16 x i8>> [#uses=1]
+  store <16 x i8> %x, ptr %x_addr
+  store <16 x i8> <i8 22, i8 21, i8 20, i8 3, i8 25, i8 24, i8 23, i8 3, i8 28, i8 27, i8 26, i8 3, i8 31, i8 30, i8 29, i8 3>, ptr %temp, align 16
+  %0 = load <16 x i8>, ptr %x_addr, align 16          ; <<16 x i8>> [#uses=1]
+  %1 = load <16 x i8>, ptr %temp, align 16            ; <<16 x i8>> [#uses=1]
   %tmp = add <16 x i8> %0, %1                     ; <<16 x i8>> [#uses=1]
-  store <16 x i8> %tmp, <16 x i8>* @baz, align 16
+  store <16 x i8> %tmp, ptr @baz, align 16
   br label %return
 
 return:                                           ; preds = %entry

diff  --git a/llvm/test/CodeGen/PowerPC/vec_br_cmp.ll b/llvm/test/CodeGen/PowerPC/vec_br_cmp.ll
index fad927a22a96..b59511b2d28d 100644
--- a/llvm/test/CodeGen/PowerPC/vec_br_cmp.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_br_cmp.ll
@@ -4,15 +4,15 @@
 
 ; A predicate compare used immediately by a branch should not generate an mfcr.
 
-define void @test(<4 x float>* %A, <4 x float>* %B) {
-	%tmp = load <4 x float>, <4 x float>* %A		; <<4 x float>> [#uses=1]
-	%tmp3 = load <4 x float>, <4 x float>* %B		; <<4 x float>> [#uses=1]
+define void @test(ptr %A, ptr %B) {
+	%tmp = load <4 x float>, ptr %A		; <<4 x float>> [#uses=1]
+	%tmp3 = load <4 x float>, ptr %B		; <<4 x float>> [#uses=1]
 	%tmp.upgrd.1 = tail call i32 @llvm.ppc.altivec.vcmpeqfp.p( i32 1, <4 x float> %tmp, <4 x float> %tmp3 )		; <i32> [#uses=1]
 	%tmp.upgrd.2 = icmp eq i32 %tmp.upgrd.1, 0		; <i1> [#uses=1]
 	br i1 %tmp.upgrd.2, label %cond_true, label %UnifiedReturnBlock
 
 cond_true:		; preds = %0
-	store <4 x float> zeroinitializer, <4 x float>* %B
+	store <4 x float> zeroinitializer, ptr %B
 	ret void
 
 UnifiedReturnBlock:		; preds = %0

diff  --git a/llvm/test/CodeGen/PowerPC/vec_buildvector_loadstore.ll b/llvm/test/CodeGen/PowerPC/vec_buildvector_loadstore.ll
index 0a00dc4477b6..880f4baeb885 100644
--- a/llvm/test/CodeGen/PowerPC/vec_buildvector_loadstore.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_buildvector_loadstore.ll
@@ -21,7 +21,7 @@ define void @foo() nounwind ssp {
 ; CHECK-NEXT:    stvx 2, 4, 3
 ; CHECK-NEXT:    blr
 entry:
-    %tmp0 = load <16 x i8>, <16 x i8>* @a, align 16
+    %tmp0 = load <16 x i8>, ptr @a, align 16
   %tmp180.i = extractelement <16 x i8> %tmp0, i32 0 ; <i8> [#uses=1]
   %tmp181.i = insertelement <16 x i8> <i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp180.i, i32 2 ; <<16 x i8>> [#uses=1]
   %tmp182.i = extractelement <16 x i8> %tmp0, i32 1 ; <i8> [#uses=1]
@@ -41,7 +41,7 @@ entry:
   %tmp196.i = insertelement <16 x i8> %tmp195.i, i8 0, i32 12 ; <<16 x i8>> [#uses=1]
   %tmp197.i = insertelement <16 x i8> %tmp196.i, i8 0, i32 13 ; <<16 x i8>> [#uses=1]
 %tmp201 = shufflevector <16 x i8> %tmp197.i, <16 x i8> %tmp0, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 28, i32 29>; ModuleID = 'try.c'
-    store <16 x i8> %tmp201, <16 x i8>* @c, align 16
+    store <16 x i8> %tmp201, ptr @c, align 16
     br label %return
 
 return:		; preds = %bb2

diff  --git a/llvm/test/CodeGen/PowerPC/vec_constants.ll b/llvm/test/CodeGen/PowerPC/vec_constants.ll
index 339b0d106647..3c14c14e0656 100644
--- a/llvm/test/CodeGen/PowerPC/vec_constants.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_constants.ll
@@ -3,7 +3,7 @@
 ; RUN: llc -verify-machineinstrs -O0 -mcpu=pwr7 -mtriple=powerpc64-ibm-aix-xcoff -vec-extabi < %s | FileCheck %s --check-prefixes=CHECK,BE
 ; RUN: llc -verify-machineinstrs -O0 -mcpu=pwr7 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,LE
 
-define void @test1(<4 x i32>* %P1, <4 x i32>* %P2, <4 x float>* %P3) nounwind {
+define void @test1(ptr %P1, ptr %P2, ptr %P3) nounwind {
 ; BE-LABEL: test1:
 ; BE:       # %bb.0:
 ; BE-NEXT:    lxvw4x 0, 0, 3
@@ -39,17 +39,17 @@ define void @test1(<4 x i32>* %P1, <4 x i32>* %P2, <4 x float>* %P3) nounwind {
 ; LE-NEXT:    xxswapd 0, 0
 ; LE-NEXT:    stxvd2x 0, 0, 5
 ; LE-NEXT:    blr
-	%tmp = load <4 x i32>, <4 x i32>* %P1		; <<4 x i32>> [#uses=1]
+	%tmp = load <4 x i32>, ptr %P1		; <<4 x i32>> [#uses=1]
 	%tmp4 = and <4 x i32> %tmp, < i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648 >		; <<4 x i32>> [#uses=1]
-	store <4 x i32> %tmp4, <4 x i32>* %P1
-	%tmp7 = load <4 x i32>, <4 x i32>* %P2		; <<4 x i32>> [#uses=1]
+	store <4 x i32> %tmp4, ptr %P1
+	%tmp7 = load <4 x i32>, ptr %P2		; <<4 x i32>> [#uses=1]
 	%tmp9 = and <4 x i32> %tmp7, < i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647 >		; <<4 x i32>> [#uses=1]
-	store <4 x i32> %tmp9, <4 x i32>* %P2
-	%tmp.upgrd.1 = load <4 x float>, <4 x float>* %P3		; <<4 x float>> [#uses=1]
+	store <4 x i32> %tmp9, ptr %P2
+	%tmp.upgrd.1 = load <4 x float>, ptr %P3		; <<4 x float>> [#uses=1]
 	%tmp11 = bitcast <4 x float> %tmp.upgrd.1 to <4 x i32>		; <<4 x i32>> [#uses=1]
 	%tmp12 = and <4 x i32> %tmp11, < i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647 >		; <<4 x i32>> [#uses=1]
 	%tmp13 = bitcast <4 x i32> %tmp12 to <4 x float>		; <<4 x float>> [#uses=1]
-	store <4 x float> %tmp13, <4 x float>* %P3
+	store <4 x float> %tmp13, ptr %P3
 	ret void
 
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec_conv.ll b/llvm/test/CodeGen/PowerPC/vec_conv.ll
index afddfd262a70..b8cbb65da8f1 100644
--- a/llvm/test/CodeGen/PowerPC/vec_conv.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_conv.ll
@@ -9,48 +9,48 @@ target triple = "powerpc64-unknown-linux-gnu"
 @cte_int = global <4 x i32> <i32 6, i32 6, i32 6, i32 6>, align 16
 
 
-define void @v4f32_to_v4i32(<4 x float> %x, <4 x i32>* nocapture %y) nounwind {
+define void @v4f32_to_v4i32(<4 x float> %x, ptr nocapture %y) nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @cte_float, align 16
+  %0 = load <4 x float>, ptr @cte_float, align 16
   %mul = fmul <4 x float> %0, %x
   %1 = fptosi <4 x float> %mul to <4 x i32>
-  store <4 x i32> %1, <4 x i32>* %y, align 16
+  store <4 x i32> %1, ptr %y, align 16
   ret void
 }
 ;CHECK-LABEL: v4f32_to_v4i32:
 ;CHECK: vctsxs {{[0-9]+}}, {{[0-9]+}}, 0
 
 
-define void @v4f32_to_v4u32(<4 x float> %x, <4 x i32>* nocapture %y) nounwind {
+define void @v4f32_to_v4u32(<4 x float> %x, ptr nocapture %y) nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @cte_float, align 16
+  %0 = load <4 x float>, ptr @cte_float, align 16
   %mul = fmul <4 x float> %0, %x
   %1 = fptoui <4 x float> %mul to <4 x i32>
-  store <4 x i32> %1, <4 x i32>* %y, align 16
+  store <4 x i32> %1, ptr %y, align 16
   ret void
 }
 ;CHECK-LABEL: v4f32_to_v4u32:
 ;CHECK: vctuxs {{[0-9]+}}, {{[0-9]+}}, 0
 
 
-define void @v4i32_to_v4f32(<4 x i32> %x, <4 x float>* nocapture %y) nounwind {
+define void @v4i32_to_v4f32(<4 x i32> %x, ptr nocapture %y) nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @cte_int, align 16
+  %0 = load <4 x i32>, ptr @cte_int, align 16
   %mul = mul <4 x i32> %0, %x
   %1 = sitofp <4 x i32> %mul to <4 x float>
-  store <4 x float> %1, <4 x float>* %y, align 16
+  store <4 x float> %1, ptr %y, align 16
   ret void
 }
 ;CHECK-LABEL: v4i32_to_v4f32:
 ;CHECK: vcfsx {{[0-9]+}}, {{[0-9]+}}, 0
 
 
-define void @v4u32_to_v4f32(<4 x i32> %x, <4 x float>* nocapture %y) nounwind {
+define void @v4u32_to_v4f32(<4 x i32> %x, ptr nocapture %y) nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @cte_int, align 16
+  %0 = load <4 x i32>, ptr @cte_int, align 16
   %mul = mul <4 x i32> %0, %x
   %1 = uitofp <4 x i32> %mul to <4 x float>
-  store <4 x float> %1, <4 x float>* %y, align 16
+  store <4 x float> %1, ptr %y, align 16
   ret void
 }
 ;CHECK-LABEL: v4u32_to_v4f32:

diff  --git a/llvm/test/CodeGen/PowerPC/vec_conv_fp32_to_i16_elts.ll b/llvm/test/CodeGen/PowerPC/vec_conv_fp32_to_i16_elts.ll
index 1ac409466ed7..3b96286ad0f4 100644
--- a/llvm/test/CodeGen/PowerPC/vec_conv_fp32_to_i16_elts.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_conv_fp32_to_i16_elts.ll
@@ -163,7 +163,7 @@ entry:
   ret i64 %1
 }
 
-define <8 x i16> @test8elt(<8 x float>* nocapture readonly) local_unnamed_addr #2 {
+define <8 x i16> @test8elt(ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    lxvd2x vs0, 0, r3
@@ -321,12 +321,12 @@ define <8 x i16> @test8elt(<8 x float>* nocapture readonly) local_unnamed_addr #
 ; CHECK-BE-NEXT:    xxmrghd v2, vs0, vs1
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <8 x float>, <8 x float>* %0, align 32
+  %a = load <8 x float>, ptr %0, align 32
   %1 = fptoui <8 x float> %a to <8 x i16>
   ret <8 x i16> %1
 }
 
-define void @test16elt(<16 x i16>* noalias nocapture sret(<16 x i16>) %agg.result, <16 x float>* nocapture readonly) local_unnamed_addr #3 {
+define void @test16elt(ptr noalias nocapture sret(<16 x i16>) %agg.result, ptr nocapture readonly) local_unnamed_addr #3 {
 ; CHECK-P8-LABEL: test16elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    lxvd2x vs0, 0, r4
@@ -635,9 +635,9 @@ define void @test16elt(<16 x i16>* noalias nocapture sret(<16 x i16>) %agg.resul
 ; CHECK-BE-NEXT:    stxv vs0, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x float>, <16 x float>* %0, align 64
+  %a = load <16 x float>, ptr %0, align 64
   %1 = fptoui <16 x float> %a to <16 x i16>
-  store <16 x i16> %1, <16 x i16>* %agg.result, align 32
+  store <16 x i16> %1, ptr %agg.result, align 32
   ret void
 }
 
@@ -795,7 +795,7 @@ entry:
   ret i64 %1
 }
 
-define <8 x i16> @test8elt_signed(<8 x float>* nocapture readonly) local_unnamed_addr #2 {
+define <8 x i16> @test8elt_signed(ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    lxvd2x vs0, 0, r3
@@ -953,12 +953,12 @@ define <8 x i16> @test8elt_signed(<8 x float>* nocapture readonly) local_unnamed
 ; CHECK-BE-NEXT:    xxmrghd v2, vs0, vs1
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <8 x float>, <8 x float>* %0, align 32
+  %a = load <8 x float>, ptr %0, align 32
   %1 = fptosi <8 x float> %a to <8 x i16>
   ret <8 x i16> %1
 }
 
-define void @test16elt_signed(<16 x i16>* noalias nocapture sret(<16 x i16>) %agg.result, <16 x float>* nocapture readonly) local_unnamed_addr #3 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x i16>) %agg.result, ptr nocapture readonly) local_unnamed_addr #3 {
 ; CHECK-P8-LABEL: test16elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    lxvd2x vs0, 0, r4
@@ -1267,8 +1267,8 @@ define void @test16elt_signed(<16 x i16>* noalias nocapture sret(<16 x i16>) %ag
 ; CHECK-BE-NEXT:    stxv vs0, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x float>, <16 x float>* %0, align 64
+  %a = load <16 x float>, ptr %0, align 64
   %1 = fptosi <16 x float> %a to <16 x i16>
-  store <16 x i16> %1, <16 x i16>* %agg.result, align 32
+  store <16 x i16> %1, ptr %agg.result, align 32
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec_conv_fp32_to_i64_elts.ll b/llvm/test/CodeGen/PowerPC/vec_conv_fp32_to_i64_elts.ll
index 5c4f54170eb9..3728311bfa2e 100644
--- a/llvm/test/CodeGen/PowerPC/vec_conv_fp32_to_i64_elts.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_conv_fp32_to_i64_elts.ll
@@ -41,7 +41,7 @@ entry:
   ret <2 x i64> %1
 }
 
-define void @test4elt(<4 x i64>* noalias nocapture sret(<4 x i64>) %agg.result, <4 x float> %a) local_unnamed_addr #1 {
+define void @test4elt(ptr noalias nocapture sret(<4 x i64>) %agg.result, <4 x float> %a) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    xxmrglw vs0, v2, v2
@@ -82,11 +82,11 @@ define void @test4elt(<4 x i64>* noalias nocapture sret(<4 x i64>) %agg.result,
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = fptoui <4 x float> %a to <4 x i64>
-  store <4 x i64> %0, <4 x i64>* %agg.result, align 32
+  store <4 x i64> %0, ptr %agg.result, align 32
   ret void
 }
 
-define void @test8elt(<8 x i64>* noalias nocapture sret(<8 x i64>) %agg.result, <8 x float>* nocapture readonly) local_unnamed_addr #2 {
+define void @test8elt(ptr noalias nocapture sret(<8 x i64>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -162,13 +162,13 @@ define void @test8elt(<8 x i64>* noalias nocapture sret(<8 x i64>) %agg.result,
 ; CHECK-BE-NEXT:    stxv vs2, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <8 x float>, <8 x float>* %0, align 32
+  %a = load <8 x float>, ptr %0, align 32
   %1 = fptoui <8 x float> %a to <8 x i64>
-  store <8 x i64> %1, <8 x i64>* %agg.result, align 64
+  store <8 x i64> %1, ptr %agg.result, align 64
   ret void
 }
 
-define void @test16elt(<16 x i64>* noalias nocapture sret(<16 x i64>) %agg.result, <16 x float>* nocapture readonly) local_unnamed_addr #2 {
+define void @test16elt(ptr noalias nocapture sret(<16 x i64>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test16elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r7, 48
@@ -308,9 +308,9 @@ define void @test16elt(<16 x i64>* noalias nocapture sret(<16 x i64>) %agg.resul
 ; CHECK-BE-NEXT:    stxv vs2, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x float>, <16 x float>* %0, align 64
+  %a = load <16 x float>, ptr %0, align 64
   %1 = fptoui <16 x float> %a to <16 x i64>
-  store <16 x i64> %1, <16 x i64>* %agg.result, align 128
+  store <16 x i64> %1, ptr %agg.result, align 128
   ret void
 }
 
@@ -346,7 +346,7 @@ entry:
   ret <2 x i64> %1
 }
 
-define void @test4elt_signed(<4 x i64>* noalias nocapture sret(<4 x i64>) %agg.result, <4 x float> %a) local_unnamed_addr #1 {
+define void @test4elt_signed(ptr noalias nocapture sret(<4 x i64>) %agg.result, <4 x float> %a) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    xxmrglw vs0, v2, v2
@@ -387,11 +387,11 @@ define void @test4elt_signed(<4 x i64>* noalias nocapture sret(<4 x i64>) %agg.r
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = fptoui <4 x float> %a to <4 x i64>
-  store <4 x i64> %0, <4 x i64>* %agg.result, align 32
+  store <4 x i64> %0, ptr %agg.result, align 32
   ret void
 }
 
-define void @test8elt_signed(<8 x i64>* noalias nocapture sret(<8 x i64>) %agg.result, <8 x float>* nocapture readonly) local_unnamed_addr #2 {
+define void @test8elt_signed(ptr noalias nocapture sret(<8 x i64>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -467,13 +467,13 @@ define void @test8elt_signed(<8 x i64>* noalias nocapture sret(<8 x i64>) %agg.r
 ; CHECK-BE-NEXT:    stxv vs2, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <8 x float>, <8 x float>* %0, align 32
+  %a = load <8 x float>, ptr %0, align 32
   %1 = fptoui <8 x float> %a to <8 x i64>
-  store <8 x i64> %1, <8 x i64>* %agg.result, align 64
+  store <8 x i64> %1, ptr %agg.result, align 64
   ret void
 }
 
-define void @test16elt_signed(<16 x i64>* noalias nocapture sret(<16 x i64>) %agg.result, <16 x float>* nocapture readonly) local_unnamed_addr #2 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x i64>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test16elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r7, 48
@@ -613,8 +613,8 @@ define void @test16elt_signed(<16 x i64>* noalias nocapture sret(<16 x i64>) %ag
 ; CHECK-BE-NEXT:    stxv vs2, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x float>, <16 x float>* %0, align 64
+  %a = load <16 x float>, ptr %0, align 64
   %1 = fptoui <16 x float> %a to <16 x i64>
-  store <16 x i64> %1, <16 x i64>* %agg.result, align 128
+  store <16 x i64> %1, ptr %agg.result, align 128
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec_conv_fp32_to_i8_elts.ll b/llvm/test/CodeGen/PowerPC/vec_conv_fp32_to_i8_elts.ll
index bbdf928d51cc..851ce3271f7e 100644
--- a/llvm/test/CodeGen/PowerPC/vec_conv_fp32_to_i8_elts.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_conv_fp32_to_i8_elts.ll
@@ -172,7 +172,7 @@ entry:
   ret i32 %1
 }
 
-define i64 @test8elt(<8 x float>* nocapture readonly) local_unnamed_addr #2 {
+define i64 @test8elt(ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    lxvd2x vs0, 0, r3
@@ -334,13 +334,13 @@ define i64 @test8elt(<8 x float>* nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-BE-NEXT:    mffprd r3, f0
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <8 x float>, <8 x float>* %0, align 32
+  %a = load <8 x float>, ptr %0, align 32
   %1 = fptoui <8 x float> %a to <8 x i8>
   %2 = bitcast <8 x i8> %1 to i64
   ret i64 %2
 }
 
-define <16 x i8> @test16elt(<16 x float>* nocapture readonly) local_unnamed_addr #3 {
+define <16 x i8> @test16elt(ptr nocapture readonly) local_unnamed_addr #3 {
 ; CHECK-P8-LABEL: test16elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    lxvd2x vs0, 0, r3
@@ -644,7 +644,7 @@ define <16 x i8> @test16elt(<16 x float>* nocapture readonly) local_unnamed_addr
 ; CHECK-BE-NEXT:    xxmrghd v2, vs0, vs2
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x float>, <16 x float>* %0, align 64
+  %a = load <16 x float>, ptr %0, align 64
   %1 = fptoui <16 x float> %a to <16 x i8>
   ret <16 x i8> %1
 }
@@ -812,7 +812,7 @@ entry:
   ret i32 %1
 }
 
-define i64 @test8elt_signed(<8 x float>* nocapture readonly) local_unnamed_addr #2 {
+define i64 @test8elt_signed(ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    lxvd2x vs0, 0, r3
@@ -974,13 +974,13 @@ define i64 @test8elt_signed(<8 x float>* nocapture readonly) local_unnamed_addr
 ; CHECK-BE-NEXT:    mffprd r3, f0
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <8 x float>, <8 x float>* %0, align 32
+  %a = load <8 x float>, ptr %0, align 32
   %1 = fptosi <8 x float> %a to <8 x i8>
   %2 = bitcast <8 x i8> %1 to i64
   ret i64 %2
 }
 
-define <16 x i8> @test16elt_signed(<16 x float>* nocapture readonly) local_unnamed_addr #3 {
+define <16 x i8> @test16elt_signed(ptr nocapture readonly) local_unnamed_addr #3 {
 ; CHECK-P8-LABEL: test16elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    lxvd2x vs0, 0, r3
@@ -1284,7 +1284,7 @@ define <16 x i8> @test16elt_signed(<16 x float>* nocapture readonly) local_unnam
 ; CHECK-BE-NEXT:    xxmrghd v2, vs0, vs2
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x float>, <16 x float>* %0, align 64
+  %a = load <16 x float>, ptr %0, align 64
   %1 = fptosi <16 x float> %a to <16 x i8>
   ret <16 x i8> %1
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec_conv_fp64_to_i16_elts.ll b/llvm/test/CodeGen/PowerPC/vec_conv_fp64_to_i16_elts.ll
index db631458937c..3b422dfc7a4f 100644
--- a/llvm/test/CodeGen/PowerPC/vec_conv_fp64_to_i16_elts.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_conv_fp64_to_i16_elts.ll
@@ -60,7 +60,7 @@ entry:
   ret i32 %1
 }
 
-define i64 @test4elt(<4 x double>* nocapture readonly) local_unnamed_addr #1 {
+define i64 @test4elt(ptr nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r4, 16
@@ -138,13 +138,13 @@ define i64 @test4elt(<4 x double>* nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-BE-NEXT:    mffprd r3, f0
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <4 x double>, <4 x double>* %0, align 32
+  %a = load <4 x double>, ptr %0, align 32
   %1 = fptoui <4 x double> %a to <4 x i16>
   %2 = bitcast <4 x i16> %1 to i64
   ret i64 %2
 }
 
-define <8 x i16> @test8elt(<8 x double>* nocapture readonly) local_unnamed_addr #2 {
+define <8 x i16> @test8elt(ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r4, 16
@@ -280,12 +280,12 @@ define <8 x i16> @test8elt(<8 x double>* nocapture readonly) local_unnamed_addr
 ; CHECK-BE-NEXT:    xxmrghd v2, vs0, vs2
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <8 x double>, <8 x double>* %0, align 64
+  %a = load <8 x double>, ptr %0, align 64
   %1 = fptoui <8 x double> %a to <8 x i16>
   ret <8 x i16> %1
 }
 
-define void @test16elt(<16 x i16>* noalias nocapture sret(<16 x i16>) %agg.result, <16 x double>* nocapture readonly) local_unnamed_addr #3 {
+define void @test16elt(ptr noalias nocapture sret(<16 x i16>) %agg.result, ptr nocapture readonly) local_unnamed_addr #3 {
 ; CHECK-P8-LABEL: test16elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -550,9 +550,9 @@ define void @test16elt(<16 x i16>* noalias nocapture sret(<16 x i16>) %agg.resul
 ; CHECK-BE-NEXT:    stxv vs0, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x double>, <16 x double>* %0, align 128
+  %a = load <16 x double>, ptr %0, align 128
   %1 = fptoui <16 x double> %a to <16 x i16>
-  store <16 x i16> %1, <16 x i16>* %agg.result, align 32
+  store <16 x i16> %1, ptr %agg.result, align 32
   ret void
 }
 
@@ -607,7 +607,7 @@ entry:
   ret i32 %1
 }
 
-define i64 @test4elt_signed(<4 x double>* nocapture readonly) local_unnamed_addr #1 {
+define i64 @test4elt_signed(ptr nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r4, 16
@@ -685,13 +685,13 @@ define i64 @test4elt_signed(<4 x double>* nocapture readonly) local_unnamed_addr
 ; CHECK-BE-NEXT:    mffprd r3, f0
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <4 x double>, <4 x double>* %0, align 32
+  %a = load <4 x double>, ptr %0, align 32
   %1 = fptosi <4 x double> %a to <4 x i16>
   %2 = bitcast <4 x i16> %1 to i64
   ret i64 %2
 }
 
-define <8 x i16> @test8elt_signed(<8 x double>* nocapture readonly) local_unnamed_addr #2 {
+define <8 x i16> @test8elt_signed(ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r4, 16
@@ -827,12 +827,12 @@ define <8 x i16> @test8elt_signed(<8 x double>* nocapture readonly) local_unname
 ; CHECK-BE-NEXT:    xxmrghd v2, vs0, vs2
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <8 x double>, <8 x double>* %0, align 64
+  %a = load <8 x double>, ptr %0, align 64
   %1 = fptosi <8 x double> %a to <8 x i16>
   ret <8 x i16> %1
 }
 
-define void @test16elt_signed(<16 x i16>* noalias nocapture sret(<16 x i16>) %agg.result, <16 x double>* nocapture readonly) local_unnamed_addr #3 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x i16>) %agg.result, ptr nocapture readonly) local_unnamed_addr #3 {
 ; CHECK-P8-LABEL: test16elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -1097,8 +1097,8 @@ define void @test16elt_signed(<16 x i16>* noalias nocapture sret(<16 x i16>) %ag
 ; CHECK-BE-NEXT:    stxv vs0, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x double>, <16 x double>* %0, align 128
+  %a = load <16 x double>, ptr %0, align 128
   %1 = fptosi <16 x double> %a to <16 x i16>
-  store <16 x i16> %1, <16 x i16>* %agg.result, align 32
+  store <16 x i16> %1, ptr %agg.result, align 32
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec_conv_fp64_to_i32_elts.ll b/llvm/test/CodeGen/PowerPC/vec_conv_fp64_to_i32_elts.ll
index e6ca1109b087..d339a1218bbe 100644
--- a/llvm/test/CodeGen/PowerPC/vec_conv_fp64_to_i32_elts.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_conv_fp64_to_i32_elts.ll
@@ -43,7 +43,7 @@ entry:
   ret i64 %1
 }
 
-define <4 x i32> @test4elt(<4 x double>* nocapture readonly) local_unnamed_addr #1 {
+define <4 x i32> @test4elt(ptr nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r4, 16
@@ -80,12 +80,12 @@ define <4 x i32> @test4elt(<4 x double>* nocapture readonly) local_unnamed_addr
 ; CHECK-BE-NEXT:    vmrgew v2, v3, v2
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <4 x double>, <4 x double>* %0, align 32
+  %a = load <4 x double>, ptr %0, align 32
   %1 = fptoui <4 x double> %a to <4 x i32>
   ret <4 x i32> %1
 }
 
-define void @test8elt(<8 x i32>* noalias nocapture sret(<8 x i32>) %agg.result, <8 x double>* nocapture readonly) local_unnamed_addr #2 {
+define void @test8elt(ptr noalias nocapture sret(<8 x i32>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 32
@@ -149,13 +149,13 @@ define void @test8elt(<8 x i32>* noalias nocapture sret(<8 x i32>) %agg.result,
 ; CHECK-BE-NEXT:    stxv v3, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <8 x double>, <8 x double>* %0, align 64
+  %a = load <8 x double>, ptr %0, align 64
   %1 = fptoui <8 x double> %a to <8 x i32>
-  store <8 x i32> %1, <8 x i32>* %agg.result, align 32
+  store <8 x i32> %1, ptr %agg.result, align 32
   ret void
 }
 
-define void @test16elt(<16 x i32>* noalias nocapture sret(<16 x i32>) %agg.result, <16 x double>* nocapture readonly) local_unnamed_addr #2 {
+define void @test16elt(ptr noalias nocapture sret(<16 x i32>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test16elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 32
@@ -271,9 +271,9 @@ define void @test16elt(<16 x i32>* noalias nocapture sret(<16 x i32>) %agg.resul
 ; CHECK-BE-NEXT:    stxv v5, 48(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x double>, <16 x double>* %0, align 128
+  %a = load <16 x double>, ptr %0, align 128
   %1 = fptoui <16 x double> %a to <16 x i32>
-  store <16 x i32> %1, <16 x i32>* %agg.result, align 64
+  store <16 x i32> %1, ptr %agg.result, align 64
   ret void
 }
 
@@ -311,7 +311,7 @@ entry:
   ret i64 %1
 }
 
-define <4 x i32> @test4elt_signed(<4 x double>* nocapture readonly) local_unnamed_addr #1 {
+define <4 x i32> @test4elt_signed(ptr nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r4, 16
@@ -348,12 +348,12 @@ define <4 x i32> @test4elt_signed(<4 x double>* nocapture readonly) local_unname
 ; CHECK-BE-NEXT:    vmrgew v2, v3, v2
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <4 x double>, <4 x double>* %0, align 32
+  %a = load <4 x double>, ptr %0, align 32
   %1 = fptosi <4 x double> %a to <4 x i32>
   ret <4 x i32> %1
 }
 
-define void @test8elt_signed(<8 x i32>* noalias nocapture sret(<8 x i32>) %agg.result, <8 x double>* nocapture readonly) local_unnamed_addr #2 {
+define void @test8elt_signed(ptr noalias nocapture sret(<8 x i32>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 32
@@ -417,13 +417,13 @@ define void @test8elt_signed(<8 x i32>* noalias nocapture sret(<8 x i32>) %agg.r
 ; CHECK-BE-NEXT:    stxv v3, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <8 x double>, <8 x double>* %0, align 64
+  %a = load <8 x double>, ptr %0, align 64
   %1 = fptosi <8 x double> %a to <8 x i32>
-  store <8 x i32> %1, <8 x i32>* %agg.result, align 32
+  store <8 x i32> %1, ptr %agg.result, align 32
   ret void
 }
 
-define void @test16elt_signed(<16 x i32>* noalias nocapture sret(<16 x i32>) %agg.result, <16 x double>* nocapture readonly) local_unnamed_addr #2 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x i32>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test16elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 32
@@ -539,8 +539,8 @@ define void @test16elt_signed(<16 x i32>* noalias nocapture sret(<16 x i32>) %ag
 ; CHECK-BE-NEXT:    stxv v5, 48(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x double>, <16 x double>* %0, align 128
+  %a = load <16 x double>, ptr %0, align 128
   %1 = fptosi <16 x double> %a to <16 x i32>
-  store <16 x i32> %1, <16 x i32>* %agg.result, align 64
+  store <16 x i32> %1, ptr %agg.result, align 64
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec_conv_fp64_to_i8_elts.ll b/llvm/test/CodeGen/PowerPC/vec_conv_fp64_to_i8_elts.ll
index e15dbf6d2b61..8870ccc2fc55 100644
--- a/llvm/test/CodeGen/PowerPC/vec_conv_fp64_to_i8_elts.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_conv_fp64_to_i8_elts.ll
@@ -67,7 +67,7 @@ entry:
   ret i16 %1
 }
 
-define i32 @test4elt(<4 x double>* nocapture readonly) local_unnamed_addr #1 {
+define i32 @test4elt(ptr nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r4, 16
@@ -147,13 +147,13 @@ define i32 @test4elt(<4 x double>* nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-BE-NEXT:    vextuwlx r3, r3, v2
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <4 x double>, <4 x double>* %0, align 32
+  %a = load <4 x double>, ptr %0, align 32
   %1 = fptoui <4 x double> %a to <4 x i8>
   %2 = bitcast <4 x i8> %1 to i32
   ret i32 %2
 }
 
-define i64 @test8elt(<8 x double>* nocapture readonly) local_unnamed_addr #1 {
+define i64 @test8elt(ptr nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test8elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r4, 16
@@ -293,13 +293,13 @@ define i64 @test8elt(<8 x double>* nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-BE-NEXT:    mffprd r3, f0
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <8 x double>, <8 x double>* %0, align 64
+  %a = load <8 x double>, ptr %0, align 64
   %1 = fptoui <8 x double> %a to <8 x i8>
   %2 = bitcast <8 x i8> %1 to i64
   ret i64 %2
 }
 
-define <16 x i8> @test16elt(<16 x double>* nocapture readonly) local_unnamed_addr #2 {
+define <16 x i8> @test16elt(ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test16elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r4, 16
@@ -559,7 +559,7 @@ define <16 x i8> @test16elt(<16 x double>* nocapture readonly) local_unnamed_add
 ; CHECK-BE-NEXT:    xxmrghd v2, vs0, vs4
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x double>, <16 x double>* %0, align 128
+  %a = load <16 x double>, ptr %0, align 128
   %1 = fptoui <16 x double> %a to <16 x i8>
   ret <16 x i8> %1
 }
@@ -622,7 +622,7 @@ entry:
   ret i16 %1
 }
 
-define i32 @test4elt_signed(<4 x double>* nocapture readonly) local_unnamed_addr #1 {
+define i32 @test4elt_signed(ptr nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r4, 16
@@ -702,13 +702,13 @@ define i32 @test4elt_signed(<4 x double>* nocapture readonly) local_unnamed_addr
 ; CHECK-BE-NEXT:    vextuwlx r3, r3, v2
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <4 x double>, <4 x double>* %0, align 32
+  %a = load <4 x double>, ptr %0, align 32
   %1 = fptosi <4 x double> %a to <4 x i8>
   %2 = bitcast <4 x i8> %1 to i32
   ret i32 %2
 }
 
-define i64 @test8elt_signed(<8 x double>* nocapture readonly) local_unnamed_addr #1 {
+define i64 @test8elt_signed(ptr nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test8elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r4, 16
@@ -848,13 +848,13 @@ define i64 @test8elt_signed(<8 x double>* nocapture readonly) local_unnamed_addr
 ; CHECK-BE-NEXT:    mffprd r3, f0
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <8 x double>, <8 x double>* %0, align 64
+  %a = load <8 x double>, ptr %0, align 64
   %1 = fptosi <8 x double> %a to <8 x i8>
   %2 = bitcast <8 x i8> %1 to i64
   ret i64 %2
 }
 
-define <16 x i8> @test16elt_signed(<16 x double>* nocapture readonly) local_unnamed_addr #2 {
+define <16 x i8> @test16elt_signed(ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test16elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r4, 16
@@ -1114,7 +1114,7 @@ define <16 x i8> @test16elt_signed(<16 x double>* nocapture readonly) local_unna
 ; CHECK-BE-NEXT:    xxmrghd v2, vs0, vs4
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x double>, <16 x double>* %0, align 128
+  %a = load <16 x double>, ptr %0, align 128
   %1 = fptosi <16 x double> %a to <16 x i8>
   ret <16 x i8> %1
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec_conv_fp_to_i_4byte_elts.ll b/llvm/test/CodeGen/PowerPC/vec_conv_fp_to_i_4byte_elts.ll
index 84f3f231e696..30fb41fa77a7 100644
--- a/llvm/test/CodeGen/PowerPC/vec_conv_fp_to_i_4byte_elts.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_conv_fp_to_i_4byte_elts.ll
@@ -60,7 +60,7 @@ entry:
   ret <4 x i32> %0
 }
 
-define void @test8elt(<8 x i32>* noalias nocapture sret(<8 x i32>) %agg.result, <8 x float>* nocapture readonly) local_unnamed_addr #2 {
+define void @test8elt(ptr noalias nocapture sret(<8 x i32>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -92,13 +92,13 @@ define void @test8elt(<8 x i32>* noalias nocapture sret(<8 x i32>) %agg.result,
 ; CHECK-BE-NEXT:    stxv vs1, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <8 x float>, <8 x float>* %0, align 32
+  %a = load <8 x float>, ptr %0, align 32
   %1 = fptoui <8 x float> %a to <8 x i32>
-  store <8 x i32> %1, <8 x i32>* %agg.result, align 32
+  store <8 x i32> %1, ptr %agg.result, align 32
   ret void
 }
 
-define void @test16elt(<16 x i32>* noalias nocapture sret(<16 x i32>) %agg.result, <16 x float>* nocapture readonly) local_unnamed_addr #2 {
+define void @test16elt(ptr noalias nocapture sret(<16 x i32>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test16elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -150,9 +150,9 @@ define void @test16elt(<16 x i32>* noalias nocapture sret(<16 x i32>) %agg.resul
 ; CHECK-BE-NEXT:    stxv vs3, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x float>, <16 x float>* %0, align 64
+  %a = load <16 x float>, ptr %0, align 64
   %1 = fptoui <16 x float> %a to <16 x i32>
-  store <16 x i32> %1, <16 x i32>* %agg.result, align 64
+  store <16 x i32> %1, ptr %agg.result, align 64
   ret void
 }
 
@@ -207,7 +207,7 @@ entry:
   ret <4 x i32> %0
 }
 
-define void @test8elt_signed(<8 x i32>* noalias nocapture sret(<8 x i32>) %agg.result, <8 x float>* nocapture readonly) local_unnamed_addr #2 {
+define void @test8elt_signed(ptr noalias nocapture sret(<8 x i32>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -239,13 +239,13 @@ define void @test8elt_signed(<8 x i32>* noalias nocapture sret(<8 x i32>) %agg.r
 ; CHECK-BE-NEXT:    stxv vs1, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <8 x float>, <8 x float>* %0, align 32
+  %a = load <8 x float>, ptr %0, align 32
   %1 = fptosi <8 x float> %a to <8 x i32>
-  store <8 x i32> %1, <8 x i32>* %agg.result, align 32
+  store <8 x i32> %1, ptr %agg.result, align 32
   ret void
 }
 
-define void @test16elt_signed(<16 x i32>* noalias nocapture sret(<16 x i32>) %agg.result, <16 x float>* nocapture readonly) local_unnamed_addr #2 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x i32>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test16elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -297,8 +297,8 @@ define void @test16elt_signed(<16 x i32>* noalias nocapture sret(<16 x i32>) %ag
 ; CHECK-BE-NEXT:    stxv vs3, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x float>, <16 x float>* %0, align 64
+  %a = load <16 x float>, ptr %0, align 64
   %1 = fptosi <16 x float> %a to <16 x i32>
-  store <16 x i32> %1, <16 x i32>* %agg.result, align 64
+  store <16 x i32> %1, ptr %agg.result, align 64
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec_conv_fp_to_i_8byte_elts.ll b/llvm/test/CodeGen/PowerPC/vec_conv_fp_to_i_8byte_elts.ll
index b8a55d7ef448..9465689509c0 100644
--- a/llvm/test/CodeGen/PowerPC/vec_conv_fp_to_i_8byte_elts.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_conv_fp_to_i_8byte_elts.ll
@@ -29,7 +29,7 @@ entry:
   ret <2 x i64> %0
 }
 
-define void @test4elt(<4 x i64>* noalias nocapture sret(<4 x i64>) %agg.result, <4 x double>* nocapture readonly) local_unnamed_addr #1 {
+define void @test4elt(ptr noalias nocapture sret(<4 x i64>) %agg.result, ptr nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -61,13 +61,13 @@ define void @test4elt(<4 x i64>* noalias nocapture sret(<4 x i64>) %agg.result,
 ; CHECK-BE-NEXT:    stxv vs1, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <4 x double>, <4 x double>* %0, align 32
+  %a = load <4 x double>, ptr %0, align 32
   %1 = fptoui <4 x double> %a to <4 x i64>
-  store <4 x i64> %1, <4 x i64>* %agg.result, align 32
+  store <4 x i64> %1, ptr %agg.result, align 32
   ret void
 }
 
-define void @test8elt(<8 x i64>* noalias nocapture sret(<8 x i64>) %agg.result, <8 x double>* nocapture readonly) local_unnamed_addr #1 {
+define void @test8elt(ptr noalias nocapture sret(<8 x i64>) %agg.result, ptr nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test8elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -119,13 +119,13 @@ define void @test8elt(<8 x i64>* noalias nocapture sret(<8 x i64>) %agg.result,
 ; CHECK-BE-NEXT:    stxv vs3, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <8 x double>, <8 x double>* %0, align 64
+  %a = load <8 x double>, ptr %0, align 64
   %1 = fptoui <8 x double> %a to <8 x i64>
-  store <8 x i64> %1, <8 x i64>* %agg.result, align 64
+  store <8 x i64> %1, ptr %agg.result, align 64
   ret void
 }
 
-define void @test16elt(<16 x i64>* noalias nocapture sret(<16 x i64>) %agg.result, <16 x double>* nocapture readonly) local_unnamed_addr #1 {
+define void @test16elt(ptr noalias nocapture sret(<16 x i64>) %agg.result, ptr nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test16elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -217,9 +217,9 @@ define void @test16elt(<16 x i64>* noalias nocapture sret(<16 x i64>) %agg.resul
 ; CHECK-BE-NEXT:    stxv vs7, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x double>, <16 x double>* %0, align 128
+  %a = load <16 x double>, ptr %0, align 128
   %1 = fptoui <16 x double> %a to <16 x i64>
-  store <16 x i64> %1, <16 x i64>* %agg.result, align 128
+  store <16 x i64> %1, ptr %agg.result, align 128
   ret void
 }
 
@@ -243,7 +243,7 @@ entry:
   ret <2 x i64> %0
 }
 
-define void @test4elt_signed(<4 x i64>* noalias nocapture sret(<4 x i64>) %agg.result, <4 x double>* nocapture readonly) local_unnamed_addr #1 {
+define void @test4elt_signed(ptr noalias nocapture sret(<4 x i64>) %agg.result, ptr nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -275,13 +275,13 @@ define void @test4elt_signed(<4 x i64>* noalias nocapture sret(<4 x i64>) %agg.r
 ; CHECK-BE-NEXT:    stxv vs1, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <4 x double>, <4 x double>* %0, align 32
+  %a = load <4 x double>, ptr %0, align 32
   %1 = fptosi <4 x double> %a to <4 x i64>
-  store <4 x i64> %1, <4 x i64>* %agg.result, align 32
+  store <4 x i64> %1, ptr %agg.result, align 32
   ret void
 }
 
-define void @test8elt_signed(<8 x i64>* noalias nocapture sret(<8 x i64>) %agg.result, <8 x double>* nocapture readonly) local_unnamed_addr #1 {
+define void @test8elt_signed(ptr noalias nocapture sret(<8 x i64>) %agg.result, ptr nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test8elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -333,13 +333,13 @@ define void @test8elt_signed(<8 x i64>* noalias nocapture sret(<8 x i64>) %agg.r
 ; CHECK-BE-NEXT:    stxv vs3, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <8 x double>, <8 x double>* %0, align 64
+  %a = load <8 x double>, ptr %0, align 64
   %1 = fptosi <8 x double> %a to <8 x i64>
-  store <8 x i64> %1, <8 x i64>* %agg.result, align 64
+  store <8 x i64> %1, ptr %agg.result, align 64
   ret void
 }
 
-define void @test16elt_signed(<16 x i64>* noalias nocapture sret(<16 x i64>) %agg.result, <16 x double>* nocapture readonly) local_unnamed_addr #1 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x i64>) %agg.result, ptr nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test16elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -431,8 +431,8 @@ define void @test16elt_signed(<16 x i64>* noalias nocapture sret(<16 x i64>) %ag
 ; CHECK-BE-NEXT:    stxv vs7, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x double>, <16 x double>* %0, align 128
+  %a = load <16 x double>, ptr %0, align 128
   %1 = fptosi <16 x double> %a to <16 x i64>
-  store <16 x i64> %1, <16 x i64>* %agg.result, align 128
+  store <16 x i64> %1, ptr %agg.result, align 128
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec_conv_i16_to_fp32_elts.ll b/llvm/test/CodeGen/PowerPC/vec_conv_i16_to_fp32_elts.ll
index e7306daa82bc..f521e45ef0f6 100644
--- a/llvm/test/CodeGen/PowerPC/vec_conv_i16_to_fp32_elts.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_conv_i16_to_fp32_elts.ll
@@ -94,7 +94,7 @@ entry:
   ret <4 x float> %1
 }
 
-define void @test8elt(<8 x float>* noalias nocapture sret(<8 x float>) %agg.result, <8 x i16> %a) local_unnamed_addr #2 {
+define void @test8elt(ptr noalias nocapture sret(<8 x float>) %agg.result, <8 x i16> %a) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    xxlxor v3, v3, v3
@@ -132,11 +132,11 @@ define void @test8elt(<8 x float>* noalias nocapture sret(<8 x float>) %agg.resu
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = uitofp <8 x i16> %a to <8 x float>
-  store <8 x float> %0, <8 x float>* %agg.result, align 32
+  store <8 x float> %0, ptr %agg.result, align 32
   ret void
 }
 
-define void @test16elt(<16 x float>* noalias nocapture sret(<16 x float>) %agg.result, <16 x i16>* nocapture readonly) local_unnamed_addr #3 {
+define void @test16elt(ptr noalias nocapture sret(<16 x float>) %agg.result, ptr nocapture readonly) local_unnamed_addr #3 {
 ; CHECK-P8-LABEL: test16elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    addis r5, r2, .LCPI3_0 at toc@ha
@@ -220,9 +220,9 @@ define void @test16elt(<16 x float>* noalias nocapture sret(<16 x float>) %agg.r
 ; CHECK-BE-NEXT:    stxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x i16>, <16 x i16>* %0, align 32
+  %a = load <16 x i16>, ptr %0, align 32
   %1 = uitofp <16 x i16> %a to <16 x float>
-  store <16 x float> %1, <16 x float>* %agg.result, align 64
+  store <16 x float> %1, ptr %agg.result, align 64
   ret void
 }
 
@@ -315,7 +315,7 @@ entry:
   ret <4 x float> %1
 }
 
-define void @test8elt_signed(<8 x float>* noalias nocapture sret(<8 x float>) %agg.result, <8 x i16> %a) local_unnamed_addr #2 {
+define void @test8elt_signed(ptr noalias nocapture sret(<8 x float>) %agg.result, <8 x i16> %a) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    vmrglh v4, v2, v2
@@ -360,11 +360,11 @@ define void @test8elt_signed(<8 x float>* noalias nocapture sret(<8 x float>) %a
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = sitofp <8 x i16> %a to <8 x float>
-  store <8 x float> %0, <8 x float>* %agg.result, align 32
+  store <8 x float> %0, ptr %agg.result, align 32
   ret void
 }
 
-define void @test16elt_signed(<16 x float>* noalias nocapture sret(<16 x float>) %agg.result, <16 x i16>* nocapture readonly) local_unnamed_addr #3 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x float>) %agg.result, ptr nocapture readonly) local_unnamed_addr #3 {
 ; CHECK-P8-LABEL: test16elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -446,8 +446,8 @@ define void @test16elt_signed(<16 x float>* noalias nocapture sret(<16 x float>)
 ; CHECK-BE-NEXT:    stxv vs2, 32(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x i16>, <16 x i16>* %0, align 32
+  %a = load <16 x i16>, ptr %0, align 32
   %1 = sitofp <16 x i16> %a to <16 x float>
-  store <16 x float> %1, <16 x float>* %agg.result, align 64
+  store <16 x float> %1, ptr %agg.result, align 64
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec_conv_i16_to_fp64_elts.ll b/llvm/test/CodeGen/PowerPC/vec_conv_i16_to_fp64_elts.ll
index e8d4c7335be0..210f0edc0b96 100644
--- a/llvm/test/CodeGen/PowerPC/vec_conv_i16_to_fp64_elts.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_conv_i16_to_fp64_elts.ll
@@ -49,7 +49,7 @@ entry:
   ret <2 x double> %1
 }
 
-define void @test4elt(<4 x double>* noalias nocapture sret(<4 x double>) %agg.result, i64 %a.coerce) local_unnamed_addr #1 {
+define void @test4elt(ptr noalias nocapture sret(<4 x double>) %agg.result, i64 %a.coerce) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    addis r5, r2, .LCPI1_0 at toc@ha
@@ -111,11 +111,11 @@ define void @test4elt(<4 x double>* noalias nocapture sret(<4 x double>) %agg.re
 entry:
   %0 = bitcast i64 %a.coerce to <4 x i16>
   %1 = uitofp <4 x i16> %0 to <4 x double>
-  store <4 x double> %1, <4 x double>* %agg.result, align 32
+  store <4 x double> %1, ptr %agg.result, align 32
   ret void
 }
 
-define void @test8elt(<8 x double>* noalias nocapture sret(<8 x double>) %agg.result, <8 x i16> %a) local_unnamed_addr #2 {
+define void @test8elt(ptr noalias nocapture sret(<8 x double>) %agg.result, <8 x i16> %a) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    addis r4, r2, .LCPI2_0 at toc@ha
@@ -215,11 +215,11 @@ define void @test8elt(<8 x double>* noalias nocapture sret(<8 x double>) %agg.re
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = uitofp <8 x i16> %a to <8 x double>
-  store <8 x double> %0, <8 x double>* %agg.result, align 64
+  store <8 x double> %0, ptr %agg.result, align 64
   ret void
 }
 
-define void @test16elt(<16 x double>* noalias nocapture sret(<16 x double>) %agg.result, <16 x i16>* nocapture readonly) local_unnamed_addr #3 {
+define void @test16elt(ptr noalias nocapture sret(<16 x double>) %agg.result, ptr nocapture readonly) local_unnamed_addr #3 {
 ; CHECK-P8-LABEL: test16elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    addis r5, r2, .LCPI3_0 at toc@ha
@@ -370,9 +370,9 @@ define void @test16elt(<16 x double>* noalias nocapture sret(<16 x double>) %agg
 ; CHECK-BE-NEXT:    stxv vs6, 96(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x i16>, <16 x i16>* %0, align 32
+  %a = load <16 x i16>, ptr %0, align 32
   %1 = uitofp <16 x i16> %a to <16 x double>
-  store <16 x double> %1, <16 x double>* %agg.result, align 128
+  store <16 x double> %1, ptr %agg.result, align 128
   ret void
 }
 
@@ -421,7 +421,7 @@ entry:
   ret <2 x double> %1
 }
 
-define void @test4elt_signed(<4 x double>* noalias nocapture sret(<4 x double>) %agg.result, i64 %a.coerce) local_unnamed_addr #1 {
+define void @test4elt_signed(ptr noalias nocapture sret(<4 x double>) %agg.result, i64 %a.coerce) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    addis r5, r2, .LCPI5_0 at toc@ha
@@ -492,11 +492,11 @@ define void @test4elt_signed(<4 x double>* noalias nocapture sret(<4 x double>)
 entry:
   %0 = bitcast i64 %a.coerce to <4 x i16>
   %1 = sitofp <4 x i16> %0 to <4 x double>
-  store <4 x double> %1, <4 x double>* %agg.result, align 32
+  store <4 x double> %1, ptr %agg.result, align 32
   ret void
 }
 
-define void @test8elt_signed(<8 x double>* noalias nocapture sret(<8 x double>) %agg.result, <8 x i16> %a) local_unnamed_addr #2 {
+define void @test8elt_signed(ptr noalias nocapture sret(<8 x double>) %agg.result, <8 x i16> %a) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    addis r4, r2, .LCPI6_0 at toc@ha
@@ -613,11 +613,11 @@ define void @test8elt_signed(<8 x double>* noalias nocapture sret(<8 x double>)
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = sitofp <8 x i16> %a to <8 x double>
-  store <8 x double> %0, <8 x double>* %agg.result, align 64
+  store <8 x double> %0, ptr %agg.result, align 64
   ret void
 }
 
-define void @test16elt_signed(<16 x double>* noalias nocapture sret(<16 x double>) %agg.result, <16 x i16>* nocapture readonly) local_unnamed_addr #3 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x double>) %agg.result, ptr nocapture readonly) local_unnamed_addr #3 {
 ; CHECK-P8-LABEL: test16elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    addis r5, r2, .LCPI7_0 at toc@ha
@@ -801,8 +801,8 @@ define void @test16elt_signed(<16 x double>* noalias nocapture sret(<16 x double
 ; CHECK-BE-NEXT:    stxv vs7, 112(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x i16>, <16 x i16>* %0, align 32
+  %a = load <16 x i16>, ptr %0, align 32
   %1 = sitofp <16 x i16> %a to <16 x double>
-  store <16 x double> %1, <16 x double>* %agg.result, align 128
+  store <16 x double> %1, ptr %agg.result, align 128
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec_conv_i32_to_fp64_elts.ll b/llvm/test/CodeGen/PowerPC/vec_conv_i32_to_fp64_elts.ll
index 9a9ecea96214..b4273f2cd464 100644
--- a/llvm/test/CodeGen/PowerPC/vec_conv_i32_to_fp64_elts.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_conv_i32_to_fp64_elts.ll
@@ -38,7 +38,7 @@ entry:
   ret <2 x double> %1
 }
 
-define void @test4elt(<4 x double>* noalias nocapture sret(<4 x double>) %agg.result, <4 x i32> %a) local_unnamed_addr #1 {
+define void @test4elt(ptr noalias nocapture sret(<4 x double>) %agg.result, <4 x i32> %a) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    xxmrglw v3, v2, v2
@@ -73,11 +73,11 @@ define void @test4elt(<4 x double>* noalias nocapture sret(<4 x double>) %agg.re
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = uitofp <4 x i32> %a to <4 x double>
-  store <4 x double> %0, <4 x double>* %agg.result, align 32
+  store <4 x double> %0, ptr %agg.result, align 32
   ret void
 }
 
-define void @test8elt(<8 x double>* noalias nocapture sret(<8 x double>) %agg.result, <8 x i32>* nocapture readonly) local_unnamed_addr #2 {
+define void @test8elt(ptr noalias nocapture sret(<8 x double>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -141,13 +141,13 @@ define void @test8elt(<8 x double>* noalias nocapture sret(<8 x double>) %agg.re
 ; CHECK-BE-NEXT:    stxv vs0, 48(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <8 x i32>, <8 x i32>* %0, align 32
+  %a = load <8 x i32>, ptr %0, align 32
   %1 = uitofp <8 x i32> %a to <8 x double>
-  store <8 x double> %1, <8 x double>* %agg.result, align 64
+  store <8 x double> %1, ptr %agg.result, align 64
   ret void
 }
 
-define void @test16elt(<16 x double>* noalias nocapture sret(<16 x double>) %agg.result, <16 x i32>* nocapture readonly) local_unnamed_addr #2 {
+define void @test16elt(ptr noalias nocapture sret(<16 x double>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test16elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -263,9 +263,9 @@ define void @test16elt(<16 x double>* noalias nocapture sret(<16 x double>) %agg
 ; CHECK-BE-NEXT:    stxv vs4, 112(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x i32>, <16 x i32>* %0, align 64
+  %a = load <16 x i32>, ptr %0, align 64
   %1 = uitofp <16 x i32> %a to <16 x double>
-  store <16 x double> %1, <16 x double>* %agg.result, align 128
+  store <16 x double> %1, ptr %agg.result, align 128
   ret void
 }
 
@@ -298,7 +298,7 @@ entry:
   ret <2 x double> %1
 }
 
-define void @test4elt_signed(<4 x double>* noalias nocapture sret(<4 x double>) %agg.result, <4 x i32> %a) local_unnamed_addr #1 {
+define void @test4elt_signed(ptr noalias nocapture sret(<4 x double>) %agg.result, <4 x i32> %a) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    xxmrglw v3, v2, v2
@@ -333,11 +333,11 @@ define void @test4elt_signed(<4 x double>* noalias nocapture sret(<4 x double>)
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = sitofp <4 x i32> %a to <4 x double>
-  store <4 x double> %0, <4 x double>* %agg.result, align 32
+  store <4 x double> %0, ptr %agg.result, align 32
   ret void
 }
 
-define void @test8elt_signed(<8 x double>* noalias nocapture sret(<8 x double>) %agg.result, <8 x i32>* nocapture readonly) local_unnamed_addr #2 {
+define void @test8elt_signed(ptr noalias nocapture sret(<8 x double>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -401,13 +401,13 @@ define void @test8elt_signed(<8 x double>* noalias nocapture sret(<8 x double>)
 ; CHECK-BE-NEXT:    stxv vs0, 48(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <8 x i32>, <8 x i32>* %0, align 32
+  %a = load <8 x i32>, ptr %0, align 32
   %1 = sitofp <8 x i32> %a to <8 x double>
-  store <8 x double> %1, <8 x double>* %agg.result, align 64
+  store <8 x double> %1, ptr %agg.result, align 64
   ret void
 }
 
-define void @test16elt_signed(<16 x double>* noalias nocapture sret(<16 x double>) %agg.result, <16 x i32>* nocapture readonly) local_unnamed_addr #2 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x double>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test16elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -523,8 +523,8 @@ define void @test16elt_signed(<16 x double>* noalias nocapture sret(<16 x double
 ; CHECK-BE-NEXT:    stxv vs4, 112(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x i32>, <16 x i32>* %0, align 64
+  %a = load <16 x i32>, ptr %0, align 64
   %1 = sitofp <16 x i32> %a to <16 x double>
-  store <16 x double> %1, <16 x double>* %agg.result, align 128
+  store <16 x double> %1, ptr %agg.result, align 128
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll b/llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll
index 23b370f67756..16acda0ca3a3 100644
--- a/llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll
@@ -49,7 +49,7 @@ entry:
   ret i64 %1
 }
 
-define <4 x float> @test4elt(<4 x i64>* nocapture readonly) local_unnamed_addr #1 {
+define <4 x float> @test4elt(ptr nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r4, 16
@@ -86,12 +86,12 @@ define <4 x float> @test4elt(<4 x i64>* nocapture readonly) local_unnamed_addr #
 ; CHECK-BE-NEXT:    vpkudum v2, v2, v3
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <4 x i64>, <4 x i64>* %0, align 32
+  %a = load <4 x i64>, ptr %0, align 32
   %1 = uitofp <4 x i64> %a to <4 x float>
   ret <4 x float> %1
 }
 
-define void @test8elt(<8 x float>* noalias nocapture sret(<8 x float>) %agg.result, <8 x i64>* nocapture readonly) local_unnamed_addr #2 {
+define void @test8elt(ptr noalias nocapture sret(<8 x float>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 32
@@ -161,13 +161,13 @@ define void @test8elt(<8 x float>* noalias nocapture sret(<8 x float>) %agg.resu
 ; CHECK-BE-NEXT:    stxv v2, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <8 x i64>, <8 x i64>* %0, align 64
+  %a = load <8 x i64>, ptr %0, align 64
   %1 = uitofp <8 x i64> %a to <8 x float>
-  store <8 x float> %1, <8 x float>* %agg.result, align 32
+  store <8 x float> %1, ptr %agg.result, align 32
   ret void
 }
 
-define void @test16elt(<16 x float>* noalias nocapture sret(<16 x float>) %agg.result, <16 x i64>* nocapture readonly) local_unnamed_addr #2 {
+define void @test16elt(ptr noalias nocapture sret(<16 x float>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test16elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 32
@@ -295,9 +295,9 @@ define void @test16elt(<16 x float>* noalias nocapture sret(<16 x float>) %agg.r
 ; CHECK-BE-NEXT:    stxv v2, 48(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x i64>, <16 x i64>* %0, align 128
+  %a = load <16 x i64>, ptr %0, align 128
   %1 = uitofp <16 x i64> %a to <16 x float>
-  store <16 x float> %1, <16 x float>* %agg.result, align 64
+  store <16 x float> %1, ptr %agg.result, align 64
   ret void
 }
 
@@ -341,7 +341,7 @@ entry:
   ret i64 %1
 }
 
-define <4 x float> @test4elt_signed(<4 x i64>* nocapture readonly) local_unnamed_addr #1 {
+define <4 x float> @test4elt_signed(ptr nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r4, 16
@@ -378,12 +378,12 @@ define <4 x float> @test4elt_signed(<4 x i64>* nocapture readonly) local_unnamed
 ; CHECK-BE-NEXT:    vpkudum v2, v2, v3
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <4 x i64>, <4 x i64>* %0, align 32
+  %a = load <4 x i64>, ptr %0, align 32
   %1 = sitofp <4 x i64> %a to <4 x float>
   ret <4 x float> %1
 }
 
-define void @test8elt_signed(<8 x float>* noalias nocapture sret(<8 x float>) %agg.result, <8 x i64>* nocapture readonly) local_unnamed_addr #2 {
+define void @test8elt_signed(ptr noalias nocapture sret(<8 x float>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 32
@@ -453,13 +453,13 @@ define void @test8elt_signed(<8 x float>* noalias nocapture sret(<8 x float>) %a
 ; CHECK-BE-NEXT:    stxv v2, 16(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <8 x i64>, <8 x i64>* %0, align 64
+  %a = load <8 x i64>, ptr %0, align 64
   %1 = sitofp <8 x i64> %a to <8 x float>
-  store <8 x float> %1, <8 x float>* %agg.result, align 32
+  store <8 x float> %1, ptr %agg.result, align 32
   ret void
 }
 
-define void @test16elt_signed(<16 x float>* noalias nocapture sret(<16 x float>) %agg.result, <16 x i64>* nocapture readonly) local_unnamed_addr #2 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x float>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test16elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 32
@@ -587,8 +587,8 @@ define void @test16elt_signed(<16 x float>* noalias nocapture sret(<16 x float>)
 ; CHECK-BE-NEXT:    stxv v2, 48(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x i64>, <16 x i64>* %0, align 128
+  %a = load <16 x i64>, ptr %0, align 128
   %1 = sitofp <16 x i64> %a to <16 x float>
-  store <16 x float> %1, <16 x float>* %agg.result, align 64
+  store <16 x float> %1, ptr %agg.result, align 64
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec_conv_i8_to_fp32_elts.ll b/llvm/test/CodeGen/PowerPC/vec_conv_i8_to_fp32_elts.ll
index 7ba0f8d444f6..1bf167a1f415 100644
--- a/llvm/test/CodeGen/PowerPC/vec_conv_i8_to_fp32_elts.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_conv_i8_to_fp32_elts.ll
@@ -101,7 +101,7 @@ entry:
   ret <4 x float> %1
 }
 
-define void @test8elt(<8 x float>* noalias nocapture sret(<8 x float>) %agg.result, i64 %a.coerce) local_unnamed_addr #2 {
+define void @test8elt(ptr noalias nocapture sret(<8 x float>) %agg.result, i64 %a.coerce) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    addis r5, r2, .LCPI2_0 at toc@ha
@@ -163,11 +163,11 @@ define void @test8elt(<8 x float>* noalias nocapture sret(<8 x float>) %agg.resu
 entry:
   %0 = bitcast i64 %a.coerce to <8 x i8>
   %1 = uitofp <8 x i8> %0 to <8 x float>
-  store <8 x float> %1, <8 x float>* %agg.result, align 32
+  store <8 x float> %1, ptr %agg.result, align 32
   ret void
 }
 
-define void @test16elt(<16 x float>* noalias nocapture sret(<16 x float>) %agg.result, <16 x i8> %a) local_unnamed_addr #3 {
+define void @test16elt(ptr noalias nocapture sret(<16 x float>) %agg.result, <16 x i8> %a) local_unnamed_addr #3 {
 ; CHECK-P8-LABEL: test16elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    addis r4, r2, .LCPI3_0 at toc@ha
@@ -267,7 +267,7 @@ define void @test16elt(<16 x float>* noalias nocapture sret(<16 x float>) %agg.r
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = uitofp <16 x i8> %a to <16 x float>
-  store <16 x float> %0, <16 x float>* %agg.result, align 64
+  store <16 x float> %0, ptr %agg.result, align 64
   ret void
 }
 
@@ -370,7 +370,7 @@ entry:
   ret <4 x float> %1
 }
 
-define void @test8elt_signed(<8 x float>* noalias nocapture sret(<8 x float>) %agg.result, i64 %a.coerce) local_unnamed_addr #2 {
+define void @test8elt_signed(ptr noalias nocapture sret(<8 x float>) %agg.result, i64 %a.coerce) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    addis r5, r2, .LCPI6_0 at toc@ha
@@ -439,11 +439,11 @@ define void @test8elt_signed(<8 x float>* noalias nocapture sret(<8 x float>) %a
 entry:
   %0 = bitcast i64 %a.coerce to <8 x i8>
   %1 = sitofp <8 x i8> %0 to <8 x float>
-  store <8 x float> %1, <8 x float>* %agg.result, align 32
+  store <8 x float> %1, ptr %agg.result, align 32
   ret void
 }
 
-define void @test16elt_signed(<16 x float>* noalias nocapture sret(<16 x float>) %agg.result, <16 x i8> %a) local_unnamed_addr #3 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x float>) %agg.result, <16 x i8> %a) local_unnamed_addr #3 {
 ; CHECK-P8-LABEL: test16elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    addis r4, r2, .LCPI7_0 at toc@ha
@@ -558,6 +558,6 @@ define void @test16elt_signed(<16 x float>* noalias nocapture sret(<16 x float>)
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = sitofp <16 x i8> %a to <16 x float>
-  store <16 x float> %0, <16 x float>* %agg.result, align 64
+  store <16 x float> %0, ptr %agg.result, align 64
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec_conv_i8_to_fp64_elts.ll b/llvm/test/CodeGen/PowerPC/vec_conv_i8_to_fp64_elts.ll
index 9e518fc03dfc..78229f35df82 100644
--- a/llvm/test/CodeGen/PowerPC/vec_conv_i8_to_fp64_elts.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_conv_i8_to_fp64_elts.ll
@@ -49,7 +49,7 @@ entry:
   ret <2 x double> %1
 }
 
-define void @test4elt(<4 x double>* noalias nocapture sret(<4 x double>) %agg.result, i32 %a.coerce) local_unnamed_addr #1 {
+define void @test4elt(ptr noalias nocapture sret(<4 x double>) %agg.result, i32 %a.coerce) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    addis r5, r2, .LCPI1_0 at toc@ha
@@ -111,11 +111,11 @@ define void @test4elt(<4 x double>* noalias nocapture sret(<4 x double>) %agg.re
 entry:
   %0 = bitcast i32 %a.coerce to <4 x i8>
   %1 = uitofp <4 x i8> %0 to <4 x double>
-  store <4 x double> %1, <4 x double>* %agg.result, align 32
+  store <4 x double> %1, ptr %agg.result, align 32
   ret void
 }
 
-define void @test8elt(<8 x double>* noalias nocapture sret(<8 x double>) %agg.result, i64 %a.coerce) local_unnamed_addr #1 {
+define void @test8elt(ptr noalias nocapture sret(<8 x double>) %agg.result, i64 %a.coerce) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test8elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    addis r5, r2, .LCPI2_0 at toc@ha
@@ -219,11 +219,11 @@ define void @test8elt(<8 x double>* noalias nocapture sret(<8 x double>) %agg.re
 entry:
   %0 = bitcast i64 %a.coerce to <8 x i8>
   %1 = uitofp <8 x i8> %0 to <8 x double>
-  store <8 x double> %1, <8 x double>* %agg.result, align 64
+  store <8 x double> %1, ptr %agg.result, align 64
   ret void
 }
 
-define void @test16elt(<16 x double>* noalias nocapture sret(<16 x double>) %agg.result, <16 x i8> %a) local_unnamed_addr #2 {
+define void @test16elt(ptr noalias nocapture sret(<16 x double>) %agg.result, <16 x i8> %a) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test16elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    addis r4, r2, .LCPI3_0 at toc@ha
@@ -407,7 +407,7 @@ define void @test16elt(<16 x double>* noalias nocapture sret(<16 x double>) %agg
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = uitofp <16 x i8> %a to <16 x double>
-  store <16 x double> %0, <16 x double>* %agg.result, align 128
+  store <16 x double> %0, ptr %agg.result, align 128
   ret void
 }
 
@@ -456,7 +456,7 @@ entry:
   ret <2 x double> %1
 }
 
-define void @test4elt_signed(<4 x double>* noalias nocapture sret(<4 x double>) %agg.result, i32 %a.coerce) local_unnamed_addr #1 {
+define void @test4elt_signed(ptr noalias nocapture sret(<4 x double>) %agg.result, i32 %a.coerce) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    addis r5, r2, .LCPI5_0 at toc@ha
@@ -527,11 +527,11 @@ define void @test4elt_signed(<4 x double>* noalias nocapture sret(<4 x double>)
 entry:
   %0 = bitcast i32 %a.coerce to <4 x i8>
   %1 = sitofp <4 x i8> %0 to <4 x double>
-  store <4 x double> %1, <4 x double>* %agg.result, align 32
+  store <4 x double> %1, ptr %agg.result, align 32
   ret void
 }
 
-define void @test8elt_signed(<8 x double>* noalias nocapture sret(<8 x double>) %agg.result, i64 %a.coerce) local_unnamed_addr #1 {
+define void @test8elt_signed(ptr noalias nocapture sret(<8 x double>) %agg.result, i64 %a.coerce) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test8elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    addis r5, r2, .LCPI6_0 at toc@ha
@@ -652,11 +652,11 @@ define void @test8elt_signed(<8 x double>* noalias nocapture sret(<8 x double>)
 entry:
   %0 = bitcast i64 %a.coerce to <8 x i8>
   %1 = sitofp <8 x i8> %0 to <8 x double>
-  store <8 x double> %1, <8 x double>* %agg.result, align 64
+  store <8 x double> %1, ptr %agg.result, align 64
   ret void
 }
 
-define void @test16elt_signed(<16 x double>* noalias nocapture sret(<16 x double>) %agg.result, <16 x i8> %a) local_unnamed_addr #2 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x double>) %agg.result, <16 x i8> %a) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test16elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    addis r4, r2, .LCPI7_0 at toc@ha
@@ -873,6 +873,6 @@ define void @test16elt_signed(<16 x double>* noalias nocapture sret(<16 x double
 ; CHECK-BE-NEXT:    blr
 entry:
   %0 = sitofp <16 x i8> %a to <16 x double>
-  store <16 x double> %0, <16 x double>* %agg.result, align 128
+  store <16 x double> %0, ptr %agg.result, align 128
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec_conv_i_to_fp_4byte_elts.ll b/llvm/test/CodeGen/PowerPC/vec_conv_i_to_fp_4byte_elts.ll
index 8177fe9df1ed..e25a9adeb2ce 100644
--- a/llvm/test/CodeGen/PowerPC/vec_conv_i_to_fp_4byte_elts.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_conv_i_to_fp_4byte_elts.ll
@@ -60,7 +60,7 @@ entry:
   ret <4 x float> %0
 }
 
-define void @test8elt(<8 x float>* noalias nocapture sret(<8 x float>) %agg.result, <8 x i32>* nocapture readonly) local_unnamed_addr #2 {
+define void @test8elt(ptr noalias nocapture sret(<8 x float>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -92,13 +92,13 @@ define void @test8elt(<8 x float>* noalias nocapture sret(<8 x float>) %agg.resu
 ; CHECK-BE-NEXT:    stxv vs1, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <8 x i32>, <8 x i32>* %0, align 32
+  %a = load <8 x i32>, ptr %0, align 32
   %1 = uitofp <8 x i32> %a to <8 x float>
-  store <8 x float> %1, <8 x float>* %agg.result, align 32
+  store <8 x float> %1, ptr %agg.result, align 32
   ret void
 }
 
-define void @test16elt(<16 x float>* noalias nocapture sret(<16 x float>) %agg.result, <16 x i32>* nocapture readonly) local_unnamed_addr #2 {
+define void @test16elt(ptr noalias nocapture sret(<16 x float>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test16elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -150,9 +150,9 @@ define void @test16elt(<16 x float>* noalias nocapture sret(<16 x float>) %agg.r
 ; CHECK-BE-NEXT:    stxv vs3, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x i32>, <16 x i32>* %0, align 64
+  %a = load <16 x i32>, ptr %0, align 64
   %1 = uitofp <16 x i32> %a to <16 x float>
-  store <16 x float> %1, <16 x float>* %agg.result, align 64
+  store <16 x float> %1, ptr %agg.result, align 64
   ret void
 }
 
@@ -207,7 +207,7 @@ entry:
   ret <4 x float> %0
 }
 
-define void @test8elt_signed(<8 x float>* noalias nocapture sret(<8 x float>) %agg.result, <8 x i32>* nocapture readonly) local_unnamed_addr #2 {
+define void @test8elt_signed(ptr noalias nocapture sret(<8 x float>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test8elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -239,13 +239,13 @@ define void @test8elt_signed(<8 x float>* noalias nocapture sret(<8 x float>) %a
 ; CHECK-BE-NEXT:    stxv vs1, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <8 x i32>, <8 x i32>* %0, align 32
+  %a = load <8 x i32>, ptr %0, align 32
   %1 = sitofp <8 x i32> %a to <8 x float>
-  store <8 x float> %1, <8 x float>* %agg.result, align 32
+  store <8 x float> %1, ptr %agg.result, align 32
   ret void
 }
 
-define void @test16elt_signed(<16 x float>* noalias nocapture sret(<16 x float>) %agg.result, <16 x i32>* nocapture readonly) local_unnamed_addr #2 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x float>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
 ; CHECK-P8-LABEL: test16elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -297,8 +297,8 @@ define void @test16elt_signed(<16 x float>* noalias nocapture sret(<16 x float>)
 ; CHECK-BE-NEXT:    stxv vs3, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x i32>, <16 x i32>* %0, align 64
+  %a = load <16 x i32>, ptr %0, align 64
   %1 = sitofp <16 x i32> %a to <16 x float>
-  store <16 x float> %1, <16 x float>* %agg.result, align 64
+  store <16 x float> %1, ptr %agg.result, align 64
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec_conv_i_to_fp_8byte_elts.ll b/llvm/test/CodeGen/PowerPC/vec_conv_i_to_fp_8byte_elts.ll
index 00443f36c7d2..6d7ca6577665 100644
--- a/llvm/test/CodeGen/PowerPC/vec_conv_i_to_fp_8byte_elts.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_conv_i_to_fp_8byte_elts.ll
@@ -29,7 +29,7 @@ entry:
   ret <2 x double> %0
 }
 
-define void @test4elt(<4 x double>* noalias nocapture sret(<4 x double>) %agg.result, <4 x i64>* nocapture readonly) local_unnamed_addr #1 {
+define void @test4elt(ptr noalias nocapture sret(<4 x double>) %agg.result, ptr nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -61,13 +61,13 @@ define void @test4elt(<4 x double>* noalias nocapture sret(<4 x double>) %agg.re
 ; CHECK-BE-NEXT:    stxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <4 x i64>, <4 x i64>* %0, align 32
+  %a = load <4 x i64>, ptr %0, align 32
   %1 = uitofp <4 x i64> %a to <4 x double>
-  store <4 x double> %1, <4 x double>* %agg.result, align 32
+  store <4 x double> %1, ptr %agg.result, align 32
   ret void
 }
 
-define void @test8elt(<8 x double>* noalias nocapture sret(<8 x double>) %agg.result, <8 x i64>* nocapture readonly) local_unnamed_addr #1 {
+define void @test8elt(ptr noalias nocapture sret(<8 x double>) %agg.result, ptr nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test8elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -119,13 +119,13 @@ define void @test8elt(<8 x double>* noalias nocapture sret(<8 x double>) %agg.re
 ; CHECK-BE-NEXT:    stxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <8 x i64>, <8 x i64>* %0, align 64
+  %a = load <8 x i64>, ptr %0, align 64
   %1 = uitofp <8 x i64> %a to <8 x double>
-  store <8 x double> %1, <8 x double>* %agg.result, align 64
+  store <8 x double> %1, ptr %agg.result, align 64
   ret void
 }
 
-define void @test16elt(<16 x double>* noalias nocapture sret(<16 x double>) %agg.result, <16 x i64>* nocapture readonly) local_unnamed_addr #1 {
+define void @test16elt(ptr noalias nocapture sret(<16 x double>) %agg.result, ptr nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test16elt:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -217,9 +217,9 @@ define void @test16elt(<16 x double>* noalias nocapture sret(<16 x double>) %agg
 ; CHECK-BE-NEXT:    stxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x i64>, <16 x i64>* %0, align 128
+  %a = load <16 x i64>, ptr %0, align 128
   %1 = uitofp <16 x i64> %a to <16 x double>
-  store <16 x double> %1, <16 x double>* %agg.result, align 128
+  store <16 x double> %1, ptr %agg.result, align 128
   ret void
 }
 
@@ -243,7 +243,7 @@ entry:
   ret <2 x double> %0
 }
 
-define void @test4elt_signed(<4 x double>* noalias nocapture sret(<4 x double>) %agg.result, <4 x i64>* nocapture readonly) local_unnamed_addr #1 {
+define void @test4elt_signed(ptr noalias nocapture sret(<4 x double>) %agg.result, ptr nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test4elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -275,13 +275,13 @@ define void @test4elt_signed(<4 x double>* noalias nocapture sret(<4 x double>)
 ; CHECK-BE-NEXT:    stxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <4 x i64>, <4 x i64>* %0, align 32
+  %a = load <4 x i64>, ptr %0, align 32
   %1 = sitofp <4 x i64> %a to <4 x double>
-  store <4 x double> %1, <4 x double>* %agg.result, align 32
+  store <4 x double> %1, ptr %agg.result, align 32
   ret void
 }
 
-define void @test8elt_signed(<8 x double>* noalias nocapture sret(<8 x double>) %agg.result, <8 x i64>* nocapture readonly) local_unnamed_addr #1 {
+define void @test8elt_signed(ptr noalias nocapture sret(<8 x double>) %agg.result, ptr nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test8elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -333,13 +333,13 @@ define void @test8elt_signed(<8 x double>* noalias nocapture sret(<8 x double>)
 ; CHECK-BE-NEXT:    stxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <8 x i64>, <8 x i64>* %0, align 64
+  %a = load <8 x i64>, ptr %0, align 64
   %1 = sitofp <8 x i64> %a to <8 x double>
-  store <8 x double> %1, <8 x double>* %agg.result, align 64
+  store <8 x double> %1, ptr %agg.result, align 64
   ret void
 }
 
-define void @test16elt_signed(<16 x double>* noalias nocapture sret(<16 x double>) %agg.result, <16 x i64>* nocapture readonly) local_unnamed_addr #1 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x double>) %agg.result, ptr nocapture readonly) local_unnamed_addr #1 {
 ; CHECK-P8-LABEL: test16elt_signed:
 ; CHECK-P8:       # %bb.0: # %entry
 ; CHECK-P8-NEXT:    li r5, 16
@@ -431,8 +431,8 @@ define void @test16elt_signed(<16 x double>* noalias nocapture sret(<16 x double
 ; CHECK-BE-NEXT:    stxv vs0, 0(r3)
 ; CHECK-BE-NEXT:    blr
 entry:
-  %a = load <16 x i64>, <16 x i64>* %0, align 128
+  %a = load <16 x i64>, ptr %0, align 128
   %1 = sitofp <16 x i64> %a to <16 x double>
-  store <16 x double> %1, <16 x double>* %agg.result, align 128
+  store <16 x double> %1, ptr %agg.result, align 128
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec_fneg.ll b/llvm/test/CodeGen/PowerPC/vec_fneg.ll
index 904445abcaef..2854a31cad9e 100644
--- a/llvm/test/CodeGen/PowerPC/vec_fneg.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_fneg.ll
@@ -5,12 +5,12 @@
 ; RUN:          -mattr=+altivec -mattr=-vsx |  FileCheck %s \
 ; RUN:          -check-prefix=CHECK-NOVSX
 
-define void @test_float(<4 x float>* %A) {
+define void @test_float(ptr %A) {
 ; CHECK-LABEL: test_float
 ; CHECK-NOVSX-LABEL: test_float
-	%tmp2 = load <4 x float>, <4 x float>* %A
+	%tmp2 = load <4 x float>, ptr %A
 	%tmp3 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %tmp2
-	store <4 x float> %tmp3, <4 x float>* %A
+	store <4 x float> %tmp3, ptr %A
 	ret void
 
 ; CHECK: xvnegsp
@@ -20,12 +20,12 @@ define void @test_float(<4 x float>* %A) {
 
 }
 
-define void @test_double(<2 x double>* %A) {
+define void @test_double(ptr %A) {
 ; CHECK-LABEL: test_double
 ; CHECK-NOVSX-LABEL: test_double
-	%tmp2 = load <2 x double>, <2 x double>* %A
+	%tmp2 = load <2 x double>, ptr %A
 	%tmp3 = fsub <2 x double> < double -0.000000e+00, double -0.000000e+00 >, %tmp2
-	store <2 x double> %tmp3, <2 x double>* %A
+	store <2 x double> %tmp3, ptr %A
 	ret void
 
 ; CHECK: xvnegdp

diff  --git a/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll b/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll
index dd873aad5ec8..a17114763149 100644
--- a/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll
@@ -388,7 +388,7 @@ entry:
   ret <4 x float> %vecins
 }
 
-define <4 x float> @testFloat2(<4 x float> %a, i8* %b, i32 zeroext %idx1, i32 zeroext %idx2) {
+define <4 x float> @testFloat2(<4 x float> %a, ptr %b, i32 zeroext %idx1, i32 zeroext %idx2) {
 ; CHECK-LABEL: testFloat2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lwz r3, 0(r5)
@@ -461,17 +461,15 @@ define <4 x float> @testFloat2(<4 x float> %a, i8* %b, i32 zeroext %idx1, i32 ze
 ; AIX-P8-32-NEXT:    lxvw4x v2, 0, r5
 ; AIX-P8-32-NEXT:    blr
 entry:
-  %0 = bitcast i8* %b to float*
-  %add.ptr1 = getelementptr inbounds i8, i8* %b, i64 1
-  %1 = bitcast i8* %add.ptr1 to float*
-  %2 = load float, float* %0, align 4
-  %vecins = insertelement <4 x float> %a, float %2, i32 %idx1
-  %3 = load float, float* %1, align 4
-  %vecins2 = insertelement <4 x float> %vecins, float %3, i32 %idx2
+  %add.ptr1 = getelementptr inbounds i8, ptr %b, i64 1
+  %0 = load float, ptr %b, align 4
+  %vecins = insertelement <4 x float> %a, float %0, i32 %idx1
+  %1 = load float, ptr %add.ptr1, align 4
+  %vecins2 = insertelement <4 x float> %vecins, float %1, i32 %idx2
   ret <4 x float> %vecins2
 }
 
-define <4 x float> @testFloat3(<4 x float> %a, i8* %b, i32 zeroext %idx1, i32 zeroext %idx2) {
+define <4 x float> @testFloat3(<4 x float> %a, ptr %b, i32 zeroext %idx1, i32 zeroext %idx2) {
 ; CHECK-LABEL: testFloat3:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    plwz r3, 65536(r5), 0
@@ -555,14 +553,12 @@ define <4 x float> @testFloat3(<4 x float> %a, i8* %b, i32 zeroext %idx1, i32 ze
 ; AIX-P8-32-NEXT:    lxvw4x v2, 0, r5
 ; AIX-P8-32-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %b, i64 65536
-  %0 = bitcast i8* %add.ptr to float*
-  %add.ptr1 = getelementptr inbounds i8, i8* %b, i64 68719476736
-  %1 = bitcast i8* %add.ptr1 to float*
-  %2 = load float, float* %0, align 4
-  %vecins = insertelement <4 x float> %a, float %2, i32 %idx1
-  %3 = load float, float* %1, align 4
-  %vecins2 = insertelement <4 x float> %vecins, float %3, i32 %idx2
+  %add.ptr = getelementptr inbounds i8, ptr %b, i64 65536
+  %add.ptr1 = getelementptr inbounds i8, ptr %b, i64 68719476736
+  %0 = load float, ptr %add.ptr, align 4
+  %vecins = insertelement <4 x float> %a, float %0, i32 %idx1
+  %1 = load float, ptr %add.ptr1, align 4
+  %vecins2 = insertelement <4 x float> %vecins, float %1, i32 %idx2
   ret <4 x float> %vecins2
 }
 
@@ -617,7 +613,7 @@ entry:
   ret <4 x float> %vecins1
 }
 
-define <4 x float> @testFloatImm2(<4 x float> %a, i32* %b) {
+define <4 x float> @testFloatImm2(<4 x float> %a, ptr %b) {
 ; CHECK-LABEL: testFloatImm2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lwz r3, 0(r5)
@@ -670,17 +666,15 @@ define <4 x float> @testFloatImm2(<4 x float> %a, i32* %b) {
 ; AIX-P8-32-NEXT:    vperm v2, v2, v3, v4
 ; AIX-P8-32-NEXT:    blr
 entry:
-  %0 = bitcast i32* %b to float*
-  %add.ptr1 = getelementptr inbounds i32, i32* %b, i64 1
-  %1 = bitcast i32* %add.ptr1 to float*
-  %2 = load float, float* %0, align 4
-  %vecins = insertelement <4 x float> %a, float %2, i32 0
-  %3 = load float, float* %1, align 4
-  %vecins2 = insertelement <4 x float> %vecins, float %3, i32 2
+  %add.ptr1 = getelementptr inbounds i32, ptr %b, i64 1
+  %0 = load float, ptr %b, align 4
+  %vecins = insertelement <4 x float> %a, float %0, i32 0
+  %1 = load float, ptr %add.ptr1, align 4
+  %vecins2 = insertelement <4 x float> %vecins, float %1, i32 2
   ret <4 x float> %vecins2
 }
 
-define <4 x float> @testFloatImm3(<4 x float> %a, i32* %b) {
+define <4 x float> @testFloatImm3(<4 x float> %a, ptr %b) {
 ; CHECK-LABEL: testFloatImm3:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    plwz r3, 262144(r5), 0
@@ -742,14 +736,12 @@ define <4 x float> @testFloatImm3(<4 x float> %a, i32* %b) {
 ; AIX-P8-32-NEXT:    vperm v2, v2, v4, v3
 ; AIX-P8-32-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i32, i32* %b, i64 65536
-  %0 = bitcast i32* %add.ptr to float*
-  %add.ptr1 = getelementptr inbounds i32, i32* %b, i64 68719476736
-  %1 = bitcast i32* %add.ptr1 to float*
-  %2 = load float, float* %0, align 4
-  %vecins = insertelement <4 x float> %a, float %2, i32 0
-  %3 = load float, float* %1, align 4
-  %vecins2 = insertelement <4 x float> %vecins, float %3, i32 2
+  %add.ptr = getelementptr inbounds i32, ptr %b, i64 65536
+  %add.ptr1 = getelementptr inbounds i32, ptr %b, i64 68719476736
+  %0 = load float, ptr %add.ptr, align 4
+  %vecins = insertelement <4 x float> %a, float %0, i32 0
+  %1 = load float, ptr %add.ptr1, align 4
+  %vecins2 = insertelement <4 x float> %vecins, float %1, i32 2
   ret <4 x float> %vecins2
 }
 
@@ -803,7 +795,7 @@ entry:
   ret <2 x double> %vecins
 }
 
-define <2 x double> @testDouble2(<2 x double> %a, i8* %b, i32 zeroext %idx1, i32 zeroext %idx2) {
+define <2 x double> @testDouble2(<2 x double> %a, ptr %b, i32 zeroext %idx1, i32 zeroext %idx2) {
 ; CHECK-LABEL: testDouble2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    ld r3, 0(r5)
@@ -878,17 +870,15 @@ define <2 x double> @testDouble2(<2 x double> %a, i8* %b, i32 zeroext %idx1, i32
 ; AIX-P8-32-NEXT:    lxvd2x v2, 0, r4
 ; AIX-P8-32-NEXT:    blr
 entry:
-  %0 = bitcast i8* %b to double*
-  %add.ptr1 = getelementptr inbounds i8, i8* %b, i64 1
-  %1 = bitcast i8* %add.ptr1 to double*
-  %2 = load double, double* %0, align 8
-  %vecins = insertelement <2 x double> %a, double %2, i32 %idx1
-  %3 = load double, double* %1, align 8
-  %vecins2 = insertelement <2 x double> %vecins, double %3, i32 %idx2
+  %add.ptr1 = getelementptr inbounds i8, ptr %b, i64 1
+  %0 = load double, ptr %b, align 8
+  %vecins = insertelement <2 x double> %a, double %0, i32 %idx1
+  %1 = load double, ptr %add.ptr1, align 8
+  %vecins2 = insertelement <2 x double> %vecins, double %1, i32 %idx2
   ret <2 x double> %vecins2
 }
 
-define <2 x double> @testDouble3(<2 x double> %a, i8* %b, i32 zeroext %idx1, i32 zeroext %idx2) {
+define <2 x double> @testDouble3(<2 x double> %a, ptr %b, i32 zeroext %idx1, i32 zeroext %idx2) {
 ; CHECK-LABEL: testDouble3:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    pld r3, 65536(r5), 0
@@ -972,14 +962,12 @@ define <2 x double> @testDouble3(<2 x double> %a, i8* %b, i32 zeroext %idx1, i32
 ; AIX-P8-32-NEXT:    lxvd2x v2, 0, r4
 ; AIX-P8-32-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %b, i64 65536
-  %0 = bitcast i8* %add.ptr to double*
-  %add.ptr1 = getelementptr inbounds i8, i8* %b, i64 68719476736
-  %1 = bitcast i8* %add.ptr1 to double*
-  %2 = load double, double* %0, align 8
-  %vecins = insertelement <2 x double> %a, double %2, i32 %idx1
-  %3 = load double, double* %1, align 8
-  %vecins2 = insertelement <2 x double> %vecins, double %3, i32 %idx2
+  %add.ptr = getelementptr inbounds i8, ptr %b, i64 65536
+  %add.ptr1 = getelementptr inbounds i8, ptr %b, i64 68719476736
+  %0 = load double, ptr %add.ptr, align 8
+  %vecins = insertelement <2 x double> %a, double %0, i32 %idx1
+  %1 = load double, ptr %add.ptr1, align 8
+  %vecins2 = insertelement <2 x double> %vecins, double %1, i32 %idx2
   ret <2 x double> %vecins2
 }
 
@@ -1014,7 +1002,7 @@ entry:
   ret <2 x double> %vecins
 }
 
-define <2 x double> @testDoubleImm2(<2 x double> %a, i32* %b) {
+define <2 x double> @testDoubleImm2(<2 x double> %a, ptr %b) {
 ; CHECK-LABEL: testDoubleImm2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd f0, 0(r5)
@@ -1039,13 +1027,12 @@ define <2 x double> @testDoubleImm2(<2 x double> %a, i32* %b) {
 ; AIX-P8-NEXT:    xxpermdi v2, vs0, v2, 1
 ; AIX-P8-NEXT:    blr
 entry:
-  %0 = bitcast i32* %b to double*
-  %1 = load double, double* %0, align 8
-  %vecins = insertelement <2 x double> %a, double %1, i32 0
+  %0 = load double, ptr %b, align 8
+  %vecins = insertelement <2 x double> %a, double %0, i32 0
   ret <2 x double> %vecins
 }
 
-define <2 x double> @testDoubleImm3(<2 x double> %a, i32* %b) {
+define <2 x double> @testDoubleImm3(<2 x double> %a, ptr %b) {
 ; CHECK-LABEL: testDoubleImm3:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lfd f0, 4(r5)
@@ -1070,14 +1057,13 @@ define <2 x double> @testDoubleImm3(<2 x double> %a, i32* %b) {
 ; AIX-P8-NEXT:    xxpermdi v2, vs0, v2, 1
 ; AIX-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i32, i32* %b, i64 1
-  %0 = bitcast i32* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %vecins = insertelement <2 x double> %a, double %1, i32 0
+  %add.ptr = getelementptr inbounds i32, ptr %b, i64 1
+  %0 = load double, ptr %add.ptr, align 8
+  %vecins = insertelement <2 x double> %a, double %0, i32 0
   ret <2 x double> %vecins
 }
 
-define <2 x double> @testDoubleImm4(<2 x double> %a, i32* %b) {
+define <2 x double> @testDoubleImm4(<2 x double> %a, ptr %b) {
 ; CHECK-LABEL: testDoubleImm4:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    plfd f0, 262144(r5), 0
@@ -1104,14 +1090,13 @@ define <2 x double> @testDoubleImm4(<2 x double> %a, i32* %b) {
 ; AIX-P8-NEXT:    xxpermdi v2, vs0, v2, 1
 ; AIX-P8-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i32, i32* %b, i64 65536
-  %0 = bitcast i32* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %vecins = insertelement <2 x double> %a, double %1, i32 0
+  %add.ptr = getelementptr inbounds i32, ptr %b, i64 65536
+  %0 = load double, ptr %add.ptr, align 8
+  %vecins = insertelement <2 x double> %a, double %0, i32 0
   ret <2 x double> %vecins
 }
 
-define <2 x double> @testDoubleImm5(<2 x double> %a, i32* %b) {
+define <2 x double> @testDoubleImm5(<2 x double> %a, ptr %b) {
 ; CHECK-LABEL: testDoubleImm5:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li r3, 1
@@ -1150,10 +1135,9 @@ define <2 x double> @testDoubleImm5(<2 x double> %a, i32* %b) {
 ; AIX-P8-32-NEXT:    xxpermdi v2, vs0, v2, 1
 ; AIX-P8-32-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i32, i32* %b, i64 68719476736
-  %0 = bitcast i32* %add.ptr to double*
-  %1 = load double, double* %0, align 8
-  %vecins = insertelement <2 x double> %a, double %1, i32 0
+  %add.ptr = getelementptr inbounds i32, ptr %b, i64 68719476736
+  %0 = load double, ptr %add.ptr, align 8
+  %vecins = insertelement <2 x double> %a, double %0, i32 0
   ret <2 x double> %vecins
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/vec_mergeow.ll b/llvm/test/CodeGen/PowerPC/vec_mergeow.ll
index 7194bed2b6b6..bd2e4ae9daf8 100644
--- a/llvm/test/CodeGen/PowerPC/vec_mergeow.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_mergeow.ll
@@ -9,12 +9,12 @@
 ; instruction. If run on a little endian machine, this should produce the
 ; vmrgow instruction. Note also that on little endian the input registers 
 ; are swapped also.
-define void @check_merge_even_xy(<16 x i8>* %A, <16 x i8>* %B) {
+define void @check_merge_even_xy(ptr %A, ptr %B) {
 entry:
 ; CHECK-LE-LABEL: @check_merge_even_xy
 ; CHECK-BE-LABEL: @check_merge_even_xy
-        %tmp = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+        %tmp = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, 
 	      		      <16 x i32> <i32 0, i32 1, i32 2, i32 3, 
 			      	    	  i32 16, i32 17, i32 18, i32 19, 
@@ -22,7 +22,7 @@ entry:
 					  i32 24, i32 25, i32 26, i32 27>
 ; CHECK-LE: vmrgow 2, 3, 2
 ; CHECK-BE: vmrgew 2, 2, 3
-      	store <16 x i8> %tmp3, <16 x i8>* %A
+      	store <16 x i8> %tmp3, ptr %A
 	ret void
 ; CHECK-LE: blr
 ; CHECK-BE: blr
@@ -33,11 +33,11 @@ entry:
 ; ordering. If run on a big endian machine, this should produce the vmrgew 
 ; instruction. If run on a little endian machine, this should produce the
 ; vmrgow instruction. 
-define void @check_merge_even_xx(<16 x i8>* %A) {
+define void @check_merge_even_xx(ptr %A) {
 entry:
 ; CHECK-LE-LABEL: @check_merge_even_xx
 ; CHECK-BE-LABEL: @check_merge_even_xx
-        %tmp = load <16 x i8>, <16 x i8>* %A
+        %tmp = load <16 x i8>, ptr %A
 	%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, 
 	      		      <16 x i32> <i32 0, i32 1, i32 2, i32 3, 
 			      	          i32 0, i32 1, i32 2, i32 3, 
@@ -45,7 +45,7 @@ entry:
 					  i32 8, i32 9, i32 10, i32 11>
 ; CHECK-LE: vmrgow 2, 2, 2
 ; CHECK-BE: vmrgew 2, 2, 2
-  	store <16 x i8> %tmp2, <16 x i8>* %A
+  	store <16 x i8> %tmp2, ptr %A
 	ret void
 ; CHECK-LE: blr
 ; CHECK-BE: blr       
@@ -57,12 +57,12 @@ entry:
 ; instruction. If run on a little endian machine, this should produce the
 ; vmrgew instruction. Note also that on little endian the input registers 
 ; are swapped also.
-define void @check_merge_odd_xy(<16 x i8>* %A, <16 x i8>* %B) {
+define void @check_merge_odd_xy(ptr %A, ptr %B) {
 entry:
 ; CHECK-LE-LABEL: @check_merge_odd_xy
 ; CHECK-BE-LABEL: @check_merge_odd_xy
-        %tmp = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+        %tmp = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, 
 	      		      <16 x i32> <i32 4, i32 5, i32 6, i32 7, 
 			      	    	  i32 20, i32 21, i32 22, i32 23, 
@@ -70,7 +70,7 @@ entry:
 					  i32 28, i32 29, i32 30, i32 31>
 ; CHECK-LE: vmrgew 2, 3, 2
 ; CHECK-BE: vmrgow 2, 2, 3
-        store <16 x i8> %tmp3, <16 x i8>* %A
+        store <16 x i8> %tmp3, ptr %A
 	ret void
 ; CHECK-LE: blr
 ; CHECK-BE: blr
@@ -81,11 +81,11 @@ entry:
 ; ordering. If run on a big endian machine, this should produce the vmrgow 
 ; instruction. If run on a little endian machine, this should produce the
 ; vmrgew instruction. 
-define void @check_merge_odd_xx(<16 x i8>* %A) {
+define void @check_merge_odd_xx(ptr %A) {
 entry:
 ; CHECK-LE-LABEL: @check_merge_odd_xx
 ; CHECK-BE-LABEL: @check_merge_odd_xx
-        %tmp = load <16 x i8>, <16 x i8>* %A
+        %tmp = load <16 x i8>, ptr %A
 	%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, 
 	      		      <16 x i32> <i32 4, i32 5, i32 6, i32 7, 
 			      	    	  i32 4, i32 5, i32 6, i32 7, 
@@ -93,7 +93,7 @@ entry:
 					  i32 12, i32 13, i32 14, i32 15>
 ; CHECK-LE: vmrgew 2, 2, 2
 ; CHECK-BE: vmrgow 2, 2, 2
-        store <16 x i8> %tmp2, <16 x i8>* %A
+        store <16 x i8> %tmp2, ptr %A
 	ret void
 ; CHECK-LE: blr
 ; CHECK-BE: blr

diff  --git a/llvm/test/CodeGen/PowerPC/vec_misaligned.ll b/llvm/test/CodeGen/PowerPC/vec_misaligned.ll
index 5355787adb0d..6b9a1e4e319c 100644
--- a/llvm/test/CodeGen/PowerPC/vec_misaligned.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_misaligned.ll
@@ -6,40 +6,38 @@ target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f3
 target triple = "powerpc-unknown-linux-gnu"
 	%struct.S2203 = type { %struct.u16qi }
 	%struct.u16qi = type { <16 x i8> }
- at s = weak global %struct.S2203 zeroinitializer		; <%struct.S2203*> [#uses=1]
+ at s = weak global %struct.S2203 zeroinitializer		; <ptr> [#uses=1]
 
 define void @foo(i32 %x, ...) {
 entry:
 ; CHECK: foo:
 ; CHECK-LE: foo:
-	%x_addr = alloca i32		; <i32*> [#uses=1]
-	%ap = alloca i8*		; <i8**> [#uses=3]
-	%ap.0 = alloca i8*		; <i8**> [#uses=3]
+	%x_addr = alloca i32		; <ptr> [#uses=1]
+	%ap = alloca ptr		; <ptr> [#uses=3]
+	%ap.0 = alloca ptr		; <ptr> [#uses=3]
 	%"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
-	store i32 %x, i32* %x_addr
-	%ap1 = bitcast i8** %ap to i8*		; <i8*> [#uses=1]
-	call void @llvm.va_start( i8* %ap1 )
-	%tmp = load i8*, i8** %ap, align 4		; <i8*> [#uses=1]
-	store i8* %tmp, i8** %ap.0, align 4
-	%tmp2 = load i8*, i8** %ap.0, align 4		; <i8*> [#uses=1]
-	%tmp3 = getelementptr i8, i8* %tmp2, i64 16		; <i8*> [#uses=1]
-	store i8* %tmp3, i8** %ap, align 4
-	%tmp4 = load i8*, i8** %ap.0, align 4		; <i8*> [#uses=1]
-	%tmp45 = bitcast i8* %tmp4 to %struct.S2203*		; <%struct.S2203*> [#uses=1]
-	%tmp6 = getelementptr %struct.S2203, %struct.S2203* @s, i32 0, i32 0		; <%struct.u16qi*> [#uses=1]
-	%tmp7 = getelementptr %struct.S2203, %struct.S2203* %tmp45, i32 0, i32 0		; <%struct.u16qi*> [#uses=1]
-	%tmp8 = getelementptr %struct.u16qi, %struct.u16qi* %tmp6, i32 0, i32 0		; <<16 x i8>*> [#uses=1]
-	%tmp9 = getelementptr %struct.u16qi, %struct.u16qi* %tmp7, i32 0, i32 0		; <<16 x i8>*> [#uses=1]
-	%tmp10 = load <16 x i8>, <16 x i8>* %tmp9, align 4		; <<16 x i8>> [#uses=1]
+	store i32 %x, ptr %x_addr
+	call void @llvm.va_start( ptr %ap )
+	%tmp = load ptr, ptr %ap, align 4		; <ptr> [#uses=1]
+	store ptr %tmp, ptr %ap.0, align 4
+	%tmp2 = load ptr, ptr %ap.0, align 4		; <ptr> [#uses=1]
+	%tmp3 = getelementptr i8, ptr %tmp2, i64 16		; <ptr> [#uses=1]
+	store ptr %tmp3, ptr %ap, align 4
+	%tmp4 = load ptr, ptr %ap.0, align 4		; <ptr> [#uses=1]
+	%tmp6 = getelementptr %struct.S2203, ptr @s, i32 0, i32 0		; <ptr> [#uses=1]
+	%tmp7 = getelementptr %struct.S2203, ptr %tmp4, i32 0, i32 0		; <ptr> [#uses=1]
+	%tmp8 = getelementptr %struct.u16qi, ptr %tmp6, i32 0, i32 0		; <ptr> [#uses=1]
+	%tmp9 = getelementptr %struct.u16qi, ptr %tmp7, i32 0, i32 0		; <ptr> [#uses=1]
+	%tmp10 = load <16 x i8>, ptr %tmp9, align 4		; <<16 x i8>> [#uses=1]
 ; CHECK: lvsl
 ; CHECK: vperm
 ; CHECK-LE: lvsr
 ; CHECK-LE: vperm
-	store <16 x i8> %tmp10, <16 x i8>* %tmp8, align 4
+	store <16 x i8> %tmp10, ptr %tmp8, align 4
 	br label %return
 
 return:		; preds = %entry
 	ret void
 }
 
-declare void @llvm.va_start(i8*) nounwind 
+declare void @llvm.va_start(ptr) nounwind 

diff  --git a/llvm/test/CodeGen/PowerPC/vec_mul.ll b/llvm/test/CodeGen/PowerPC/vec_mul.ll
index b1714b21334d..e56ebfa2dc70 100644
--- a/llvm/test/CodeGen/PowerPC/vec_mul.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_mul.ll
@@ -4,9 +4,9 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mattr=+altivec -mattr=+vsx -mcpu=pwr7 | FileCheck %s -check-prefix=CHECK-VSX
 ; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64le-unknown-linux-gnu -mattr=+altivec -mattr=+vsx -mcpu=pwr8 -mattr=-power8-altivec | FileCheck %s -check-prefix=CHECK-LE-VSX
 
-define <4 x i32> @test_v4i32(<4 x i32>* %X, <4 x i32>* %Y) {
-	%tmp = load <4 x i32>, <4 x i32>* %X		; <<4 x i32>> [#uses=1]
-	%tmp2 = load <4 x i32>, <4 x i32>* %Y		; <<4 x i32>> [#uses=1]
+define <4 x i32> @test_v4i32(ptr %X, ptr %Y) {
+	%tmp = load <4 x i32>, ptr %X		; <<4 x i32>> [#uses=1]
+	%tmp2 = load <4 x i32>, ptr %Y		; <<4 x i32>> [#uses=1]
 	%tmp3 = mul <4 x i32> %tmp, %tmp2		; <<4 x i32>> [#uses=1]
 	ret <4 x i32> %tmp3
 }
@@ -23,9 +23,9 @@ define <4 x i32> @test_v4i32(<4 x i32>* %X, <4 x i32>* %Y) {
 ; CHECK-LE-VSX: vmsumuhm
 ; CHECK-LE-VSX-NOT: mullw
 
-define <8 x i16> @test_v8i16(<8 x i16>* %X, <8 x i16>* %Y) {
-	%tmp = load <8 x i16>, <8 x i16>* %X		; <<8 x i16>> [#uses=1]
-	%tmp2 = load <8 x i16>, <8 x i16>* %Y		; <<8 x i16>> [#uses=1]
+define <8 x i16> @test_v8i16(ptr %X, ptr %Y) {
+	%tmp = load <8 x i16>, ptr %X		; <<8 x i16>> [#uses=1]
+	%tmp2 = load <8 x i16>, ptr %Y		; <<8 x i16>> [#uses=1]
 	%tmp3 = mul <8 x i16> %tmp, %tmp2		; <<8 x i16>> [#uses=1]
 	ret <8 x i16> %tmp3
 }
@@ -42,9 +42,9 @@ define <8 x i16> @test_v8i16(<8 x i16>* %X, <8 x i16>* %Y) {
 ; CHECK-LE-VSX: vmladduhm
 ; CHECK-LE-VSX-NOT: mullw
 
-define <16 x i8> @test_v16i8(<16 x i8>* %X, <16 x i8>* %Y) {
-	%tmp = load <16 x i8>, <16 x i8>* %X		; <<16 x i8>> [#uses=1]
-	%tmp2 = load <16 x i8>, <16 x i8>* %Y		; <<16 x i8>> [#uses=1]
+define <16 x i8> @test_v16i8(ptr %X, ptr %Y) {
+	%tmp = load <16 x i8>, ptr %X		; <<16 x i8>> [#uses=1]
+	%tmp2 = load <16 x i8>, ptr %Y		; <<16 x i8>> [#uses=1]
 	%tmp3 = mul <16 x i8> %tmp, %tmp2		; <<16 x i8>> [#uses=1]
 	ret <16 x i8> %tmp3
 }
@@ -67,9 +67,9 @@ define <16 x i8> @test_v16i8(<16 x i8>* %X, <16 x i8>* %Y) {
 ; CHECK-LE-VSX: vperm {{[0-9]+}}, [[REG2]], [[REG1]]
 ; CHECK-LE-VSX-NOT: mullw
 
-define <4 x float> @test_float(<4 x float>* %X, <4 x float>* %Y) {
-	%tmp = load <4 x float>, <4 x float>* %X
-	%tmp2 = load <4 x float>, <4 x float>* %Y
+define <4 x float> @test_float(ptr %X, ptr %Y) {
+	%tmp = load <4 x float>, ptr %X
+	%tmp2 = load <4 x float>, ptr %Y
 	%tmp3 = fmul <4 x float> %tmp, %tmp2
 	ret <4 x float> %tmp3
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec_perf_shuffle.ll b/llvm/test/CodeGen/PowerPC/vec_perf_shuffle.ll
index 9ada7ca658be..a1ef5c541cd4 100644
--- a/llvm/test/CodeGen/PowerPC/vec_perf_shuffle.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_perf_shuffle.ll
@@ -2,37 +2,37 @@
 
 ; TODO: Fix this case when disabling perfect shuffle
 
-define <4 x float> @test_uu72(<4 x float>* %P1, <4 x float>* %P2) {
-	%V1 = load <4 x float>, <4 x float>* %P1		; <<4 x float>> [#uses=1]
-	%V2 = load <4 x float>, <4 x float>* %P2		; <<4 x float>> [#uses=1]
+define <4 x float> @test_uu72(ptr %P1, ptr %P2) {
+	%V1 = load <4 x float>, ptr %P1		; <<4 x float>> [#uses=1]
+	%V2 = load <4 x float>, ptr %P2		; <<4 x float>> [#uses=1]
 	%V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 undef, i32 undef, i32 7, i32 2 >		; <<4 x float>> [#uses=1]
 	ret <4 x float> %V3
 }
 
-define <4 x float> @test_30u5(<4 x float>* %P1, <4 x float>* %P2) {
-	%V1 = load <4 x float>, <4 x float>* %P1		; <<4 x float>> [#uses=1]
-	%V2 = load <4 x float>, <4 x float>* %P2		; <<4 x float>> [#uses=1]
+define <4 x float> @test_30u5(ptr %P1, ptr %P2) {
+	%V1 = load <4 x float>, ptr %P1		; <<4 x float>> [#uses=1]
+	%V2 = load <4 x float>, ptr %P2		; <<4 x float>> [#uses=1]
 	%V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 3, i32 0, i32 undef, i32 5 >		; <<4 x float>> [#uses=1]
 	ret <4 x float> %V3
 }
 
-define <4 x float> @test_3u73(<4 x float>* %P1, <4 x float>* %P2) {
-	%V1 = load <4 x float>, <4 x float>* %P1		; <<4 x float>> [#uses=1]
-	%V2 = load <4 x float>, <4 x float>* %P2		; <<4 x float>> [#uses=1]
+define <4 x float> @test_3u73(ptr %P1, ptr %P2) {
+	%V1 = load <4 x float>, ptr %P1		; <<4 x float>> [#uses=1]
+	%V2 = load <4 x float>, ptr %P2		; <<4 x float>> [#uses=1]
 	%V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 3, i32 undef, i32 7, i32 3 >		; <<4 x float>> [#uses=1]
 	ret <4 x float> %V3
 }
 
-define <4 x float> @test_3774(<4 x float>* %P1, <4 x float>* %P2) {
-	%V1 = load <4 x float>, <4 x float>* %P1		; <<4 x float>> [#uses=1]
-	%V2 = load <4 x float>, <4 x float>* %P2		; <<4 x float>> [#uses=1]
+define <4 x float> @test_3774(ptr %P1, ptr %P2) {
+	%V1 = load <4 x float>, ptr %P1		; <<4 x float>> [#uses=1]
+	%V2 = load <4 x float>, ptr %P2		; <<4 x float>> [#uses=1]
 	%V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 3, i32 7, i32 7, i32 4 >		; <<4 x float>> [#uses=1]
 	ret <4 x float> %V3
 }
 
-define <4 x float> @test_4450(<4 x float>* %P1, <4 x float>* %P2) {
-	%V1 = load <4 x float>, <4 x float>* %P1		; <<4 x float>> [#uses=1]
-	%V2 = load <4 x float>, <4 x float>* %P2		; <<4 x float>> [#uses=1]
+define <4 x float> @test_4450(ptr %P1, ptr %P2) {
+	%V1 = load <4 x float>, ptr %P1		; <<4 x float>> [#uses=1]
+	%V2 = load <4 x float>, ptr %P2		; <<4 x float>> [#uses=1]
 	%V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 4, i32 4, i32 5, i32 0 >		; <<4 x float>> [#uses=1]
 	ret <4 x float> %V3
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec_shift.ll b/llvm/test/CodeGen/PowerPC/vec_shift.ll
index 6cdd0f4daf0e..45dbbbe8c0df 100644
--- a/llvm/test/CodeGen/PowerPC/vec_shift.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_shift.ll
@@ -1,10 +1,10 @@
 ; RUN: llc -verify-machineinstrs < %s  -mtriple=ppc32-- -mcpu=g5
 ; PR3628
 
-define void @update(<4 x i32> %val, <4 x i32>* %dst) nounwind {
+define void @update(<4 x i32> %val, ptr %dst) nounwind {
 entry:
 	%shl = shl <4 x i32> %val, < i32 4, i32 3, i32 2, i32 1 >
 	%shr = ashr <4 x i32> %shl, < i32 1, i32 2, i32 3, i32 4 >
-	store <4 x i32> %shr, <4 x i32>* %dst
+	store <4 x i32> %shr, ptr %dst
 	ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec_shuffle.ll b/llvm/test/CodeGen/PowerPC/vec_shuffle.ll
index 40885f80310e..7d4941dd2f55 100644
--- a/llvm/test/CodeGen/PowerPC/vec_shuffle.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_shuffle.ll
@@ -3,7 +3,7 @@
 ; RUN:   llc -mtriple=ppc32-- -mcpu=g5 | not grep vperm
 ; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- -mcpu=g5 | FileCheck %s
 
-define void @VSLDOI_xy(<8 x i16>* %A, <8 x i16>* %B) {
+define void @VSLDOI_xy(ptr %A, ptr %B) {
 ; CHECK-LABEL: VSLDOI_xy:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -12,8 +12,8 @@ define void @VSLDOI_xy(<8 x i16>* %A, <8 x i16>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-	%tmp = load <8 x i16>, <8 x i16>* %A		; <<8 x i16>> [#uses=1]
-	%tmp2 = load <8 x i16>, <8 x i16>* %B		; <<8 x i16>> [#uses=1]
+	%tmp = load <8 x i16>, ptr %A		; <<8 x i16>> [#uses=1]
+	%tmp2 = load <8 x i16>, ptr %B		; <<8 x i16>> [#uses=1]
 	%tmp.upgrd.1 = bitcast <8 x i16> %tmp to <16 x i8>		; <<16 x i8>> [#uses=11]
 	%tmp2.upgrd.2 = bitcast <8 x i16> %tmp2 to <16 x i8>		; <<16 x i8>> [#uses=5]
 	%tmp.upgrd.3 = extractelement <16 x i8> %tmp.upgrd.1, i32 5		; <i8> [#uses=1]
@@ -49,19 +49,19 @@ entry:
 	%tmp32 = insertelement <16 x i8> %tmp31, i8 %tmp16, i32 14		; <<16 x i8>> [#uses=1]
 	%tmp33 = insertelement <16 x i8> %tmp32, i8 %tmp17, i32 15		; <<16 x i8>> [#uses=1]
 	%tmp33.upgrd.4 = bitcast <16 x i8> %tmp33 to <8 x i16>		; <<8 x i16>> [#uses=1]
-	store <8 x i16> %tmp33.upgrd.4, <8 x i16>* %A
+	store <8 x i16> %tmp33.upgrd.4, ptr %A
 	ret void
 }
 
-define void @VSLDOI_xx(<8 x i16>* %A, <8 x i16>* %B) {
+define void @VSLDOI_xx(ptr %A, ptr %B) {
 ; CHECK-LABEL: VSLDOI_xx:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lvx 2, 0, 3
 ; CHECK-NEXT:    vsldoi 2, 2, 2, 5
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
-	%tmp = load <8 x i16>, <8 x i16>* %A		; <<8 x i16>> [#uses=1]
-	%tmp2 = load <8 x i16>, <8 x i16>* %A		; <<8 x i16>> [#uses=1]
+	%tmp = load <8 x i16>, ptr %A		; <<8 x i16>> [#uses=1]
+	%tmp2 = load <8 x i16>, ptr %A		; <<8 x i16>> [#uses=1]
 	%tmp.upgrd.5 = bitcast <8 x i16> %tmp to <16 x i8>		; <<16 x i8>> [#uses=11]
 	%tmp2.upgrd.6 = bitcast <8 x i16> %tmp2 to <16 x i8>		; <<16 x i8>> [#uses=5]
 	%tmp.upgrd.7 = extractelement <16 x i8> %tmp.upgrd.5, i32 5		; <i8> [#uses=1]
@@ -97,11 +97,11 @@ define void @VSLDOI_xx(<8 x i16>* %A, <8 x i16>* %B) {
 	%tmp32 = insertelement <16 x i8> %tmp31, i8 %tmp16, i32 14		; <<16 x i8>> [#uses=1]
 	%tmp33 = insertelement <16 x i8> %tmp32, i8 %tmp17, i32 15		; <<16 x i8>> [#uses=1]
 	%tmp33.upgrd.8 = bitcast <16 x i8> %tmp33 to <8 x i16>		; <<8 x i16>> [#uses=1]
-	store <8 x i16> %tmp33.upgrd.8, <8 x i16>* %A
+	store <8 x i16> %tmp33.upgrd.8, ptr %A
 	ret void
 }
 
-define void @VPERM_promote(<8 x i16>* %A, <8 x i16>* %B) {
+define void @VPERM_promote(ptr %A, ptr %B) {
 ; CHECK-LABEL: VPERM_promote:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -111,19 +111,19 @@ define void @VPERM_promote(<8 x i16>* %A, <8 x i16>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-	%tmp = load <8 x i16>, <8 x i16>* %A		; <<8 x i16>> [#uses=1]
+	%tmp = load <8 x i16>, ptr %A		; <<8 x i16>> [#uses=1]
 	%tmp.upgrd.9 = bitcast <8 x i16> %tmp to <4 x i32>		; <<4 x i32>> [#uses=1]
-	%tmp2 = load <8 x i16>, <8 x i16>* %B		; <<8 x i16>> [#uses=1]
+	%tmp2 = load <8 x i16>, ptr %B		; <<8 x i16>> [#uses=1]
 	%tmp2.upgrd.10 = bitcast <8 x i16> %tmp2 to <4 x i32>		; <<4 x i32>> [#uses=1]
 	%tmp3 = call <4 x i32> @llvm.ppc.altivec.vperm( <4 x i32> %tmp.upgrd.9, <4 x i32> %tmp2.upgrd.10, <16 x i8> < i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14 > )		; <<4 x i32>> [#uses=1]
 	%tmp3.upgrd.11 = bitcast <4 x i32> %tmp3 to <8 x i16>		; <<8 x i16>> [#uses=1]
-	store <8 x i16> %tmp3.upgrd.11, <8 x i16>* %A
+	store <8 x i16> %tmp3.upgrd.11, ptr %A
 	ret void
 }
 
 declare <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32>, <4 x i32>, <16 x i8>)
 
-define void @tb_l(<16 x i8>* %A, <16 x i8>* %B) {
+define void @tb_l(ptr %A, ptr %B) {
 ; CHECK-LABEL: tb_l:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -132,8 +132,8 @@ define void @tb_l(<16 x i8>* %A, <16 x i8>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-	%tmp = load <16 x i8>, <16 x i8>* %A		; <<16 x i8>> [#uses=8]
-	%tmp2 = load <16 x i8>, <16 x i8>* %B		; <<16 x i8>> [#uses=8]
+	%tmp = load <16 x i8>, ptr %A		; <<16 x i8>> [#uses=8]
+	%tmp2 = load <16 x i8>, ptr %B		; <<16 x i8>> [#uses=8]
 	%tmp.upgrd.12 = extractelement <16 x i8> %tmp, i32 8		; <i8> [#uses=1]
 	%tmp3 = extractelement <16 x i8> %tmp2, i32 8		; <i8> [#uses=1]
 	%tmp4 = extractelement <16 x i8> %tmp, i32 9		; <i8> [#uses=1]
@@ -166,11 +166,11 @@ entry:
 	%tmp31 = insertelement <16 x i8> %tmp30, i8 %tmp15, i32 13		; <<16 x i8>> [#uses=1]
 	%tmp32 = insertelement <16 x i8> %tmp31, i8 %tmp16, i32 14		; <<16 x i8>> [#uses=1]
 	%tmp33 = insertelement <16 x i8> %tmp32, i8 %tmp17, i32 15		; <<16 x i8>> [#uses=1]
-	store <16 x i8> %tmp33, <16 x i8>* %A
+	store <16 x i8> %tmp33, ptr %A
 	ret void
 }
 
-define void @th_l(<8 x i16>* %A, <8 x i16>* %B) {
+define void @th_l(ptr %A, ptr %B) {
 ; CHECK-LABEL: th_l:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -179,8 +179,8 @@ define void @th_l(<8 x i16>* %A, <8 x i16>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-	%tmp = load <8 x i16>, <8 x i16>* %A		; <<8 x i16>> [#uses=4]
-	%tmp2 = load <8 x i16>, <8 x i16>* %B		; <<8 x i16>> [#uses=4]
+	%tmp = load <8 x i16>, ptr %A		; <<8 x i16>> [#uses=4]
+	%tmp2 = load <8 x i16>, ptr %B		; <<8 x i16>> [#uses=4]
 	%tmp.upgrd.13 = extractelement <8 x i16> %tmp, i32 4		; <i16> [#uses=1]
 	%tmp3 = extractelement <8 x i16> %tmp2, i32 4		; <i16> [#uses=1]
 	%tmp4 = extractelement <8 x i16> %tmp, i32 5		; <i16> [#uses=1]
@@ -197,11 +197,11 @@ entry:
 	%tmp15 = insertelement <8 x i16> %tmp14, i16 %tmp7, i32 5		; <<8 x i16>> [#uses=1]
 	%tmp16 = insertelement <8 x i16> %tmp15, i16 %tmp8, i32 6		; <<8 x i16>> [#uses=1]
 	%tmp17 = insertelement <8 x i16> %tmp16, i16 %tmp9, i32 7		; <<8 x i16>> [#uses=1]
-	store <8 x i16> %tmp17, <8 x i16>* %A
+	store <8 x i16> %tmp17, ptr %A
 	ret void
 }
 
-define void @tw_l(<4 x i32>* %A, <4 x i32>* %B) {
+define void @tw_l(ptr %A, ptr %B) {
 ; CHECK-LABEL: tw_l:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -210,8 +210,8 @@ define void @tw_l(<4 x i32>* %A, <4 x i32>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-	%tmp = load <4 x i32>, <4 x i32>* %A		; <<4 x i32>> [#uses=2]
-	%tmp2 = load <4 x i32>, <4 x i32>* %B		; <<4 x i32>> [#uses=2]
+	%tmp = load <4 x i32>, ptr %A		; <<4 x i32>> [#uses=2]
+	%tmp2 = load <4 x i32>, ptr %B		; <<4 x i32>> [#uses=2]
 	%tmp.upgrd.14 = extractelement <4 x i32> %tmp, i32 2		; <i32> [#uses=1]
 	%tmp3 = extractelement <4 x i32> %tmp2, i32 2		; <i32> [#uses=1]
 	%tmp4 = extractelement <4 x i32> %tmp, i32 3		; <i32> [#uses=1]
@@ -220,11 +220,11 @@ entry:
 	%tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 1		; <<4 x i32>> [#uses=1]
 	%tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 2		; <<4 x i32>> [#uses=1]
 	%tmp9 = insertelement <4 x i32> %tmp8, i32 %tmp5, i32 3		; <<4 x i32>> [#uses=1]
-	store <4 x i32> %tmp9, <4 x i32>* %A
+	store <4 x i32> %tmp9, ptr %A
 	ret void
 }
 
-define void @tb_h(<16 x i8>* %A, <16 x i8>* %B) {
+define void @tb_h(ptr %A, ptr %B) {
 ; CHECK-LABEL: tb_h:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -233,8 +233,8 @@ define void @tb_h(<16 x i8>* %A, <16 x i8>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-	%tmp = load <16 x i8>, <16 x i8>* %A		; <<16 x i8>> [#uses=8]
-	%tmp2 = load <16 x i8>, <16 x i8>* %B		; <<16 x i8>> [#uses=8]
+	%tmp = load <16 x i8>, ptr %A		; <<16 x i8>> [#uses=8]
+	%tmp2 = load <16 x i8>, ptr %B		; <<16 x i8>> [#uses=8]
 	%tmp.upgrd.15 = extractelement <16 x i8> %tmp, i32 0		; <i8> [#uses=1]
 	%tmp3 = extractelement <16 x i8> %tmp2, i32 0		; <i8> [#uses=1]
 	%tmp4 = extractelement <16 x i8> %tmp, i32 1		; <i8> [#uses=1]
@@ -267,11 +267,11 @@ entry:
 	%tmp31 = insertelement <16 x i8> %tmp30, i8 %tmp15, i32 13		; <<16 x i8>> [#uses=1]
 	%tmp32 = insertelement <16 x i8> %tmp31, i8 %tmp16, i32 14		; <<16 x i8>> [#uses=1]
 	%tmp33 = insertelement <16 x i8> %tmp32, i8 %tmp17, i32 15		; <<16 x i8>> [#uses=1]
-	store <16 x i8> %tmp33, <16 x i8>* %A
+	store <16 x i8> %tmp33, ptr %A
 	ret void
 }
 
-define void @th_h(<8 x i16>* %A, <8 x i16>* %B) {
+define void @th_h(ptr %A, ptr %B) {
 ; CHECK-LABEL: th_h:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -280,8 +280,8 @@ define void @th_h(<8 x i16>* %A, <8 x i16>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-	%tmp = load <8 x i16>, <8 x i16>* %A		; <<8 x i16>> [#uses=4]
-	%tmp2 = load <8 x i16>, <8 x i16>* %B		; <<8 x i16>> [#uses=4]
+	%tmp = load <8 x i16>, ptr %A		; <<8 x i16>> [#uses=4]
+	%tmp2 = load <8 x i16>, ptr %B		; <<8 x i16>> [#uses=4]
 	%tmp.upgrd.16 = extractelement <8 x i16> %tmp, i32 0		; <i16> [#uses=1]
 	%tmp3 = extractelement <8 x i16> %tmp2, i32 0		; <i16> [#uses=1]
 	%tmp4 = extractelement <8 x i16> %tmp, i32 1		; <i16> [#uses=1]
@@ -298,11 +298,11 @@ entry:
 	%tmp15 = insertelement <8 x i16> %tmp14, i16 %tmp7, i32 5		; <<8 x i16>> [#uses=1]
 	%tmp16 = insertelement <8 x i16> %tmp15, i16 %tmp8, i32 6		; <<8 x i16>> [#uses=1]
 	%tmp17 = insertelement <8 x i16> %tmp16, i16 %tmp9, i32 7		; <<8 x i16>> [#uses=1]
-	store <8 x i16> %tmp17, <8 x i16>* %A
+	store <8 x i16> %tmp17, ptr %A
 	ret void
 }
 
-define void @tw_h(<4 x i32>* %A, <4 x i32>* %B) {
+define void @tw_h(ptr %A, ptr %B) {
 ; CHECK-LABEL: tw_h:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -311,8 +311,8 @@ define void @tw_h(<4 x i32>* %A, <4 x i32>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-	%tmp = load <4 x i32>, <4 x i32>* %A		; <<4 x i32>> [#uses=2]
-	%tmp2 = load <4 x i32>, <4 x i32>* %B		; <<4 x i32>> [#uses=2]
+	%tmp = load <4 x i32>, ptr %A		; <<4 x i32>> [#uses=2]
+	%tmp2 = load <4 x i32>, ptr %B		; <<4 x i32>> [#uses=2]
 	%tmp.upgrd.17 = extractelement <4 x i32> %tmp2, i32 0		; <i32> [#uses=1]
 	%tmp3 = extractelement <4 x i32> %tmp, i32 0		; <i32> [#uses=1]
 	%tmp4 = extractelement <4 x i32> %tmp2, i32 1		; <i32> [#uses=1]
@@ -321,11 +321,11 @@ entry:
 	%tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 1		; <<4 x i32>> [#uses=1]
 	%tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 2		; <<4 x i32>> [#uses=1]
 	%tmp9 = insertelement <4 x i32> %tmp8, i32 %tmp5, i32 3		; <<4 x i32>> [#uses=1]
-	store <4 x i32> %tmp9, <4 x i32>* %A
+	store <4 x i32> %tmp9, ptr %A
 	ret void
 }
 
-define void @tw_h_flop(<4 x i32>* %A, <4 x i32>* %B) {
+define void @tw_h_flop(ptr %A, ptr %B) {
 ; CHECK-LABEL: tw_h_flop:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -333,8 +333,8 @@ define void @tw_h_flop(<4 x i32>* %A, <4 x i32>* %B) {
 ; CHECK-NEXT:    vmrghw 2, 2, 3
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
-	%tmp = load <4 x i32>, <4 x i32>* %A		; <<4 x i32>> [#uses=2]
-	%tmp2 = load <4 x i32>, <4 x i32>* %B		; <<4 x i32>> [#uses=2]
+	%tmp = load <4 x i32>, ptr %A		; <<4 x i32>> [#uses=2]
+	%tmp2 = load <4 x i32>, ptr %B		; <<4 x i32>> [#uses=2]
 	%tmp.upgrd.18 = extractelement <4 x i32> %tmp, i32 0		; <i32> [#uses=1]
 	%tmp3 = extractelement <4 x i32> %tmp2, i32 0		; <i32> [#uses=1]
 	%tmp4 = extractelement <4 x i32> %tmp, i32 1		; <i32> [#uses=1]
@@ -343,11 +343,11 @@ define void @tw_h_flop(<4 x i32>* %A, <4 x i32>* %B) {
 	%tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 1		; <<4 x i32>> [#uses=1]
 	%tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 2		; <<4 x i32>> [#uses=1]
 	%tmp9 = insertelement <4 x i32> %tmp8, i32 %tmp5, i32 3		; <<4 x i32>> [#uses=1]
-	store <4 x i32> %tmp9, <4 x i32>* %A
+	store <4 x i32> %tmp9, ptr %A
 	ret void
 }
 
-define void @VMRG_UNARY_tb_l(<16 x i8>* %A, <16 x i8>* %B) {
+define void @VMRG_UNARY_tb_l(ptr %A, ptr %B) {
 ; CHECK-LABEL: VMRG_UNARY_tb_l:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -355,7 +355,7 @@ define void @VMRG_UNARY_tb_l(<16 x i8>* %A, <16 x i8>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-	%tmp = load <16 x i8>, <16 x i8>* %A		; <<16 x i8>> [#uses=16]
+	%tmp = load <16 x i8>, ptr %A		; <<16 x i8>> [#uses=16]
 	%tmp.upgrd.19 = extractelement <16 x i8> %tmp, i32 8		; <i8> [#uses=1]
 	%tmp3 = extractelement <16 x i8> %tmp, i32 8		; <i8> [#uses=1]
 	%tmp4 = extractelement <16 x i8> %tmp, i32 9		; <i8> [#uses=1]
@@ -388,11 +388,11 @@ entry:
 	%tmp31 = insertelement <16 x i8> %tmp30, i8 %tmp15, i32 13		; <<16 x i8>> [#uses=1]
 	%tmp32 = insertelement <16 x i8> %tmp31, i8 %tmp16, i32 14		; <<16 x i8>> [#uses=1]
 	%tmp33 = insertelement <16 x i8> %tmp32, i8 %tmp17, i32 15		; <<16 x i8>> [#uses=1]
-	store <16 x i8> %tmp33, <16 x i8>* %A
+	store <16 x i8> %tmp33, ptr %A
 	ret void
 }
 
-define void @VMRG_UNARY_th_l(<8 x i16>* %A, <8 x i16>* %B) {
+define void @VMRG_UNARY_th_l(ptr %A, ptr %B) {
 ; CHECK-LABEL: VMRG_UNARY_th_l:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -400,7 +400,7 @@ define void @VMRG_UNARY_th_l(<8 x i16>* %A, <8 x i16>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-	%tmp = load <8 x i16>, <8 x i16>* %A		; <<8 x i16>> [#uses=8]
+	%tmp = load <8 x i16>, ptr %A		; <<8 x i16>> [#uses=8]
 	%tmp.upgrd.20 = extractelement <8 x i16> %tmp, i32 4		; <i16> [#uses=1]
 	%tmp3 = extractelement <8 x i16> %tmp, i32 4		; <i16> [#uses=1]
 	%tmp4 = extractelement <8 x i16> %tmp, i32 5		; <i16> [#uses=1]
@@ -417,11 +417,11 @@ entry:
 	%tmp15 = insertelement <8 x i16> %tmp14, i16 %tmp7, i32 5		; <<8 x i16>> [#uses=1]
 	%tmp16 = insertelement <8 x i16> %tmp15, i16 %tmp8, i32 6		; <<8 x i16>> [#uses=1]
 	%tmp17 = insertelement <8 x i16> %tmp16, i16 %tmp9, i32 7		; <<8 x i16>> [#uses=1]
-	store <8 x i16> %tmp17, <8 x i16>* %A
+	store <8 x i16> %tmp17, ptr %A
 	ret void
 }
 
-define void @VMRG_UNARY_tw_l(<4 x i32>* %A, <4 x i32>* %B) {
+define void @VMRG_UNARY_tw_l(ptr %A, ptr %B) {
 ; CHECK-LABEL: VMRG_UNARY_tw_l:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -429,7 +429,7 @@ define void @VMRG_UNARY_tw_l(<4 x i32>* %A, <4 x i32>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-	%tmp = load <4 x i32>, <4 x i32>* %A		; <<4 x i32>> [#uses=4]
+	%tmp = load <4 x i32>, ptr %A		; <<4 x i32>> [#uses=4]
 	%tmp.upgrd.21 = extractelement <4 x i32> %tmp, i32 2		; <i32> [#uses=1]
 	%tmp3 = extractelement <4 x i32> %tmp, i32 2		; <i32> [#uses=1]
 	%tmp4 = extractelement <4 x i32> %tmp, i32 3		; <i32> [#uses=1]
@@ -438,11 +438,11 @@ entry:
 	%tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 1		; <<4 x i32>> [#uses=1]
 	%tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 2		; <<4 x i32>> [#uses=1]
 	%tmp9 = insertelement <4 x i32> %tmp8, i32 %tmp5, i32 3		; <<4 x i32>> [#uses=1]
-	store <4 x i32> %tmp9, <4 x i32>* %A
+	store <4 x i32> %tmp9, ptr %A
 	ret void
 }
 
-define void @VMRG_UNARY_tb_h(<16 x i8>* %A, <16 x i8>* %B) {
+define void @VMRG_UNARY_tb_h(ptr %A, ptr %B) {
 ; CHECK-LABEL: VMRG_UNARY_tb_h:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -450,7 +450,7 @@ define void @VMRG_UNARY_tb_h(<16 x i8>* %A, <16 x i8>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-	%tmp = load <16 x i8>, <16 x i8>* %A		; <<16 x i8>> [#uses=16]
+	%tmp = load <16 x i8>, ptr %A		; <<16 x i8>> [#uses=16]
 	%tmp.upgrd.22 = extractelement <16 x i8> %tmp, i32 0		; <i8> [#uses=1]
 	%tmp3 = extractelement <16 x i8> %tmp, i32 0		; <i8> [#uses=1]
 	%tmp4 = extractelement <16 x i8> %tmp, i32 1		; <i8> [#uses=1]
@@ -483,11 +483,11 @@ entry:
 	%tmp31 = insertelement <16 x i8> %tmp30, i8 %tmp15, i32 13		; <<16 x i8>> [#uses=1]
 	%tmp32 = insertelement <16 x i8> %tmp31, i8 %tmp16, i32 14		; <<16 x i8>> [#uses=1]
 	%tmp33 = insertelement <16 x i8> %tmp32, i8 %tmp17, i32 15		; <<16 x i8>> [#uses=1]
-	store <16 x i8> %tmp33, <16 x i8>* %A
+	store <16 x i8> %tmp33, ptr %A
 	ret void
 }
 
-define void @VMRG_UNARY_th_h(<8 x i16>* %A, <8 x i16>* %B) {
+define void @VMRG_UNARY_th_h(ptr %A, ptr %B) {
 ; CHECK-LABEL: VMRG_UNARY_th_h:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -495,7 +495,7 @@ define void @VMRG_UNARY_th_h(<8 x i16>* %A, <8 x i16>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-	%tmp = load <8 x i16>, <8 x i16>* %A		; <<8 x i16>> [#uses=8]
+	%tmp = load <8 x i16>, ptr %A		; <<8 x i16>> [#uses=8]
 	%tmp.upgrd.23 = extractelement <8 x i16> %tmp, i32 0		; <i16> [#uses=1]
 	%tmp3 = extractelement <8 x i16> %tmp, i32 0		; <i16> [#uses=1]
 	%tmp4 = extractelement <8 x i16> %tmp, i32 1		; <i16> [#uses=1]
@@ -512,11 +512,11 @@ entry:
 	%tmp15 = insertelement <8 x i16> %tmp14, i16 %tmp7, i32 5		; <<8 x i16>> [#uses=1]
 	%tmp16 = insertelement <8 x i16> %tmp15, i16 %tmp8, i32 6		; <<8 x i16>> [#uses=1]
 	%tmp17 = insertelement <8 x i16> %tmp16, i16 %tmp9, i32 7		; <<8 x i16>> [#uses=1]
-	store <8 x i16> %tmp17, <8 x i16>* %A
+	store <8 x i16> %tmp17, ptr %A
 	ret void
 }
 
-define void @VMRG_UNARY_tw_h(<4 x i32>* %A, <4 x i32>* %B) {
+define void @VMRG_UNARY_tw_h(ptr %A, ptr %B) {
 ; CHECK-LABEL: VMRG_UNARY_tw_h:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -524,7 +524,7 @@ define void @VMRG_UNARY_tw_h(<4 x i32>* %A, <4 x i32>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-	%tmp = load <4 x i32>, <4 x i32>* %A		; <<4 x i32>> [#uses=4]
+	%tmp = load <4 x i32>, ptr %A		; <<4 x i32>> [#uses=4]
 	%tmp.upgrd.24 = extractelement <4 x i32> %tmp, i32 0		; <i32> [#uses=1]
 	%tmp3 = extractelement <4 x i32> %tmp, i32 0		; <i32> [#uses=1]
 	%tmp4 = extractelement <4 x i32> %tmp, i32 1		; <i32> [#uses=1]
@@ -533,11 +533,11 @@ entry:
 	%tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 1		; <<4 x i32>> [#uses=1]
 	%tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 2		; <<4 x i32>> [#uses=1]
 	%tmp9 = insertelement <4 x i32> %tmp8, i32 %tmp5, i32 3		; <<4 x i32>> [#uses=1]
-	store <4 x i32> %tmp9, <4 x i32>* %A
+	store <4 x i32> %tmp9, ptr %A
 	ret void
 }
 
-define void @VPCKUHUM_unary(<8 x i16>* %A, <8 x i16>* %B) {
+define void @VPCKUHUM_unary(ptr %A, ptr %B) {
 ; CHECK-LABEL: VPCKUHUM_unary:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -545,7 +545,7 @@ define void @VPCKUHUM_unary(<8 x i16>* %A, <8 x i16>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-	%tmp = load <8 x i16>, <8 x i16>* %A		; <<8 x i16>> [#uses=2]
+	%tmp = load <8 x i16>, ptr %A		; <<8 x i16>> [#uses=2]
 	%tmp.upgrd.25 = bitcast <8 x i16> %tmp to <16 x i8>		; <<16 x i8>> [#uses=8]
 	%tmp3 = bitcast <8 x i16> %tmp to <16 x i8>		; <<16 x i8>> [#uses=8]
 	%tmp.upgrd.26 = extractelement <16 x i8> %tmp.upgrd.25, i32 1		; <i8> [#uses=1]
@@ -581,11 +581,11 @@ entry:
 	%tmp33 = insertelement <16 x i8> %tmp32, i8 %tmp17, i32 14		; <<16 x i8>> [#uses=1]
 	%tmp34 = insertelement <16 x i8> %tmp33, i8 %tmp18, i32 15		; <<16 x i8>> [#uses=1]
 	%tmp34.upgrd.27 = bitcast <16 x i8> %tmp34 to <8 x i16>		; <<8 x i16>> [#uses=1]
-	store <8 x i16> %tmp34.upgrd.27, <8 x i16>* %A
+	store <8 x i16> %tmp34.upgrd.27, ptr %A
 	ret void
 }
 
-define void @VPCKUWUM_unary(<4 x i32>* %A, <4 x i32>* %B) {
+define void @VPCKUWUM_unary(ptr %A, ptr %B) {
 ; CHECK-LABEL: VPCKUWUM_unary:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -593,7 +593,7 @@ define void @VPCKUWUM_unary(<4 x i32>* %A, <4 x i32>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-	%tmp = load <4 x i32>, <4 x i32>* %A		; <<4 x i32>> [#uses=2]
+	%tmp = load <4 x i32>, ptr %A		; <<4 x i32>> [#uses=2]
 	%tmp.upgrd.28 = bitcast <4 x i32> %tmp to <8 x i16>		; <<8 x i16>> [#uses=4]
 	%tmp3 = bitcast <4 x i32> %tmp to <8 x i16>		; <<8 x i16>> [#uses=4]
 	%tmp.upgrd.29 = extractelement <8 x i16> %tmp.upgrd.28, i32 1		; <i16> [#uses=1]
@@ -613,6 +613,6 @@ entry:
 	%tmp17 = insertelement <8 x i16> %tmp16, i16 %tmp9, i32 6		; <<8 x i16>> [#uses=1]
 	%tmp18 = insertelement <8 x i16> %tmp17, i16 %tmp10, i32 7		; <<8 x i16>> [#uses=1]
 	%tmp18.upgrd.30 = bitcast <8 x i16> %tmp18 to <4 x i32>		; <<4 x i32>> [#uses=1]
-	store <4 x i32> %tmp18.upgrd.30, <4 x i32>* %A
+	store <4 x i32> %tmp18.upgrd.30, ptr %A
 	ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec_shuffle_le.ll b/llvm/test/CodeGen/PowerPC/vec_shuffle_le.ll
index 2d456d06b258..67661ae05c38 100644
--- a/llvm/test/CodeGen/PowerPC/vec_shuffle_le.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_shuffle_le.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64le-unknown-linux-gnu -mattr=+altivec -mattr=-vsx -mcpu=pwr7 | FileCheck %s
 
-define void @VPKUHUM_xy(<16 x i8>* %A, <16 x i8>* %B) {
+define void @VPKUHUM_xy(ptr %A, ptr %B) {
 ; CHECK-LABEL: VPKUHUM_xy:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -10,14 +10,14 @@ define void @VPKUHUM_xy(<16 x i8>* %A, <16 x i8>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-        %tmp = load <16 x i8>, <16 x i8>* %A
-        %tmp2 = load <16 x i8>, <16 x i8>* %B
+        %tmp = load <16 x i8>, ptr %A
+        %tmp2 = load <16 x i8>, ptr %B
         %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
-        store <16 x i8> %tmp3, <16 x i8>* %A
+        store <16 x i8> %tmp3, ptr %A
         ret void
 }
 
-define void @VPKUHUM_xx(<16 x i8>* %A) {
+define void @VPKUHUM_xx(ptr %A) {
 ; CHECK-LABEL: VPKUHUM_xx:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -25,13 +25,13 @@ define void @VPKUHUM_xx(<16 x i8>* %A) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-        %tmp = load <16 x i8>, <16 x i8>* %A
+        %tmp = load <16 x i8>, ptr %A
         %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
-        store <16 x i8> %tmp2, <16 x i8>* %A
+        store <16 x i8> %tmp2, ptr %A
         ret void
 }
 
-define void @VPKUWUM_xy(<16 x i8>* %A, <16 x i8>* %B) {
+define void @VPKUWUM_xy(ptr %A, ptr %B) {
 ; CHECK-LABEL: VPKUWUM_xy:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -40,14 +40,14 @@ define void @VPKUWUM_xy(<16 x i8>* %A, <16 x i8>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-        %tmp = load <16 x i8>, <16 x i8>* %A
-        %tmp2 = load <16 x i8>, <16 x i8>* %B
+        %tmp = load <16 x i8>, ptr %A
+        %tmp2 = load <16 x i8>, ptr %B
         %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13, i32 16, i32 17, i32 20, i32 21, i32 24, i32 25, i32 28, i32 29>
-        store <16 x i8> %tmp3, <16 x i8>* %A
+        store <16 x i8> %tmp3, ptr %A
         ret void
 }
 
-define void @VPKUWUM_xx(<16 x i8>* %A) {
+define void @VPKUWUM_xx(ptr %A) {
 ; CHECK-LABEL: VPKUWUM_xx:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -55,13 +55,13 @@ define void @VPKUWUM_xx(<16 x i8>* %A) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-        %tmp = load <16 x i8>, <16 x i8>* %A
+        %tmp = load <16 x i8>, ptr %A
         %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13, i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13>
-        store <16 x i8> %tmp2, <16 x i8>* %A
+        store <16 x i8> %tmp2, ptr %A
         ret void
 }
 
-define void @VMRGLB_xy(<16 x i8>* %A, <16 x i8>* %B) {
+define void @VMRGLB_xy(ptr %A, ptr %B) {
 ; CHECK-LABEL: VMRGLB_xy:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -70,14 +70,14 @@ define void @VMRGLB_xy(<16 x i8>* %A, <16 x i8>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-        %tmp = load <16 x i8>, <16 x i8>* %A
-        %tmp2 = load <16 x i8>, <16 x i8>* %B
+        %tmp = load <16 x i8>, ptr %A
+        %tmp2 = load <16 x i8>, ptr %B
         %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
-        store <16 x i8> %tmp3, <16 x i8>* %A
+        store <16 x i8> %tmp3, ptr %A
         ret void
 }
 
-define void @VMRGLB_xx(<16 x i8>* %A) {
+define void @VMRGLB_xx(ptr %A) {
 ; CHECK-LABEL: VMRGLB_xx:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -85,13 +85,13 @@ define void @VMRGLB_xx(<16 x i8>* %A) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-        %tmp = load <16 x i8>, <16 x i8>* %A
+        %tmp = load <16 x i8>, ptr %A
         %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3, i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
-        store <16 x i8> %tmp2, <16 x i8>* %A
+        store <16 x i8> %tmp2, ptr %A
         ret void
 }
 
-define void @VMRGHB_xy(<16 x i8>* %A, <16 x i8>* %B) {
+define void @VMRGHB_xy(ptr %A, ptr %B) {
 ; CHECK-LABEL: VMRGHB_xy:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -100,14 +100,14 @@ define void @VMRGHB_xy(<16 x i8>* %A, <16 x i8>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-        %tmp = load <16 x i8>, <16 x i8>* %A
-        %tmp2 = load <16 x i8>, <16 x i8>* %B
+        %tmp = load <16 x i8>, ptr %A
+        %tmp2 = load <16 x i8>, ptr %B
         %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
-        store <16 x i8> %tmp3, <16 x i8>* %A
+        store <16 x i8> %tmp3, ptr %A
         ret void
 }
 
-define void @VMRGHB_xx(<16 x i8>* %A) {
+define void @VMRGHB_xx(ptr %A) {
 ; CHECK-LABEL: VMRGHB_xx:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -115,13 +115,13 @@ define void @VMRGHB_xx(<16 x i8>* %A) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-        %tmp = load <16 x i8>, <16 x i8>* %A
+        %tmp = load <16 x i8>, ptr %A
         %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 8, i32 9, i32 9, i32 10, i32 10, i32 11, i32 11, i32 12, i32 12, i32 13, i32 13, i32 14, i32 14, i32 15, i32 15>
-        store <16 x i8> %tmp2, <16 x i8>* %A
+        store <16 x i8> %tmp2, ptr %A
         ret void
 }
 
-define void @VMRGLH_xy(<16 x i8>* %A, <16 x i8>* %B) {
+define void @VMRGLH_xy(ptr %A, ptr %B) {
 ; CHECK-LABEL: VMRGLH_xy:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -130,14 +130,14 @@ define void @VMRGLH_xy(<16 x i8>* %A, <16 x i8>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-        %tmp = load <16 x i8>, <16 x i8>* %A
-        %tmp2 = load <16 x i8>, <16 x i8>* %B
+        %tmp = load <16 x i8>, ptr %A
+        %tmp2 = load <16 x i8>, ptr %B
         %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 16, i32 17, i32 2, i32 3, i32 18, i32 19, i32 4, i32 5, i32 20, i32 21, i32 6, i32 7, i32 22, i32 23>
-        store <16 x i8> %tmp3, <16 x i8>* %A
+        store <16 x i8> %tmp3, ptr %A
         ret void
 }
 
-define void @VMRGLH_xx(<16 x i8>* %A) {
+define void @VMRGLH_xx(ptr %A) {
 ; CHECK-LABEL: VMRGLH_xx:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -145,13 +145,13 @@ define void @VMRGLH_xx(<16 x i8>* %A) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-        %tmp = load <16 x i8>, <16 x i8>* %A
+        %tmp = load <16 x i8>, ptr %A
         %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 4, i32 5, i32 4, i32 5, i32 6, i32 7, i32 6, i32 7>
-        store <16 x i8> %tmp2, <16 x i8>* %A
+        store <16 x i8> %tmp2, ptr %A
         ret void
 }
 
-define void @VMRGHH_xy(<16 x i8>* %A, <16 x i8>* %B) {
+define void @VMRGHH_xy(ptr %A, ptr %B) {
 ; CHECK-LABEL: VMRGHH_xy:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -160,14 +160,14 @@ define void @VMRGHH_xy(<16 x i8>* %A, <16 x i8>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-        %tmp = load <16 x i8>, <16 x i8>* %A
-        %tmp2 = load <16 x i8>, <16 x i8>* %B
+        %tmp = load <16 x i8>, ptr %A
+        %tmp2 = load <16 x i8>, ptr %B
         %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 9, i32 24, i32 25, i32 10, i32 11, i32 26, i32 27, i32 12, i32 13, i32 28, i32 29, i32 14, i32 15, i32 30, i32 31>
-        store <16 x i8> %tmp3, <16 x i8>* %A
+        store <16 x i8> %tmp3, ptr %A
         ret void
 }
 
-define void @VMRGHH_xx(<16 x i8>* %A) {
+define void @VMRGHH_xx(ptr %A) {
 ; CHECK-LABEL: VMRGHH_xx:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -175,13 +175,13 @@ define void @VMRGHH_xx(<16 x i8>* %A) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-        %tmp = load <16 x i8>, <16 x i8>* %A
+        %tmp = load <16 x i8>, ptr %A
         %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 9, i32 8, i32 9, i32 10, i32 11, i32 10, i32 11, i32 12, i32 13, i32 12, i32 13, i32 14, i32 15, i32 14, i32 15>
-        store <16 x i8> %tmp2, <16 x i8>* %A
+        store <16 x i8> %tmp2, ptr %A
         ret void
 }
 
-define void @VMRGLW_xy(<16 x i8>* %A, <16 x i8>* %B) {
+define void @VMRGLW_xy(ptr %A, ptr %B) {
 ; CHECK-LABEL: VMRGLW_xy:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -190,14 +190,14 @@ define void @VMRGLW_xy(<16 x i8>* %A, <16 x i8>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-        %tmp = load <16 x i8>, <16 x i8>* %A
-        %tmp2 = load <16 x i8>, <16 x i8>* %B
+        %tmp = load <16 x i8>, ptr %A
+        %tmp2 = load <16 x i8>, ptr %B
         %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 16, i32 17, i32 18, i32 19, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23>
-        store <16 x i8> %tmp3, <16 x i8>* %A
+        store <16 x i8> %tmp3, ptr %A
         ret void
 }
 
-define void @VMRGLW_xx(<16 x i8>* %A) {
+define void @VMRGLW_xx(ptr %A) {
 ; CHECK-LABEL: VMRGLW_xx:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -205,13 +205,13 @@ define void @VMRGLW_xx(<16 x i8>* %A) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-        %tmp = load <16 x i8>, <16 x i8>* %A
+        %tmp = load <16 x i8>, ptr %A
         %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
-        store <16 x i8> %tmp2, <16 x i8>* %A
+        store <16 x i8> %tmp2, ptr %A
         ret void
 }
 
-define void @VMRGHW_xy(<16 x i8>* %A, <16 x i8>* %B) {
+define void @VMRGHW_xy(ptr %A, ptr %B) {
 ; CHECK-LABEL: VMRGHW_xy:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -220,14 +220,14 @@ define void @VMRGHW_xy(<16 x i8>* %A, <16 x i8>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-        %tmp = load <16 x i8>, <16 x i8>* %A
-        %tmp2 = load <16 x i8>, <16 x i8>* %B
+        %tmp = load <16 x i8>, ptr %A
+        %tmp2 = load <16 x i8>, ptr %B
         %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 24, i32 25, i32 26, i32 27, i32 12, i32 13, i32 14, i32 15, i32 28, i32 29, i32 30, i32 31>
-        store <16 x i8> %tmp3, <16 x i8>* %A
+        store <16 x i8> %tmp3, ptr %A
         ret void
 }
 
-define void @VMRGHW_xx(<16 x i8>* %A) {
+define void @VMRGHW_xx(ptr %A) {
 ; CHECK-LABEL: VMRGHW_xx:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -235,13 +235,13 @@ define void @VMRGHW_xx(<16 x i8>* %A) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-        %tmp = load <16 x i8>, <16 x i8>* %A
+        %tmp = load <16 x i8>, ptr %A
         %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 12, i32 13, i32 14, i32 15>
-        store <16 x i8> %tmp2, <16 x i8>* %A
+        store <16 x i8> %tmp2, ptr %A
         ret void
 }
 
-define void @VSLDOI_xy(<16 x i8>* %A, <16 x i8>* %B) {
+define void @VSLDOI_xy(ptr %A, ptr %B) {
 ; CHECK-LABEL: VSLDOI_xy:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -250,14 +250,14 @@ define void @VSLDOI_xy(<16 x i8>* %A, <16 x i8>* %B) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-        %tmp = load <16 x i8>, <16 x i8>* %A
-        %tmp2 = load <16 x i8>, <16 x i8>* %B
+        %tmp = load <16 x i8>, ptr %A
+        %tmp2 = load <16 x i8>, ptr %B
         %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27>
-        store <16 x i8> %tmp3, <16 x i8>* %A
+        store <16 x i8> %tmp3, ptr %A
         ret void
 }
 
-define void @VSLDOI_xx(<16 x i8>* %A) {
+define void @VSLDOI_xx(ptr %A) {
 ; CHECK-LABEL: VSLDOI_xx:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lvx 2, 0, 3
@@ -265,9 +265,9 @@ define void @VSLDOI_xx(<16 x i8>* %A) {
 ; CHECK-NEXT:    stvx 2, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-        %tmp = load <16 x i8>, <16 x i8>* %A
+        %tmp = load <16 x i8>, ptr %A
         %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
-        store <16 x i8> %tmp2, <16 x i8>* %A
+        store <16 x i8> %tmp2, ptr %A
         ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/vec_shuffle_p8vector.ll b/llvm/test/CodeGen/PowerPC/vec_shuffle_p8vector.ll
index 6201ac6911f5..5b50d71e9eda 100644
--- a/llvm/test/CodeGen/PowerPC/vec_shuffle_p8vector.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_shuffle_p8vector.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck -check-prefix=CHECK-PWR7 %s
 ; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mtriple=powerpc64-ibm-aix-xcoff -vec-extabi < %s | FileCheck -check-prefix=CHECK-PWR7-AIX %s
 
-define void @VPKUDUM_unary(<2 x i64>* %A) {
+define void @VPKUDUM_unary(ptr %A) {
 ; CHECK-LABEL: VPKUDUM_unary:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvw4x 34, 0, 3
@@ -31,7 +31,7 @@ define void @VPKUDUM_unary(<2 x i64>* %A) {
 ; CHECK-PWR7-AIX-NEXT:    stxvw4x 34, 0, 3
 ; CHECK-PWR7-AIX-NEXT:    blr
 entry:
-        %tmp = load <2 x i64>, <2 x i64>* %A
+        %tmp = load <2 x i64>, ptr %A
         %tmp2 = bitcast <2 x i64> %tmp to <4 x i32>
         %tmp3 = extractelement <4 x i32> %tmp2, i32 1
         %tmp4 = extractelement <4 x i32> %tmp2, i32 3
@@ -40,11 +40,11 @@ entry:
         %tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 2
         %tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 3
         %tmp9 = bitcast <4 x i32> %tmp8 to <2 x i64>
-        store <2 x i64> %tmp9, <2 x i64>* %A
+        store <2 x i64> %tmp9, ptr %A
         ret void
 }
 
-define void @VPKUDUM(<2 x i64>* %A, <2 x i64>* %B) {
+define void @VPKUDUM(ptr %A, ptr %B) {
 ; CHECK-LABEL: VPKUDUM:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvw4x 34, 0, 3
@@ -74,9 +74,9 @@ define void @VPKUDUM(<2 x i64>* %A, <2 x i64>* %B) {
 ; CHECK-PWR7-AIX-NEXT:    stxvw4x 34, 0, 3
 ; CHECK-PWR7-AIX-NEXT:    blr
 entry:
-        %tmp = load <2 x i64>, <2 x i64>* %A
+        %tmp = load <2 x i64>, ptr %A
         %tmp2 = bitcast <2 x i64> %tmp to <4 x i32>
-        %tmp3 = load <2 x i64>, <2 x i64>* %B
+        %tmp3 = load <2 x i64>, ptr %B
         %tmp4 = bitcast <2 x i64> %tmp3 to <4 x i32>
         %tmp5 = extractelement <4 x i32> %tmp2, i32 1
         %tmp6 = extractelement <4 x i32> %tmp2, i32 3
@@ -87,7 +87,7 @@ entry:
         %tmp11 = insertelement <4 x i32> %tmp10, i32 %tmp7, i32 2
         %tmp12 = insertelement <4 x i32> %tmp11, i32 %tmp8, i32 3
         %tmp13 = bitcast <4 x i32> %tmp12 to <2 x i64>
-        store <2 x i64> %tmp13, <2 x i64>* %A
+        store <2 x i64> %tmp13, ptr %A
         ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/vec_shuffle_p8vector_le.ll b/llvm/test/CodeGen/PowerPC/vec_shuffle_p8vector_le.ll
index 7af2946fb18c..2756716609bd 100644
--- a/llvm/test/CodeGen/PowerPC/vec_shuffle_p8vector_le.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_shuffle_p8vector_le.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -verify-machineinstrs -mcpu=pwr8 -mtriple=powerpc64le-unknown-linux-gnu -mattr=+power8-vector < %s | FileCheck %s
 
-define void @VPKUDUM_unary(<2 x i64>* %A) {
+define void @VPKUDUM_unary(ptr %A) {
 ; CHECK-LABEL: VPKUDUM_unary:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvd2x 0, 0, 3
@@ -11,7 +11,7 @@ define void @VPKUDUM_unary(<2 x i64>* %A) {
 ; CHECK-NEXT:    stxvd2x 0, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-	%tmp = load <2 x i64>, <2 x i64>* %A
+	%tmp = load <2 x i64>, ptr %A
 	%tmp2 = bitcast <2 x i64> %tmp to <4 x i32>
 	%tmp3 = extractelement <4 x i32> %tmp2, i32 0
 	%tmp4 = extractelement <4 x i32> %tmp2, i32 2
@@ -20,11 +20,11 @@ entry:
 	%tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 2
 	%tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 3
 	%tmp9 = bitcast <4 x i32> %tmp8 to <2 x i64>
-	store <2 x i64> %tmp9, <2 x i64>* %A
+	store <2 x i64> %tmp9, ptr %A
 	ret void
 }
 
-define void @VPKUDUM(<2 x i64>* %A, <2 x i64>* %B) {
+define void @VPKUDUM(ptr %A, ptr %B) {
 ; CHECK-LABEL: VPKUDUM:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvd2x 0, 0, 3
@@ -36,9 +36,9 @@ define void @VPKUDUM(<2 x i64>* %A, <2 x i64>* %B) {
 ; CHECK-NEXT:    stxvd2x 0, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-	%tmp = load <2 x i64>, <2 x i64>* %A
+	%tmp = load <2 x i64>, ptr %A
 	%tmp2 = bitcast <2 x i64> %tmp to <4 x i32>
-        %tmp3 = load <2 x i64>, <2 x i64>* %B
+        %tmp3 = load <2 x i64>, ptr %B
         %tmp4 = bitcast <2 x i64> %tmp3 to <4 x i32>
 	%tmp5 = extractelement <4 x i32> %tmp2, i32 0
 	%tmp6 = extractelement <4 x i32> %tmp2, i32 2
@@ -49,7 +49,7 @@ entry:
 	%tmp11 = insertelement <4 x i32> %tmp10, i32 %tmp7, i32 2
 	%tmp12 = insertelement <4 x i32> %tmp11, i32 %tmp8, i32 3
 	%tmp13 = bitcast <4 x i32> %tmp12 to <2 x i64>
-	store <2 x i64> %tmp13, <2 x i64>* %A
+	store <2 x i64> %tmp13, ptr %A
 	ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/vec_splat.ll b/llvm/test/CodeGen/PowerPC/vec_splat.ll
index 8aa00f82aa79..4cf566935b91 100644
--- a/llvm/test/CodeGen/PowerPC/vec_splat.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_splat.ll
@@ -7,7 +7,7 @@
 %f4 = type <4 x float>
 %i4 = type <4 x i32>
 
-define void @splat(%f4* %P, %f4* %Q, float %X) nounwind {
+define void @splat(ptr %P, ptr %Q, float %X) nounwind {
 ; G3-LABEL: splat:
 ; G3:       # %bb.0:
 ; G3-NEXT:    lfs 0, 12(4)
@@ -40,13 +40,13 @@ define void @splat(%f4* %P, %f4* %Q, float %X) nounwind {
   %tmp2 = insertelement %f4 %tmp, float %X, i32 1   ; <%f4> [#uses=1]
   %tmp4 = insertelement %f4 %tmp2, float %X, i32 2    ; <%f4> [#uses=1]
   %tmp6 = insertelement %f4 %tmp4, float %X, i32 3    ; <%f4> [#uses=1]
-  %q = load %f4, %f4* %Q         ; <%f4> [#uses=1]
+  %q = load %f4, ptr %Q         ; <%f4> [#uses=1]
   %R = fadd %f4 %q, %tmp6    ; <%f4> [#uses=1]
-  store %f4 %R, %f4* %P
+  store %f4 %R, ptr %P
   ret void
 }
 
-define void @splat_i4(%i4* %P, %i4* %Q, i32 %X) nounwind {
+define void @splat_i4(ptr %P, ptr %Q, i32 %X) nounwind {
 ; G3-LABEL: splat_i4:
 ; G3:       # %bb.0:
 ; G3-NEXT:    lwz 6, 12(4)
@@ -79,13 +79,13 @@ define void @splat_i4(%i4* %P, %i4* %Q, i32 %X) nounwind {
   %tmp2 = insertelement %i4 %tmp, i32 %X, i32 1     ; <%i4> [#uses=1]
   %tmp4 = insertelement %i4 %tmp2, i32 %X, i32 2    ; <%i4> [#uses=1]
   %tmp6 = insertelement %i4 %tmp4, i32 %X, i32 3    ; <%i4> [#uses=1]
-  %q = load %i4, %i4* %Q         ; <%i4> [#uses=1]
+  %q = load %i4, ptr %Q         ; <%i4> [#uses=1]
   %R = add %i4 %q, %tmp6    ; <%i4> [#uses=1]
-  store %i4 %R, %i4* %P
+  store %i4 %R, ptr %P
   ret void
 }
 
-define void @splat_imm_i32(%i4* %P, %i4* %Q, i32 %X) nounwind {
+define void @splat_imm_i32(ptr %P, ptr %Q, i32 %X) nounwind {
 ; G3-LABEL: splat_imm_i32:
 ; G3:       # %bb.0:
 ; G3-NEXT:    lwz 5, 12(4)
@@ -109,13 +109,13 @@ define void @splat_imm_i32(%i4* %P, %i4* %Q, i32 %X) nounwind {
 ; G5-NEXT:    vadduwm 2, 2, 3
 ; G5-NEXT:    stvx 2, 0, 3
 ; G5-NEXT:    blr
-  %q = load %i4, %i4* %Q         ; <%i4> [#uses=1]
+  %q = load %i4, ptr %Q         ; <%i4> [#uses=1]
   %R = add %i4 %q, < i32 -1, i32 -1, i32 -1, i32 -1 >       ; <%i4> [#uses=1]
-  store %i4 %R, %i4* %P
+  store %i4 %R, ptr %P
   ret void
 }
 
-define void @splat_imm_i16(%i4* %P, %i4* %Q, i32 %X) nounwind {
+define void @splat_imm_i16(ptr %P, ptr %Q, i32 %X) nounwind {
 ; G3-LABEL: splat_imm_i16:
 ; G3:       # %bb.0:
 ; G3-NEXT:    lwz 5, 8(4)
@@ -143,13 +143,13 @@ define void @splat_imm_i16(%i4* %P, %i4* %Q, i32 %X) nounwind {
 ; G5-NEXT:    vadduwm 2, 2, 3
 ; G5-NEXT:    stvx 2, 0, 3
 ; G5-NEXT:    blr
-  %q = load %i4, %i4* %Q         ; <%i4> [#uses=1]
+  %q = load %i4, ptr %Q         ; <%i4> [#uses=1]
   %R = add %i4 %q, < i32 65537, i32 65537, i32 65537, i32 65537 >   ; <%i4> [#uses=1]
-  store %i4 %R, %i4* %P
+  store %i4 %R, ptr %P
   ret void
 }
 
-define void @splat_h(i16 %tmp, <16 x i8>* %dst) nounwind {
+define void @splat_h(i16 %tmp, ptr %dst) nounwind {
 ; G3-LABEL: splat_h:
 ; G3:       # %bb.0:
 ; G3-NEXT:    sth 3, 14(4)
@@ -181,11 +181,11 @@ define void @splat_h(i16 %tmp, <16 x i8>* %dst) nounwind {
   %tmp77 = insertelement <8 x i16> %tmp76, i16 %tmp, i32 6
   %tmp78 = insertelement <8 x i16> %tmp77, i16 %tmp, i32 7
   %tmp78.upgrd.2 = bitcast <8 x i16> %tmp78 to <16 x i8>
-  store <16 x i8> %tmp78.upgrd.2, <16 x i8>* %dst
+  store <16 x i8> %tmp78.upgrd.2, ptr %dst
   ret void
 }
 
-define void @spltish(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define void @spltish(ptr %A, ptr %B) nounwind {
 ; G3-LABEL: spltish:
 ; G3:       # %bb.0:
 ; G3-NEXT:    stwu 1, -48(1)
@@ -253,11 +253,11 @@ define void @spltish(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ; G5-NEXT:    vsububm 2, 2, 3
 ; G5-NEXT:    stvx 2, 0, 3
 ; G5-NEXT:    blr
-  %tmp = load <16 x i8>, <16 x i8>* %B         ; <<16 x i8>> [#uses=1]
+  %tmp = load <16 x i8>, ptr %B         ; <<16 x i8>> [#uses=1]
   %tmp.s = bitcast <16 x i8> %tmp to <16 x i8>      ; <<16 x i8>> [#uses=1]
   %tmp4 = sub <16 x i8> %tmp.s, bitcast (<8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16
  15, i16 15, i16 15 > to <16 x i8>)       ; <<16 x i8>> [#uses=1]
   %tmp4.u = bitcast <16 x i8> %tmp4 to <16 x i8>    ; <<16 x i8>> [#uses=1]
-  store <16 x i8> %tmp4.u, <16 x i8>* %A
+  store <16 x i8> %tmp4.u, ptr %A
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vec_splat_constant.ll b/llvm/test/CodeGen/PowerPC/vec_splat_constant.ll
index 1f6a38ee7c2b..8d190c3bab97 100644
--- a/llvm/test/CodeGen/PowerPC/vec_splat_constant.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_splat_constant.ll
@@ -1,21 +1,21 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu -mcpu=g5 | FileCheck %s
 ; Formerly incorrectly inserted vsldoi (endian confusion)
 
- at baz = common global <16 x i8> zeroinitializer    ; <<16 x i8>*> [#uses=1]
+ at baz = common global <16 x i8> zeroinitializer    ; <ptr> [#uses=1]
 
 define void @foo(<16 x i8> %x) nounwind ssp {
 entry:
 ; CHECK: foo:
 ; CHECK-NOT: vsldoi
-  %x_addr = alloca <16 x i8>                      ; <<16 x i8>*> [#uses=2]
-  %temp = alloca <16 x i8>                        ; <<16 x i8>*> [#uses=2]
+  %x_addr = alloca <16 x i8>                      ; <ptr> [#uses=2]
+  %temp = alloca <16 x i8>                        ; <ptr> [#uses=2]
   %"alloca point" = bitcast i32 0 to i32          ; <i32> [#uses=0]
-  store <16 x i8> %x, <16 x i8>* %x_addr
-  store <16 x i8> <i8 0, i8 0, i8 0, i8 14, i8 0, i8 0, i8 0, i8 14, i8 0, i8 0, i8 0, i8 14, i8 0, i8 0, i8 0, i8 14>, <16 x i8>* %temp, align 16
-  %0 = load <16 x i8>, <16 x i8>* %x_addr, align 16          ; <<16 x i8>> [#uses=1]
-  %1 = load <16 x i8>, <16 x i8>* %temp, align 16            ; <<16 x i8>> [#uses=1]
+  store <16 x i8> %x, ptr %x_addr
+  store <16 x i8> <i8 0, i8 0, i8 0, i8 14, i8 0, i8 0, i8 0, i8 14, i8 0, i8 0, i8 0, i8 14, i8 0, i8 0, i8 0, i8 14>, ptr %temp, align 16
+  %0 = load <16 x i8>, ptr %x_addr, align 16          ; <<16 x i8>> [#uses=1]
+  %1 = load <16 x i8>, ptr %temp, align 16            ; <<16 x i8>> [#uses=1]
   %tmp = add <16 x i8> %0, %1                     ; <<16 x i8>> [#uses=1]
-  store <16 x i8> %tmp, <16 x i8>* @baz, align 16
+  store <16 x i8> %tmp, ptr @baz, align 16
   br label %return
 
 return:                                           ; preds = %entry

diff  --git a/llvm/test/CodeGen/PowerPC/vec_zero.ll b/llvm/test/CodeGen/PowerPC/vec_zero.ll
index fe198bc8a556..cd04ee3d14d2 100644
--- a/llvm/test/CodeGen/PowerPC/vec_zero.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_zero.ll
@@ -1,9 +1,9 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- -mcpu=g5 | grep vxor
 
-define void @foo(<4 x float>* %P) {
-        %T = load <4 x float>, <4 x float>* %P               ; <<4 x float>> [#uses=1]
+define void @foo(ptr %P) {
+        %T = load <4 x float>, ptr %P               ; <<4 x float>> [#uses=1]
         %S = fadd <4 x float> zeroinitializer, %T                ; <<4 x float>> [#uses=1]
-        store <4 x float> %S, <4 x float>* %P
+        store <4 x float> %S, ptr %P
         ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/vector-identity-shuffle.ll b/llvm/test/CodeGen/PowerPC/vector-identity-shuffle.ll
index 7af4b827e7d0..8851cbfb9658 100644
--- a/llvm/test/CodeGen/PowerPC/vector-identity-shuffle.ll
+++ b/llvm/test/CodeGen/PowerPC/vector-identity-shuffle.ll
@@ -1,8 +1,8 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- -mcpu=g5 | grep test:
 ; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- -mcpu=g5 | not grep vperm
 
-define void @test(<4 x float>* %tmp2.i) {
-        %tmp2.i.upgrd.1 = load <4 x float>, <4 x float>* %tmp2.i             ; <<4 x float>> [#uses=4]
+define void @test(ptr %tmp2.i) {
+        %tmp2.i.upgrd.1 = load <4 x float>, ptr %tmp2.i             ; <<4 x float>> [#uses=4]
         %xFloat0.48 = extractelement <4 x float> %tmp2.i.upgrd.1, i32 0      ; <float> [#uses=1]
         %inFloat0.49 = insertelement <4 x float> undef, float %xFloat0.48, i32 0              ; <<4 x float>> [#uses=1]
         %xFloat1.50 = extractelement <4 x float> %tmp2.i.upgrd.1, i32 1      ; <float> [#uses=1]
@@ -11,7 +11,7 @@ define void @test(<4 x float>* %tmp2.i) {
         %inFloat2.55 = insertelement <4 x float> %inFloat1.52, float %xFloat2.53, i32 2               ; <<4 x float>> [#uses=1]
         %xFloat3.56 = extractelement <4 x float> %tmp2.i.upgrd.1, i32 3      ; <float> [#uses=1]
         %inFloat3.58 = insertelement <4 x float> %inFloat2.55, float %xFloat3.56, i32 3               ; <<4 x float>> [#uses=1]
-        store <4 x float> %inFloat3.58, <4 x float>* %tmp2.i
+        store <4 x float> %inFloat3.58, ptr %tmp2.i
         ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/vector-ldst.ll b/llvm/test/CodeGen/PowerPC/vector-ldst.ll
index f860fea56e7a..f39c663eadeb 100644
--- a/llvm/test/CodeGen/PowerPC/vector-ldst.ll
+++ b/llvm/test/CodeGen/PowerPC/vector-ldst.ll
@@ -36,13 +36,13 @@ define dso_local <16 x i8> @ld_0_vector(i64 %ptr) {
 ; CHECK-P8-BE-NEXT:    lxvw4x v2, 0, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 16
+  %0 = inttoptr i64 %ptr to ptr
+  %1 = load <16 x i8>, ptr %0, align 16
   ret <16 x i8> %1
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local <16 x i8> @ld_unalign16_vector(i8* nocapture readonly %ptr) {
+define dso_local <16 x i8> @ld_unalign16_vector(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign16_vector:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plxv v2, 1(r3), 0
@@ -67,14 +67,13 @@ define dso_local <16 x i8> @ld_unalign16_vector(i8* nocapture readonly %ptr) {
 ; CHECK-P8-BE-NEXT:    lxvw4x v2, 0, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
-  %0 = bitcast i8* %add.ptr to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 16
-  ret <16 x i8> %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+  %0 = load <16 x i8>, ptr %add.ptr, align 16
+  ret <16 x i8> %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local <16 x i8> @ld_align16_vector(i8* nocapture readonly %ptr) {
+define dso_local <16 x i8> @ld_align16_vector(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align16_vector:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plxv v2, 8(r3), 0
@@ -99,14 +98,13 @@ define dso_local <16 x i8> @ld_align16_vector(i8* nocapture readonly %ptr) {
 ; CHECK-P8-BE-NEXT:    lxvw4x v2, 0, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 16
-  ret <16 x i8> %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  %0 = load <16 x i8>, ptr %add.ptr, align 16
+  ret <16 x i8> %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local <16 x i8> @ld_unalign32_vector(i8* nocapture readonly %ptr) {
+define dso_local <16 x i8> @ld_unalign32_vector(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign32_vector:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plxv v2, 99999(r3), 0
@@ -134,14 +132,13 @@ define dso_local <16 x i8> @ld_unalign32_vector(i8* nocapture readonly %ptr) {
 ; CHECK-P8-BE-NEXT:    lxvw4x v2, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
-  %0 = bitcast i8* %add.ptr to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 16
-  ret <16 x i8> %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+  %0 = load <16 x i8>, ptr %add.ptr, align 16
+  ret <16 x i8> %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local <16 x i8> @ld_align32_vector(i8* nocapture readonly %ptr) {
+define dso_local <16 x i8> @ld_align32_vector(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align32_vector:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    plxv v2, 99999000(r3), 0
@@ -169,14 +166,13 @@ define dso_local <16 x i8> @ld_align32_vector(i8* nocapture readonly %ptr) {
 ; CHECK-P8-BE-NEXT:    lxvw4x v2, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 16
-  ret <16 x i8> %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  %0 = load <16 x i8>, ptr %add.ptr, align 16
+  ret <16 x i8> %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local <16 x i8> @ld_unalign64_vector(i8* nocapture readonly %ptr) {
+define dso_local <16 x i8> @ld_unalign64_vector(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_unalign64_vector:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 232
@@ -213,14 +209,13 @@ define dso_local <16 x i8> @ld_unalign64_vector(i8* nocapture readonly %ptr) {
 ; CHECK-P8-BE-NEXT:    lxvw4x v2, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
-  %0 = bitcast i8* %add.ptr to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 16
-  ret <16 x i8> %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+  %0 = load <16 x i8>, ptr %add.ptr, align 16
+  ret <16 x i8> %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local <16 x i8> @ld_align64_vector(i8* nocapture readonly %ptr) {
+define dso_local <16 x i8> @ld_align64_vector(ptr nocapture readonly %ptr) {
 ; CHECK-P10-LABEL: ld_align64_vector:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -253,14 +248,13 @@ define dso_local <16 x i8> @ld_align64_vector(i8* nocapture readonly %ptr) {
 ; CHECK-P8-BE-NEXT:    lxvw4x v2, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 16
-  ret <16 x i8> %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  %0 = load <16 x i8>, ptr %add.ptr, align 16
+  ret <16 x i8> %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local <16 x i8> @ld_reg_vector(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local <16 x i8> @ld_reg_vector(ptr nocapture readonly %ptr, i64 %off) {
 ; CHECK-LABEL: ld_reg_vector:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvx v2, r3, r4
@@ -277,10 +271,9 @@ define dso_local <16 x i8> @ld_reg_vector(i8* nocapture readonly %ptr, i64 %off)
 ; CHECK-P8-BE-NEXT:    lxvw4x v2, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 16
-  ret <16 x i8> %1
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  %0 = load <16 x i8>, ptr %add.ptr, align 16
+  ret <16 x i8> %0
 }
 
 ; Function Attrs: norecurse nounwind readonly uwtable willreturn
@@ -306,8 +299,8 @@ define dso_local <16 x i8> @ld_or_vector(i64 %ptr, i8 zeroext %off) {
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load <16 x i8>, ptr %0, align 16
   ret <16 x i8> %1
 }
 
@@ -335,8 +328,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = zext i8 %off to i64
   %or = or i64 %and, %conv
-  %0 = inttoptr i64 %or to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load <16 x i8>, ptr %0, align 16
   ret <16 x i8> %1
 }
 
@@ -362,8 +355,8 @@ define dso_local <16 x i8> @ld_not_disjoint16_vector(i64 %ptr) {
 ; CHECK-P8-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load <16 x i8>, ptr %0, align 16
   ret <16 x i8> %1
 }
 
@@ -399,8 +392,8 @@ define dso_local <16 x i8> @ld_disjoint_unalign16_vector(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 6
-  %0 = inttoptr i64 %or to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load <16 x i8>, ptr %0, align 16
   ret <16 x i8> %1
 }
 
@@ -436,8 +429,8 @@ define dso_local <16 x i8> @ld_disjoint_align16_vector(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load <16 x i8>, ptr %0, align 16
   ret <16 x i8> %1
 }
 
@@ -466,8 +459,8 @@ define dso_local <16 x i8> @ld_not_disjoint32_vector(i64 %ptr) {
 ; CHECK-P8-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load <16 x i8>, ptr %0, align 16
   ret <16 x i8> %1
 }
 
@@ -506,8 +499,8 @@ define dso_local <16 x i8> @ld_disjoint_unalign32_vector(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1048576
   %or = or i64 %and, 99999
-  %0 = inttoptr i64 %or to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load <16 x i8>, ptr %0, align 16
   ret <16 x i8> %1
 }
 
@@ -550,8 +543,8 @@ define dso_local <16 x i8> @ld_disjoint_align32_vector(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load <16 x i8>, ptr %0, align 16
   ret <16 x i8> %1
 }
 
@@ -598,8 +591,8 @@ define dso_local <16 x i8> @ld_not_disjoint64_vector(i64 %ptr) {
 ; CHECK-P8-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load <16 x i8>, ptr %0, align 16
   ret <16 x i8> %1
 }
 
@@ -647,8 +640,8 @@ define dso_local <16 x i8> @ld_disjoint_unalign64_vector(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000001
-  %0 = inttoptr i64 %or to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  %1 = load <16 x i8>, ptr %0, align 16
   ret <16 x i8> %1
 }
 
@@ -692,8 +685,8 @@ define dso_local <16 x i8> @ld_disjoint_align64_vector(i64 %ptr) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  %1 = load <16 x i8>, ptr %0, align 4096
   ret <16 x i8> %1
 }
 
@@ -718,7 +711,7 @@ define dso_local <16 x i8> @ld_cst_unalign16_vector() {
 ; CHECK-P8-BE-NEXT:    lxvw4x v2, 0, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load <16 x i8>, <16 x i8>* inttoptr (i64 255 to <16 x i8>*), align 16
+  %0 = load <16 x i8>, ptr inttoptr (i64 255 to ptr), align 16
   ret <16 x i8> %0
 }
 
@@ -742,7 +735,7 @@ define dso_local <16 x i8> @ld_cst_align16_vector() {
 ; CHECK-P8-BE-NEXT:    lxvw4x v2, 0, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load <16 x i8>, <16 x i8>* inttoptr (i64 4080 to <16 x i8>*), align 16
+  %0 = load <16 x i8>, ptr inttoptr (i64 4080 to ptr), align 16
   ret <16 x i8> %0
 }
 
@@ -776,7 +769,7 @@ define dso_local <16 x i8> @ld_cst_unalign32_vector() {
 ; CHECK-P8-BE-NEXT:    lxvw4x v2, 0, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load <16 x i8>, <16 x i8>* inttoptr (i64 99999 to <16 x i8>*), align 16
+  %0 = load <16 x i8>, ptr inttoptr (i64 99999 to ptr), align 16
   ret <16 x i8> %0
 }
 
@@ -810,7 +803,7 @@ define dso_local <16 x i8> @ld_cst_align32_vector() {
 ; CHECK-P8-BE-NEXT:    lxvw4x v2, 0, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load <16 x i8>, <16 x i8>* inttoptr (i64 9999900 to <16 x i8>*), align 16
+  %0 = load <16 x i8>, ptr inttoptr (i64 9999900 to ptr), align 16
   ret <16 x i8> %0
 }
 
@@ -852,7 +845,7 @@ define dso_local <16 x i8> @ld_cst_unalign64_vector() {
 ; CHECK-P8-BE-NEXT:    lxvw4x v2, 0, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load <16 x i8>, <16 x i8>* inttoptr (i64 1000000000001 to <16 x i8>*), align 16
+  %0 = load <16 x i8>, ptr inttoptr (i64 1000000000001 to ptr), align 16
   ret <16 x i8> %0
 }
 
@@ -890,7 +883,7 @@ define dso_local <16 x i8> @ld_cst_align64_vector() {
 ; CHECK-P8-BE-NEXT:    lxvw4x v2, 0, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = load <16 x i8>, <16 x i8>* inttoptr (i64 1000000000000 to <16 x i8>*), align 4096
+  %0 = load <16 x i8>, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret <16 x i8> %0
 }
 
@@ -912,13 +905,13 @@ define dso_local void @st_0_vector(i64 %ptr, <16 x i8> %str) {
 ; CHECK-P8-BE-NEXT:    stxvw4x v2, 0, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %0 = inttoptr i64 %ptr to <16 x i8>*
-  store <16 x i8> %str, <16 x i8>* %0, align 16
+  %0 = inttoptr i64 %ptr to ptr
+  store <16 x i8> %str, ptr %0, align 16
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_unalign16_vector(i8* nocapture %ptr, <16 x i8> %str) {
+define dso_local void @st_unalign16_vector(ptr nocapture %ptr, <16 x i8> %str) {
 ; CHECK-P10-LABEL: st_unalign16_vector:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pstxv v2, 1(r3), 0
@@ -943,14 +936,13 @@ define dso_local void @st_unalign16_vector(i8* nocapture %ptr, <16 x i8> %str) {
 ; CHECK-P8-BE-NEXT:    stxvw4x v2, 0, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
-  %0 = bitcast i8* %add.ptr to <16 x i8>*
-  store <16 x i8> %str, <16 x i8>* %0, align 16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+  store <16 x i8> %str, ptr %add.ptr, align 16
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_vector(i8* nocapture %ptr, <16 x i8> %str) {
+define dso_local void @st_align16_vector(ptr nocapture %ptr, <16 x i8> %str) {
 ; CHECK-P10-LABEL: st_align16_vector:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pstxv v2, 8(r3), 0
@@ -975,14 +967,13 @@ define dso_local void @st_align16_vector(i8* nocapture %ptr, <16 x i8> %str) {
 ; CHECK-P8-BE-NEXT:    stxvw4x v2, 0, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
-  %0 = bitcast i8* %add.ptr to <16 x i8>*
-  store <16 x i8> %str, <16 x i8>* %0, align 16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+  store <16 x i8> %str, ptr %add.ptr, align 16
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_unalign32_vector(i8* nocapture %ptr, <16 x i8> %str) {
+define dso_local void @st_unalign32_vector(ptr nocapture %ptr, <16 x i8> %str) {
 ; CHECK-P10-LABEL: st_unalign32_vector:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pstxv v2, 99999(r3), 0
@@ -1010,14 +1001,13 @@ define dso_local void @st_unalign32_vector(i8* nocapture %ptr, <16 x i8> %str) {
 ; CHECK-P8-BE-NEXT:    stxvw4x v2, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
-  %0 = bitcast i8* %add.ptr to <16 x i8>*
-  store <16 x i8> %str, <16 x i8>* %0, align 16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+  store <16 x i8> %str, ptr %add.ptr, align 16
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_vector(i8* nocapture %ptr, <16 x i8> %str) {
+define dso_local void @st_align32_vector(ptr nocapture %ptr, <16 x i8> %str) {
 ; CHECK-P10-LABEL: st_align32_vector:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pstxv v2, 99999000(r3), 0
@@ -1045,14 +1035,13 @@ define dso_local void @st_align32_vector(i8* nocapture %ptr, <16 x i8> %str) {
 ; CHECK-P8-BE-NEXT:    stxvw4x v2, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
-  %0 = bitcast i8* %add.ptr to <16 x i8>*
-  store <16 x i8> %str, <16 x i8>* %0, align 16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+  store <16 x i8> %str, ptr %add.ptr, align 16
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_unalign64_vector(i8* nocapture %ptr, <16 x i8> %str) {
+define dso_local void @st_unalign64_vector(ptr nocapture %ptr, <16 x i8> %str) {
 ; CHECK-P10-LABEL: st_unalign64_vector:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 232
@@ -1089,14 +1078,13 @@ define dso_local void @st_unalign64_vector(i8* nocapture %ptr, <16 x i8> %str) {
 ; CHECK-P8-BE-NEXT:    stxvw4x v2, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
-  %0 = bitcast i8* %add.ptr to <16 x i8>*
-  store <16 x i8> %str, <16 x i8>* %0, align 16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+  store <16 x i8> %str, ptr %add.ptr, align 16
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_vector(i8* nocapture %ptr, <16 x i8> %str) {
+define dso_local void @st_align64_vector(ptr nocapture %ptr, <16 x i8> %str) {
 ; CHECK-P10-LABEL: st_align64_vector:
 ; CHECK-P10:       # %bb.0: # %entry
 ; CHECK-P10-NEXT:    pli r4, 244140625
@@ -1129,14 +1117,13 @@ define dso_local void @st_align64_vector(i8* nocapture %ptr, <16 x i8> %str) {
 ; CHECK-P8-BE-NEXT:    stxvw4x v2, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
-  %0 = bitcast i8* %add.ptr to <16 x i8>*
-  store <16 x i8> %str, <16 x i8>* %0, align 16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+  store <16 x i8> %str, ptr %add.ptr, align 16
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_vector(i8* nocapture %ptr, i64 %off, <16 x i8> %str) {
+define dso_local void @st_reg_vector(ptr nocapture %ptr, i64 %off, <16 x i8> %str) {
 ; CHECK-LABEL: st_reg_vector:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stxvx v2, r3, r4
@@ -1153,9 +1140,8 @@ define dso_local void @st_reg_vector(i8* nocapture %ptr, i64 %off, <16 x i8> %st
 ; CHECK-P8-BE-NEXT:    stxvw4x v2, r3, r4
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
-  %0 = bitcast i8* %add.ptr to <16 x i8>*
-  store <16 x i8> %str, <16 x i8>* %0, align 16
+  %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+  store <16 x i8> %str, ptr %add.ptr, align 16
   ret void
 }
 
@@ -1182,8 +1168,8 @@ define dso_local void @st_or1_vector(i64 %ptr, i8 zeroext %off, <16 x i8> %str)
 entry:
   %conv = zext i8 %off to i64
   %or = or i64 %conv, %ptr
-  %0 = inttoptr i64 %or to <16 x i8>*
-  store <16 x i8> %str, <16 x i8>* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store <16 x i8> %str, ptr %0, align 16
   ret void
 }
 
@@ -1211,8 +1197,8 @@ entry:
   %and = and i64 %ptr, -4096
   %conv = zext i8 %off to i64
   %or = or i64 %and, %conv
-  %0 = inttoptr i64 %or to <16 x i8>*
-  store <16 x i8> %str, <16 x i8>* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store <16 x i8> %str, ptr %0, align 16
   ret void
 }
 
@@ -1238,8 +1224,8 @@ define dso_local void @st_not_disjoint16_vector(i64 %ptr, <16 x i8> %str) {
 ; CHECK-P8-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 6
-  %0 = inttoptr i64 %or to <16 x i8>*
-  store <16 x i8> %str, <16 x i8>* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store <16 x i8> %str, ptr %0, align 16
   ret void
 }
 
@@ -1275,8 +1261,8 @@ define dso_local void @st_disjoint_unalign16_vector(i64 %ptr, <16 x i8> %str) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 6
-  %0 = inttoptr i64 %or to <16 x i8>*
-  store <16 x i8> %str, <16 x i8>* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store <16 x i8> %str, ptr %0, align 16
   ret void
 }
 
@@ -1312,8 +1298,8 @@ define dso_local void @st_disjoint_align16_vector(i64 %ptr, <16 x i8> %str) {
 entry:
   %and = and i64 %ptr, -4096
   %or = or i64 %and, 24
-  %0 = inttoptr i64 %or to <16 x i8>*
-  store <16 x i8> %str, <16 x i8>* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store <16 x i8> %str, ptr %0, align 16
   ret void
 }
 
@@ -1342,8 +1328,8 @@ define dso_local void @st_not_disjoint32_vector(i64 %ptr, <16 x i8> %str) {
 ; CHECK-P8-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 99999
-  %0 = inttoptr i64 %or to <16 x i8>*
-  store <16 x i8> %str, <16 x i8>* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store <16 x i8> %str, ptr %0, align 16
   ret void
 }
 
@@ -1382,8 +1368,8 @@ define dso_local void @st_disjoint_unalign32_vector(i64 %ptr, <16 x i8> %str) {
 entry:
   %and = and i64 %ptr, -1048576
   %or = or i64 %and, 99999
-  %0 = inttoptr i64 %or to <16 x i8>*
-  store <16 x i8> %str, <16 x i8>* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store <16 x i8> %str, ptr %0, align 16
   ret void
 }
 
@@ -1426,8 +1412,8 @@ define dso_local void @st_disjoint_align32_vector(i64 %ptr, <16 x i8> %str) {
 entry:
   %and = and i64 %ptr, -1000341504
   %or = or i64 %and, 999990000
-  %0 = inttoptr i64 %or to <16 x i8>*
-  store <16 x i8> %str, <16 x i8>* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store <16 x i8> %str, ptr %0, align 16
   ret void
 }
 
@@ -1474,8 +1460,8 @@ define dso_local void @st_not_disjoint64_vector(i64 %ptr, <16 x i8> %str) {
 ; CHECK-P8-BE-NEXT:    blr
 entry:
   %or = or i64 %ptr, 1000000000001
-  %0 = inttoptr i64 %or to <16 x i8>*
-  store <16 x i8> %str, <16 x i8>* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store <16 x i8> %str, ptr %0, align 16
   ret void
 }
 
@@ -1523,8 +1509,8 @@ define dso_local void @st_disjoint_unalign64_vector(i64 %ptr, <16 x i8> %str) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000001
-  %0 = inttoptr i64 %or to <16 x i8>*
-  store <16 x i8> %str, <16 x i8>* %0, align 16
+  %0 = inttoptr i64 %or to ptr
+  store <16 x i8> %str, ptr %0, align 16
   ret void
 }
 
@@ -1568,8 +1554,8 @@ define dso_local void @st_disjoint_align64_vector(i64 %ptr, <16 x i8> %str) {
 entry:
   %and = and i64 %ptr, -1099511627776
   %or = or i64 %and, 1000000000000
-  %0 = inttoptr i64 %or to <16 x i8>*
-  store <16 x i8> %str, <16 x i8>* %0, align 4096
+  %0 = inttoptr i64 %or to ptr
+  store <16 x i8> %str, ptr %0, align 4096
   ret void
 }
 
@@ -1594,7 +1580,7 @@ define dso_local void @st_cst_unalign16_vector(<16 x i8> %str) {
 ; CHECK-P8-BE-NEXT:    stxvw4x v2, 0, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  store <16 x i8> %str, <16 x i8>* inttoptr (i64 255 to <16 x i8>*), align 16
+  store <16 x i8> %str, ptr inttoptr (i64 255 to ptr), align 16
   ret void
 }
 
@@ -1618,7 +1604,7 @@ define dso_local void @st_cst_align16_vector(<16 x i8> %str) {
 ; CHECK-P8-BE-NEXT:    stxvw4x v2, 0, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  store <16 x i8> %str, <16 x i8>* inttoptr (i64 4080 to <16 x i8>*), align 16
+  store <16 x i8> %str, ptr inttoptr (i64 4080 to ptr), align 16
   ret void
 }
 
@@ -1652,7 +1638,7 @@ define dso_local void @st_cst_unalign32_vector(<16 x i8> %str) {
 ; CHECK-P8-BE-NEXT:    stxvw4x v2, 0, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  store <16 x i8> %str, <16 x i8>* inttoptr (i64 99999 to <16 x i8>*), align 16
+  store <16 x i8> %str, ptr inttoptr (i64 99999 to ptr), align 16
   ret void
 }
 
@@ -1686,7 +1672,7 @@ define dso_local void @st_cst_align32_vector(<16 x i8> %str) {
 ; CHECK-P8-BE-NEXT:    stxvw4x v2, 0, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  store <16 x i8> %str, <16 x i8>* inttoptr (i64 9999900 to <16 x i8>*), align 16
+  store <16 x i8> %str, ptr inttoptr (i64 9999900 to ptr), align 16
   ret void
 }
 
@@ -1728,7 +1714,7 @@ define dso_local void @st_cst_unalign64_vector(<16 x i8> %str) {
 ; CHECK-P8-BE-NEXT:    stxvw4x v2, 0, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  store <16 x i8> %str, <16 x i8>* inttoptr (i64 1000000000001 to <16 x i8>*), align 16
+  store <16 x i8> %str, ptr inttoptr (i64 1000000000001 to ptr), align 16
   ret void
 }
 
@@ -1766,6 +1752,6 @@ define dso_local void @st_cst_align64_vector(<16 x i8> %str) {
 ; CHECK-P8-BE-NEXT:    stxvw4x v2, 0, r3
 ; CHECK-P8-BE-NEXT:    blr
 entry:
-  store <16 x i8> %str, <16 x i8>* inttoptr (i64 1000000000000 to <16 x i8>*), align 4096
+  store <16 x i8> %str, ptr inttoptr (i64 1000000000000 to ptr), align 4096
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vector-merge-store-fp-constants.ll b/llvm/test/CodeGen/PowerPC/vector-merge-store-fp-constants.ll
index 9d7b11c78c1c..6b3df79d9b46 100644
--- a/llvm/test/CodeGen/PowerPC/vector-merge-store-fp-constants.ll
+++ b/llvm/test/CodeGen/PowerPC/vector-merge-store-fp-constants.ll
@@ -7,22 +7,21 @@
 ; CHECK-DAG: std [[ZEROREG]], 16([[PTR]])
 ; CHECK-DAG: std [[ZEROREG]], 24([[PTR]])
 ; CHECK: blr
-define void @merge_8_float_zero_stores(float* %ptr) {
-  %idx0 = getelementptr float, float* %ptr, i64 0
-  %idx1 = getelementptr float, float* %ptr, i64 1
-  %idx2 = getelementptr float, float* %ptr, i64 2
-  %idx3 = getelementptr float, float* %ptr, i64 3
-  %idx4 = getelementptr float, float* %ptr, i64 4
-  %idx5 = getelementptr float, float* %ptr, i64 5
-  %idx6 = getelementptr float, float* %ptr, i64 6
-  %idx7 = getelementptr float, float* %ptr, i64 7
-  store float 0.0, float* %idx0, align 4
-  store float 0.0, float* %idx1, align 4
-  store float 0.0, float* %idx2, align 4
-  store float 0.0, float* %idx3, align 4
-  store float 0.0, float* %idx4, align 4
-  store float 0.0, float* %idx5, align 4
-  store float 0.0, float* %idx6, align 4
-  store float 0.0, float* %idx7, align 4
+define void @merge_8_float_zero_stores(ptr %ptr) {
+  %idx1 = getelementptr float, ptr %ptr, i64 1
+  %idx2 = getelementptr float, ptr %ptr, i64 2
+  %idx3 = getelementptr float, ptr %ptr, i64 3
+  %idx4 = getelementptr float, ptr %ptr, i64 4
+  %idx5 = getelementptr float, ptr %ptr, i64 5
+  %idx6 = getelementptr float, ptr %ptr, i64 6
+  %idx7 = getelementptr float, ptr %ptr, i64 7
+  store float 0.0, ptr %ptr, align 4
+  store float 0.0, ptr %idx1, align 4
+  store float 0.0, ptr %idx2, align 4
+  store float 0.0, ptr %idx3, align 4
+  store float 0.0, ptr %idx4, align 4
+  store float 0.0, ptr %idx5, align 4
+  store float 0.0, ptr %idx6, align 4
+  store float 0.0, ptr %idx7, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vector.ll b/llvm/test/CodeGen/PowerPC/vector.ll
index 8b1f98065a57..07f45109971f 100644
--- a/llvm/test/CodeGen/PowerPC/vector.ll
+++ b/llvm/test/CodeGen/PowerPC/vector.ll
@@ -11,156 +11,156 @@
 
 ;;; TEST HANDLING OF VARIOUS VECTOR SIZES
 
-define void @test_f1(%f1* %P, %f1* %Q, %f1* %S) {
-        %p = load %f1, %f1* %P               ; <%f1> [#uses=1]
-        %q = load %f1, %f1* %Q               ; <%f1> [#uses=1]
+define void @test_f1(ptr %P, ptr %Q, ptr %S) {
+        %p = load %f1, ptr %P               ; <%f1> [#uses=1]
+        %q = load %f1, ptr %Q               ; <%f1> [#uses=1]
         %R = fadd %f1 %p, %q             ; <%f1> [#uses=1]
-        store %f1 %R, %f1* %S
+        store %f1 %R, ptr %S
         ret void
 }
 
-define void @test_f2(%f2* %P, %f2* %Q, %f2* %S) {
-        %p = load %f2, %f2* %P               ; <%f2> [#uses=1]
-        %q = load %f2, %f2* %Q               ; <%f2> [#uses=1]
+define void @test_f2(ptr %P, ptr %Q, ptr %S) {
+        %p = load %f2, ptr %P               ; <%f2> [#uses=1]
+        %q = load %f2, ptr %Q               ; <%f2> [#uses=1]
         %R = fadd %f2 %p, %q             ; <%f2> [#uses=1]
-        store %f2 %R, %f2* %S
+        store %f2 %R, ptr %S
         ret void
 }
 
-define void @test_f4(%f4* %P, %f4* %Q, %f4* %S) {
-        %p = load %f4, %f4* %P               ; <%f4> [#uses=1]
-        %q = load %f4, %f4* %Q               ; <%f4> [#uses=1]
+define void @test_f4(ptr %P, ptr %Q, ptr %S) {
+        %p = load %f4, ptr %P               ; <%f4> [#uses=1]
+        %q = load %f4, ptr %Q               ; <%f4> [#uses=1]
         %R = fadd %f4 %p, %q             ; <%f4> [#uses=1]
-        store %f4 %R, %f4* %S
+        store %f4 %R, ptr %S
         ret void
 }
 
-define void @test_f8(%f8* %P, %f8* %Q, %f8* %S) {
-        %p = load %f8, %f8* %P               ; <%f8> [#uses=1]
-        %q = load %f8, %f8* %Q               ; <%f8> [#uses=1]
+define void @test_f8(ptr %P, ptr %Q, ptr %S) {
+        %p = load %f8, ptr %P               ; <%f8> [#uses=1]
+        %q = load %f8, ptr %Q               ; <%f8> [#uses=1]
         %R = fadd %f8 %p, %q             ; <%f8> [#uses=1]
-        store %f8 %R, %f8* %S
+        store %f8 %R, ptr %S
         ret void
 }
 
-define void @test_fmul(%f8* %P, %f8* %Q, %f8* %S) {
-        %p = load %f8, %f8* %P               ; <%f8> [#uses=1]
-        %q = load %f8, %f8* %Q               ; <%f8> [#uses=1]
+define void @test_fmul(ptr %P, ptr %Q, ptr %S) {
+        %p = load %f8, ptr %P               ; <%f8> [#uses=1]
+        %q = load %f8, ptr %Q               ; <%f8> [#uses=1]
         %R = fmul %f8 %p, %q             ; <%f8> [#uses=1]
-        store %f8 %R, %f8* %S
+        store %f8 %R, ptr %S
         ret void
 }
 
-define void @test_div(%f8* %P, %f8* %Q, %f8* %S) {
-        %p = load %f8, %f8* %P               ; <%f8> [#uses=1]
-        %q = load %f8, %f8* %Q               ; <%f8> [#uses=1]
+define void @test_div(ptr %P, ptr %Q, ptr %S) {
+        %p = load %f8, ptr %P               ; <%f8> [#uses=1]
+        %q = load %f8, ptr %Q               ; <%f8> [#uses=1]
         %R = fdiv %f8 %p, %q            ; <%f8> [#uses=1]
-        store %f8 %R, %f8* %S
+        store %f8 %R, ptr %S
         ret void
 }
 
-define void @test_rem(%f8* %P, %f8* %Q, %f8* %S) {
-        %p = load %f8, %f8* %P               ; <%f8> [#uses=1]
-        %q = load %f8, %f8* %Q               ; <%f8> [#uses=1]
+define void @test_rem(ptr %P, ptr %Q, ptr %S) {
+        %p = load %f8, ptr %P               ; <%f8> [#uses=1]
+        %q = load %f8, ptr %Q               ; <%f8> [#uses=1]
         %R = frem %f8 %p, %q            ; <%f8> [#uses=1]
-        store %f8 %R, %f8* %S
+        store %f8 %R, ptr %S
         ret void
 }
 
 ;;; TEST VECTOR CONSTRUCTS
 
-define void @test_cst(%f4* %P, %f4* %S) {
-        %p = load %f4, %f4* %P               ; <%f4> [#uses=1]
+define void @test_cst(ptr %P, ptr %S) {
+        %p = load %f4, ptr %P               ; <%f4> [#uses=1]
         %R = fadd %f4 %p, < float 0x3FB99999A0000000, float 1.000000e+00, float
  2.000000e+00, float 4.500000e+00 >             ; <%f4> [#uses=1]
-        store %f4 %R, %f4* %S
+        store %f4 %R, ptr %S
         ret void
 }
 
-define void @test_zero(%f4* %P, %f4* %S) {
-        %p = load %f4, %f4* %P               ; <%f4> [#uses=1]
+define void @test_zero(ptr %P, ptr %S) {
+        %p = load %f4, ptr %P               ; <%f4> [#uses=1]
         %R = fadd %f4 %p, zeroinitializer                ; <%f4> [#uses=1]
-        store %f4 %R, %f4* %S
+        store %f4 %R, ptr %S
         ret void
 }
 
-define void @test_undef(%f4* %P, %f4* %S) {
-        %p = load %f4, %f4* %P               ; <%f4> [#uses=1]
+define void @test_undef(ptr %P, ptr %S) {
+        %p = load %f4, ptr %P               ; <%f4> [#uses=1]
         %R = fadd %f4 %p, undef          ; <%f4> [#uses=1]
-        store %f4 %R, %f4* %S
+        store %f4 %R, ptr %S
         ret void
 }
 
-define void @test_constant_insert(%f4* %S) {
+define void @test_constant_insert(ptr %S) {
         %R = insertelement %f4 zeroinitializer, float 1.000000e+01, i32 0     
                 ; <%f4> [#uses=1]
-        store %f4 %R, %f4* %S
+        store %f4 %R, ptr %S
         ret void
 }
 
-define void @test_variable_buildvector(float %F, %f4* %S) {
+define void @test_variable_buildvector(float %F, ptr %S) {
         %R = insertelement %f4 zeroinitializer, float %F, i32 0        
-        store %f4 %R, %f4* %S
+        store %f4 %R, ptr %S
         ret void
 }
 
-define void @test_scalar_to_vector(float %F, %f4* %S) {
+define void @test_scalar_to_vector(float %F, ptr %S) {
         %R = insertelement %f4 undef, float %F, i32 0           
-        store %f4 %R, %f4* %S
+        store %f4 %R, ptr %S
         ret void
 }
 
-define float @test_extract_elt(%f8* %P) {
-        %p = load %f8, %f8* %P               ; <%f8> [#uses=1]
+define float @test_extract_elt(ptr %P) {
+        %p = load %f8, ptr %P               ; <%f8> [#uses=1]
         %R = extractelement %f8 %p, i32 3               ; <float> [#uses=1]
         ret float %R
 }
 
-define double @test_extract_elt2(%d8* %P) {
-        %p = load %d8, %d8* %P               ; <%d8> [#uses=1]
+define double @test_extract_elt2(ptr %P) {
+        %p = load %d8, ptr %P               ; <%d8> [#uses=1]
         %R = extractelement %d8 %p, i32 3               ; <double> [#uses=1]
         ret double %R
 }
 
-define void @test_cast_1(%f4* %b, %i4* %a) {
-        %tmp = load %f4, %f4* %b             ; <%f4> [#uses=1]
+define void @test_cast_1(ptr %b, ptr %a) {
+        %tmp = load %f4, ptr %b             ; <%f4> [#uses=1]
         %tmp2 = fadd %f4 %tmp, < float 1.000000e+00, float 2.000000e+00, float
 3.000000e+00, float 4.000000e+00 >              ; <%f4> [#uses=1]
         %tmp3 = bitcast %f4 %tmp2 to %i4                ; <%i4> [#uses=1]
         %tmp4 = add %i4 %tmp3, < i32 1, i32 2, i32 3, i32 4 >           
-        store %i4 %tmp4, %i4* %a
+        store %i4 %tmp4, ptr %a
         ret void
 }
 
-define void @test_cast_2(%f8* %a, <8 x i32>* %b) {
-        %T = load %f8, %f8* %a               ; <%f8> [#uses=1]
+define void @test_cast_2(ptr %a, ptr %b) {
+        %T = load %f8, ptr %a               ; <%f8> [#uses=1]
         %T2 = bitcast %f8 %T to <8 x i32>               
-        store <8 x i32> %T2, <8 x i32>* %b
+        store <8 x i32> %T2, ptr %b
         ret void
 }
 
 
 ;;; TEST IMPORTANT IDIOMS
 
-define void @splat(%f4* %P, %f4* %Q, float %X) {
+define void @splat(ptr %P, ptr %Q, float %X) {
         %tmp = insertelement %f4 undef, float %X, i32 0        
         %tmp2 = insertelement %f4 %tmp, float %X, i32 1       
         %tmp4 = insertelement %f4 %tmp2, float %X, i32 2    
         %tmp6 = insertelement %f4 %tmp4, float %X, i32 3   
-        %q = load %f4, %f4* %Q               ; <%f4> [#uses=1]
+        %q = load %f4, ptr %Q               ; <%f4> [#uses=1]
         %R = fadd %f4 %q, %tmp6          ; <%f4> [#uses=1]
-        store %f4 %R, %f4* %P
+        store %f4 %R, ptr %P
         ret void
 }
 
-define void @splat_i4(%i4* %P, %i4* %Q, i32 %X) {
+define void @splat_i4(ptr %P, ptr %Q, i32 %X) {
         %tmp = insertelement %i4 undef, i32 %X, i32 0          
         %tmp2 = insertelement %i4 %tmp, i32 %X, i32 1         
         %tmp4 = insertelement %i4 %tmp2, i32 %X, i32 2       
         %tmp6 = insertelement %i4 %tmp4, i32 %X, i32 3     
-        %q = load %i4, %i4* %Q               ; <%i4> [#uses=1]
+        %q = load %i4, ptr %Q               ; <%i4> [#uses=1]
         %R = add %i4 %q, %tmp6          ; <%i4> [#uses=1]
-        store %i4 %R, %i4* %P
+        store %i4 %R, ptr %P
         ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/vrspill.ll b/llvm/test/CodeGen/PowerPC/vrspill.ll
index b55e12960fa6..5ebccce34280 100644
--- a/llvm/test/CodeGen/PowerPC/vrspill.ll
+++ b/llvm/test/CodeGen/PowerPC/vrspill.ll
@@ -8,9 +8,9 @@ define void @addrtaken(i32 %i, <4 x float> %w) nounwind {
 entry:
   %i.addr = alloca i32, align 4
   %w.addr = alloca <4 x float>, align 16
-  store i32 %i, i32* %i.addr, align 4
-  store <4 x float> %w, <4 x float>* %w.addr, align 16
-  call void @foo(i32* %i.addr)
+  store i32 %i, ptr %i.addr, align 4
+  store <4 x float> %w, ptr %w.addr, align 16
+  call void @foo(ptr %i.addr)
   ret void
 }
 
@@ -21,4 +21,4 @@ entry:
 ; the opcode.
 ; CHECK-VSX: stxvw4x
 
-declare void @foo(i32*)
+declare void @foo(ptr)

diff  --git a/llvm/test/CodeGen/PowerPC/vsel-prom.ll b/llvm/test/CodeGen/PowerPC/vsel-prom.ll
index 79d1d83209cf..66cb94274e90 100644
--- a/llvm/test/CodeGen/PowerPC/vsel-prom.ll
+++ b/llvm/test/CodeGen/PowerPC/vsel-prom.ll
@@ -13,7 +13,7 @@ if.then:                                          ; preds = %entry
 if.end:                                           ; preds = %entry
   %0 = select i1 undef, <2 x double> undef, <2 x double> zeroinitializer
   %1 = extractelement <2 x double> %0, i32 1
-  store double %1, double* undef, align 8
+  store double %1, ptr undef, align 8
   ret void
 
 ; CHECK-LABEL: @Compute_Lateral

diff  --git a/llvm/test/CodeGen/PowerPC/vsx-div.ll b/llvm/test/CodeGen/PowerPC/vsx-div.ll
index eebe8d7a9c52..248c47e7dd73 100644
--- a/llvm/test/CodeGen/PowerPC/vsx-div.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx-div.ll
@@ -7,9 +7,9 @@
 
 define void @test1() {
 entry:
-  %0 = load <4 x float>, <4 x float>* @vf, align 16
+  %0 = load <4 x float>, ptr @vf, align 16
   %1 = tail call <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float> %0, <4 x float> %0)
-  store <4 x float> %1, <4 x float>* @vf_res, align 16
+  store <4 x float> %1, ptr @vf_res, align 16
   ret void
 }
 ; CHECK-LABEL: @test1
@@ -17,9 +17,9 @@ entry:
 
 define void @test2() {
 entry:
-  %0 = load <2 x double>, <2 x double>* @vd, align 16
+  %0 = load <2 x double>, ptr @vd, align 16
   %1 = tail call <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double> %0, <2 x double> %0)
-  store <2 x double> %1, <2 x double>* @vd_res, align 16
+  store <2 x double> %1, ptr @vd_res, align 16
   ret void
 }
 ; CHECK-LABEL: @test2

diff  --git a/llvm/test/CodeGen/PowerPC/vsx-elementary-arith.ll b/llvm/test/CodeGen/PowerPC/vsx-elementary-arith.ll
index 49a5014a1369..dc281c71d129 100644
--- a/llvm/test/CodeGen/PowerPC/vsx-elementary-arith.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx-elementary-arith.ll
@@ -8,8 +8,8 @@
 ; Function Attrs: nounwind
 define float @emit_xsaddsp() {
 entry:
-  %0 = load float, float* @a, align 4
-  %1 = load float, float* @b, align 4
+  %0 = load float, ptr @a, align 4
+  %1 = load float, ptr @b, align 4
   %add = fadd float %0, %1
   ret float %add
 ; CHECK-LABEL: @emit_xsaddsp
@@ -19,8 +19,8 @@ entry:
 ; Function Attrs: nounwind
 define float @emit_xssubsp() {
 entry:
-  %0 = load float, float* @a, align 4
-  %1 = load float, float* @b, align 4
+  %0 = load float, ptr @a, align 4
+  %1 = load float, ptr @b, align 4
   %sub = fsub float %0, %1
   ret float %sub
 ; CHECK-LABEL: @emit_xssubsp
@@ -30,8 +30,8 @@ entry:
 ; Function Attrs: nounwind
 define float @emit_xsdivsp() {
 entry:
-  %0 = load float, float* @a, align 4
-  %1 = load float, float* @b, align 4
+  %0 = load float, ptr @a, align 4
+  %1 = load float, ptr @b, align 4
   %div = fdiv float %0, %1
   ret float %div
 ; CHECK-LABEL: @emit_xsdivsp
@@ -41,8 +41,8 @@ entry:
 ; Function Attrs: nounwind
 define float @emit_xsmulsp() {
 entry:
-  %0 = load float, float* @a, align 4
-  %1 = load float, float* @b, align 4
+  %0 = load float, ptr @a, align 4
+  %1 = load float, ptr @b, align 4
   %mul = fmul float %0, %1
   ret float %mul
 ; CHECK-LABEL: @emit_xsmulsp
@@ -52,7 +52,7 @@ entry:
 ; Function Attrs: nounwind
 define float @emit_xssqrtsp() {
 entry:
-  %0 = load float, float* @b, align 4
+  %0 = load float, ptr @b, align 4
   %call = call float @sqrtf(float %0)
   ret float %call
 ; CHECK-LABEL: @emit_xssqrtsp
@@ -65,8 +65,8 @@ declare float @sqrtf(float)
 ; Function Attrs: nounwind
 define double @emit_xsadddp() {
 entry:
-  %0 = load double, double* @c, align 8
-  %1 = load double, double* @d, align 8
+  %0 = load double, ptr @c, align 8
+  %1 = load double, ptr @d, align 8
   %add = fadd double %0, %1
   ret double %add
 ; CHECK-LABEL: @emit_xsadddp
@@ -76,8 +76,8 @@ entry:
 ; Function Attrs: nounwind
 define double @emit_xssubdp() {
 entry:
-  %0 = load double, double* @c, align 8
-  %1 = load double, double* @d, align 8
+  %0 = load double, ptr @c, align 8
+  %1 = load double, ptr @d, align 8
   %sub = fsub double %0, %1
   ret double %sub
 ; CHECK-LABEL: @emit_xssubdp
@@ -87,8 +87,8 @@ entry:
 ; Function Attrs: nounwind
 define double @emit_xsdivdp() {
 entry:
-  %0 = load double, double* @c, align 8
-  %1 = load double, double* @d, align 8
+  %0 = load double, ptr @c, align 8
+  %1 = load double, ptr @d, align 8
   %div = fdiv double %0, %1
   ret double %div
 ; CHECK-LABEL: @emit_xsdivdp
@@ -98,8 +98,8 @@ entry:
 ; Function Attrs: nounwind
 define double @emit_xsmuldp() {
 entry:
-  %0 = load double, double* @c, align 8
-  %1 = load double, double* @d, align 8
+  %0 = load double, ptr @c, align 8
+  %1 = load double, ptr @d, align 8
   %mul = fmul double %0, %1
   ret double %mul
 ; CHECK-LABEL: @emit_xsmuldp
@@ -109,7 +109,7 @@ entry:
 ; Function Attrs: nounwind
 define double @emit_xssqrtdp() {
 entry:
-  %0 = load double, double* @d, align 8
+  %0 = load double, ptr @d, align 8
   %call = call double @sqrt(double %0)
   ret double %call
 ; CHECK-LABEL: @emit_xssqrtdp
@@ -123,7 +123,7 @@ entry:
 ; CHECK-LABEL: @emit_xvrsqrtesp
   %vf = alloca <4 x float>, align 16
   %vfr = alloca <4 x float>, align 16
-  %0 = load <4 x float>, <4 x float>* %vf, align 16
+  %0 = load <4 x float>, ptr %vf, align 16
   %call = call <4 x float> @llvm.ppc.vsx.xvrsqrtesp(<4 x float> %0)
 ; CHECK: xvrsqrtesp {{[0-9]+}}, {{[0-9]+}}
   ret <4 x float> %call
@@ -135,7 +135,7 @@ entry:
 ; CHECK-LABEL: @emit_xvrsqrtedp
   %vd = alloca <2 x double>, align 16
   %vdr = alloca <2 x double>, align 16
-  %0 = load <2 x double>, <2 x double>* %vd, align 16
+  %0 = load <2 x double>, ptr %vd, align 16
   %call = call <2 x double> @llvm.ppc.vsx.xvrsqrtedp(<2 x double> %0)
   ret <2 x double> %call
 ; CHECK: xvrsqrtedp {{[0-9]+}}, {{[0-9]+}}

diff  --git a/llvm/test/CodeGen/PowerPC/vsx-fma-m.ll b/llvm/test/CodeGen/PowerPC/vsx-fma-m.ll
index eb3f54a7bfdd..1feb630a1eb0 100644
--- a/llvm/test/CodeGen/PowerPC/vsx-fma-m.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx-fma-m.ll
@@ -8,13 +8,13 @@
 target datalayout = "E-m:e-i64:64-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
-define void @test1(double %a, double %b, double %c, double %e, double* nocapture %d) #0 {
+define void @test1(double %a, double %b, double %c, double %e, ptr nocapture %d) #0 {
 entry:
   %0 = tail call double @llvm.fma.f64(double %b, double %c, double %a)
-  store double %0, double* %d, align 8
+  store double %0, ptr %d, align 8
   %1 = tail call double @llvm.fma.f64(double %b, double %e, double %a)
-  %arrayidx1 = getelementptr inbounds double, double* %d, i64 1
-  store double %1, double* %arrayidx1, align 8
+  %arrayidx1 = getelementptr inbounds double, ptr %d, i64 1
+  store double %1, ptr %arrayidx1, align 8
   ret void
 
 ; CHECK-LABEL: @test1
@@ -35,16 +35,16 @@ entry:
 ; CHECK-FISL: blr
 }
 
-define void @test2(double %a, double %b, double %c, double %e, double %f, double* nocapture %d) #0 {
+define void @test2(double %a, double %b, double %c, double %e, double %f, ptr nocapture %d) #0 {
 entry:
   %0 = tail call double @llvm.fma.f64(double %b, double %c, double %a)
-  store double %0, double* %d, align 8
+  store double %0, ptr %d, align 8
   %1 = tail call double @llvm.fma.f64(double %b, double %e, double %a)
-  %arrayidx1 = getelementptr inbounds double, double* %d, i64 1
-  store double %1, double* %arrayidx1, align 8
+  %arrayidx1 = getelementptr inbounds double, ptr %d, i64 1
+  store double %1, ptr %arrayidx1, align 8
   %2 = tail call double @llvm.fma.f64(double %b, double %f, double %a)
-  %arrayidx2 = getelementptr inbounds double, double* %d, i64 2
-  store double %2, double* %arrayidx2, align 8
+  %arrayidx2 = getelementptr inbounds double, ptr %d, i64 2
+  store double %2, ptr %arrayidx2, align 8
   ret void
 
 ; CHECK-LABEL: @test2
@@ -73,19 +73,19 @@ entry:
 ; CHECK-FISL: blr
 }
 
-define void @test3(double %a, double %b, double %c, double %e, double %f, double* nocapture %d) #0 {
+define void @test3(double %a, double %b, double %c, double %e, double %f, ptr nocapture %d) #0 {
 entry:
   %0 = tail call double @llvm.fma.f64(double %b, double %c, double %a)
-  store double %0, double* %d, align 8
+  store double %0, ptr %d, align 8
   %1 = tail call double @llvm.fma.f64(double %b, double %e, double %a)
   %2 = tail call double @llvm.fma.f64(double %b, double %c, double %1)
-  %arrayidx1 = getelementptr inbounds double, double* %d, i64 3
-  store double %2, double* %arrayidx1, align 8
+  %arrayidx1 = getelementptr inbounds double, ptr %d, i64 3
+  store double %2, ptr %arrayidx1, align 8
   %3 = tail call double @llvm.fma.f64(double %b, double %f, double %a)
-  %arrayidx2 = getelementptr inbounds double, double* %d, i64 2
-  store double %3, double* %arrayidx2, align 8
-  %arrayidx3 = getelementptr inbounds double, double* %d, i64 1
-  store double %1, double* %arrayidx3, align 8
+  %arrayidx2 = getelementptr inbounds double, ptr %d, i64 2
+  store double %3, ptr %arrayidx2, align 8
+  %arrayidx3 = getelementptr inbounds double, ptr %d, i64 1
+  store double %1, ptr %arrayidx3, align 8
   ret void
 
 ; CHECK-LABEL: @test3
@@ -122,19 +122,19 @@ entry:
 ; CHECK-FISL: blr
 }
 
-define void @test4(double %a, double %b, double %c, double %e, double %f, double* nocapture %d) #0 {
+define void @test4(double %a, double %b, double %c, double %e, double %f, ptr nocapture %d) #0 {
 entry:
   %0 = tail call double @llvm.fma.f64(double %b, double %c, double %a)
-  store double %0, double* %d, align 8
+  store double %0, ptr %d, align 8
   %1 = tail call double @llvm.fma.f64(double %b, double %e, double %a)
-  %arrayidx1 = getelementptr inbounds double, double* %d, i64 1
-  store double %1, double* %arrayidx1, align 8
+  %arrayidx1 = getelementptr inbounds double, ptr %d, i64 1
+  store double %1, ptr %arrayidx1, align 8
   %2 = tail call double @llvm.fma.f64(double %b, double %c, double %1)
-  %arrayidx3 = getelementptr inbounds double, double* %d, i64 3
-  store double %2, double* %arrayidx3, align 8
+  %arrayidx3 = getelementptr inbounds double, ptr %d, i64 3
+  store double %2, ptr %arrayidx3, align 8
   %3 = tail call double @llvm.fma.f64(double %b, double %f, double %a)
-  %arrayidx4 = getelementptr inbounds double, double* %d, i64 2
-  store double %3, double* %arrayidx4, align 8
+  %arrayidx4 = getelementptr inbounds double, ptr %d, i64 2
+  store double %3, ptr %arrayidx4, align 8
   ret void
 
 ; CHECK-LABEL: @test4
@@ -175,13 +175,13 @@ entry:
 
 declare double @llvm.fma.f64(double, double, double) #0
 
-define void @testv1(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %e, <2 x double>* nocapture %d) #0 {
+define void @testv1(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %e, ptr nocapture %d) #0 {
 entry:
   %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %a)
-  store <2 x double> %0, <2 x double>* %d, align 8
+  store <2 x double> %0, ptr %d, align 8
   %1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %e, <2 x double> %a)
-  %arrayidx1 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 1
-  store <2 x double> %1, <2 x double>* %arrayidx1, align 8
+  %arrayidx1 = getelementptr inbounds <2 x double>, ptr %d, i64 1
+  store <2 x double> %1, ptr %arrayidx1, align 8
   ret void
 
 ; CHECK-LABEL: @testv1
@@ -202,16 +202,16 @@ entry:
 ; CHECK-FISL: blr
 }
 
-define void @testv2(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %e, <2 x double> %f, <2 x double>* nocapture %d) #0 {
+define void @testv2(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %e, <2 x double> %f, ptr nocapture %d) #0 {
 entry:
   %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %a)
-  store <2 x double> %0, <2 x double>* %d, align 8
+  store <2 x double> %0, ptr %d, align 8
   %1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %e, <2 x double> %a)
-  %arrayidx1 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 1
-  store <2 x double> %1, <2 x double>* %arrayidx1, align 8
+  %arrayidx1 = getelementptr inbounds <2 x double>, ptr %d, i64 1
+  store <2 x double> %1, ptr %arrayidx1, align 8
   %2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %f, <2 x double> %a)
-  %arrayidx2 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 2
-  store <2 x double> %2, <2 x double>* %arrayidx2, align 8
+  %arrayidx2 = getelementptr inbounds <2 x double>, ptr %d, i64 2
+  store <2 x double> %2, ptr %arrayidx2, align 8
   ret void
 
 ; CHECK-LABEL: @testv2
@@ -240,19 +240,19 @@ entry:
 ; CHECK-FISL: blr
 }
 
-define void @testv3(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %e, <2 x double> %f, <2 x double>* nocapture %d) #0 {
+define void @testv3(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %e, <2 x double> %f, ptr nocapture %d) #0 {
 entry:
   %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %a)
-  store <2 x double> %0, <2 x double>* %d, align 8
+  store <2 x double> %0, ptr %d, align 8
   %1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %e, <2 x double> %a)
   %2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %1)
-  %arrayidx1 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 3
-  store <2 x double> %2, <2 x double>* %arrayidx1, align 8
+  %arrayidx1 = getelementptr inbounds <2 x double>, ptr %d, i64 3
+  store <2 x double> %2, ptr %arrayidx1, align 8
   %3 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %f, <2 x double> %a)
-  %arrayidx2 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 2
-  store <2 x double> %3, <2 x double>* %arrayidx2, align 8
-  %arrayidx3 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 1
-  store <2 x double> %1, <2 x double>* %arrayidx3, align 8
+  %arrayidx2 = getelementptr inbounds <2 x double>, ptr %d, i64 2
+  store <2 x double> %3, ptr %arrayidx2, align 8
+  %arrayidx3 = getelementptr inbounds <2 x double>, ptr %d, i64 1
+  store <2 x double> %1, ptr %arrayidx3, align 8
   ret void
 
 ; Note: There is some unavoidable changeability in this variant.  If the
@@ -298,19 +298,19 @@ entry:
 ; CHECK-FISL: blr
 }
 
-define void @testv4(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %e, <2 x double> %f, <2 x double>* nocapture %d) #0 {
+define void @testv4(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %e, <2 x double> %f, ptr nocapture %d) #0 {
 entry:
   %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %a)
-  store <2 x double> %0, <2 x double>* %d, align 8
+  store <2 x double> %0, ptr %d, align 8
   %1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %e, <2 x double> %a)
-  %arrayidx1 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 1
-  store <2 x double> %1, <2 x double>* %arrayidx1, align 8
+  %arrayidx1 = getelementptr inbounds <2 x double>, ptr %d, i64 1
+  store <2 x double> %1, ptr %arrayidx1, align 8
   %2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %1)
-  %arrayidx3 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 3
-  store <2 x double> %2, <2 x double>* %arrayidx3, align 8
+  %arrayidx3 = getelementptr inbounds <2 x double>, ptr %d, i64 3
+  store <2 x double> %2, ptr %arrayidx3, align 8
   %3 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %f, <2 x double> %a)
-  %arrayidx4 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 2
-  store <2 x double> %3, <2 x double>* %arrayidx4, align 8
+  %arrayidx4 = getelementptr inbounds <2 x double>, ptr %d, i64 2
+  store <2 x double> %3, ptr %arrayidx4, align 8
   ret void
 
 ; CHECK-LABEL: @testv4

diff  --git a/llvm/test/CodeGen/PowerPC/vsx-fma-mutate-trivial-copy.ll b/llvm/test/CodeGen/PowerPC/vsx-fma-mutate-trivial-copy.ll
index 9809287021f6..96aa58000f9b 100644
--- a/llvm/test/CodeGen/PowerPC/vsx-fma-mutate-trivial-copy.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx-fma-mutate-trivial-copy.ll
@@ -19,7 +19,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %conv2 = fpext float %add to double
   %0 = tail call double @llvm.sqrt.f64(double %conv2)
   %div4 = fdiv reassoc arcp double %conv3, %0
-  %call = tail call signext i32 bitcast (i32 (...)* @p_col_helper to i32 (double)*)(double %div4) #2
+  %call = tail call signext i32 @p_col_helper(double %div4) #2
   br label %for.body
 
 for.end:                                          ; preds = %entry

diff  --git a/llvm/test/CodeGen/PowerPC/vsx-fma-mutate-undef.ll b/llvm/test/CodeGen/PowerPC/vsx-fma-mutate-undef.ll
index 301446a9ee14..fdddcd049c03 100644
--- a/llvm/test/CodeGen/PowerPC/vsx-fma-mutate-undef.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx-fma-mutate-undef.ll
@@ -15,7 +15,7 @@ if.then:                                          ; preds = %entry
   %astype5.i.i.80.i = bitcast <4 x i32> %or.i.i.79.i to <4 x float>
   %1 = shufflevector <4 x float> %astype5.i.i.80.i, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
   %2 = shufflevector <8 x float> undef, <8 x float> %1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
-  store <8 x float> %2, <8 x float>* undef, align 32
+  store <8 x float> %2, ptr undef, align 32
   br label %if.end
 
 ; CHECK-LABEL: @acosh_float8

diff  --git a/llvm/test/CodeGen/PowerPC/vsx-fma-sp.ll b/llvm/test/CodeGen/PowerPC/vsx-fma-sp.ll
index d7db33584107..793fd9ab55dd 100644
--- a/llvm/test/CodeGen/PowerPC/vsx-fma-sp.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx-fma-sp.ll
@@ -2,13 +2,13 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -mattr=+vsx -fast-isel -O0 | FileCheck -check-prefix=CHECK-FISL %s
 ; XFAIL: *
 
-define void @test1sp(float %a, float %b, float %c, float %e, float* nocapture %d) #0 {
+define void @test1sp(float %a, float %b, float %c, float %e, ptr nocapture %d) #0 {
 entry:
   %0 = tail call float @llvm.fma.f32(float %b, float %c, float %a)
-  store float %0, float* %d, align 4
+  store float %0, ptr %d, align 4
   %1 = tail call float @llvm.fma.f32(float %b, float %e, float %a)
-  %arrayidx1 = getelementptr inbounds float, float* %d, i64 1
-  store float %1, float* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds float, ptr %d, i64 1
+  store float %1, ptr %arrayidx1, align 4
   ret void
 
 ; CHECK-LABEL: @test1sp
@@ -29,16 +29,16 @@ entry:
 ; CHECK-FISL: blr
 }
 
-define void @test2sp(float %a, float %b, float %c, float %e, float %f, float* nocapture %d) #0 {
+define void @test2sp(float %a, float %b, float %c, float %e, float %f, ptr nocapture %d) #0 {
 entry:
   %0 = tail call float @llvm.fma.f32(float %b, float %c, float %a)
-  store float %0, float* %d, align 4
+  store float %0, ptr %d, align 4
   %1 = tail call float @llvm.fma.f32(float %b, float %e, float %a)
-  %arrayidx1 = getelementptr inbounds float, float* %d, i64 1
-  store float %1, float* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds float, ptr %d, i64 1
+  store float %1, ptr %arrayidx1, align 4
   %2 = tail call float @llvm.fma.f32(float %b, float %f, float %a)
-  %arrayidx2 = getelementptr inbounds float, float* %d, i64 2
-  store float %2, float* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds float, ptr %d, i64 2
+  store float %2, ptr %arrayidx2, align 4
   ret void
 
 ; CHECK-LABEL: @test2sp
@@ -67,19 +67,19 @@ entry:
 ; CHECK-FISL: blr
 }
 
-define void @test3sp(float %a, float %b, float %c, float %e, float %f, float* nocapture %d) #0 {
+define void @test3sp(float %a, float %b, float %c, float %e, float %f, ptr nocapture %d) #0 {
 entry:
   %0 = tail call float @llvm.fma.f32(float %b, float %c, float %a)
-  store float %0, float* %d, align 4
+  store float %0, ptr %d, align 4
   %1 = tail call float @llvm.fma.f32(float %b, float %e, float %a)
   %2 = tail call float @llvm.fma.f32(float %b, float %c, float %1)
-  %arrayidx1 = getelementptr inbounds float, float* %d, i64 3
-  store float %2, float* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds float, ptr %d, i64 3
+  store float %2, ptr %arrayidx1, align 4
   %3 = tail call float @llvm.fma.f32(float %b, float %f, float %a)
-  %arrayidx2 = getelementptr inbounds float, float* %d, i64 2
-  store float %3, float* %arrayidx2, align 4
-  %arrayidx3 = getelementptr inbounds float, float* %d, i64 1
-  store float %1, float* %arrayidx3, align 4
+  %arrayidx2 = getelementptr inbounds float, ptr %d, i64 2
+  store float %3, ptr %arrayidx2, align 4
+  %arrayidx3 = getelementptr inbounds float, ptr %d, i64 1
+  store float %1, ptr %arrayidx3, align 4
   ret void
 
 ; CHECK-LABEL: @test3sp
@@ -116,19 +116,19 @@ entry:
 ; CHECK-FISL: blr
 }
 
-define void @test4sp(float %a, float %b, float %c, float %e, float %f, float* nocapture %d) #0 {
+define void @test4sp(float %a, float %b, float %c, float %e, float %f, ptr nocapture %d) #0 {
 entry:
   %0 = tail call float @llvm.fma.f32(float %b, float %c, float %a)
-  store float %0, float* %d, align 4
+  store float %0, ptr %d, align 4
   %1 = tail call float @llvm.fma.f32(float %b, float %e, float %a)
-  %arrayidx1 = getelementptr inbounds float, float* %d, i64 1
-  store float %1, float* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds float, ptr %d, i64 1
+  store float %1, ptr %arrayidx1, align 4
   %2 = tail call float @llvm.fma.f32(float %b, float %c, float %1)
-  %arrayidx3 = getelementptr inbounds float, float* %d, i64 3
-  store float %2, float* %arrayidx3, align 4
+  %arrayidx3 = getelementptr inbounds float, ptr %d, i64 3
+  store float %2, ptr %arrayidx3, align 4
   %3 = tail call float @llvm.fma.f32(float %b, float %f, float %a)
-  %arrayidx4 = getelementptr inbounds float, float* %d, i64 2
-  store float %3, float* %arrayidx4, align 4
+  %arrayidx4 = getelementptr inbounds float, ptr %d, i64 2
+  store float %3, ptr %arrayidx4, align 4
   ret void
 
 ; CHECK-LABEL: @test4sp

diff  --git a/llvm/test/CodeGen/PowerPC/vsx-ldst-builtin-le.ll b/llvm/test/CodeGen/PowerPC/vsx-ldst-builtin-le.ll
index 51a1d3e3b353..60bdfe0a79ed 100644
--- a/llvm/test/CodeGen/PowerPC/vsx-ldst-builtin-le.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx-ldst-builtin-le.ll
@@ -31,86 +31,86 @@ entry:
 ; CHECK-P9UP-LABEL: test1
 ; CHECK: lxvd2x
 ; CHECK-P9UP-DAG: lxv
-  %0 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x i32>* @vsi to i8*))
+  %0 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(ptr @vsi)
 ; CHECK: stxvd2x
 ; CHECK-P9UP-DAG: stxv
-  store <4 x i32> %0, <4 x i32>* @res_vsi, align 16
+  store <4 x i32> %0, ptr @res_vsi, align 16
 ; CHECK: lxvd2x
 ; CHECK-P9UP-DAG: lxv
-  %1 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x i32>* @vui to i8*))
+  %1 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(ptr @vui)
 ; CHECK: stxvd2x
 ; CHECK-P9UP-DAG: stxv
-  store <4 x i32> %1, <4 x i32>* @res_vui, align 16
+  store <4 x i32> %1, ptr @res_vui, align 16
 ; CHECK: lxvd2x
 ; CHECK-P9UP-DAG: lxv
-  %2 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x float>* @vf to i8*))
+  %2 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(ptr @vf)
   %3 = bitcast <4 x i32> %2 to <4 x float>
 ; CHECK: stxvd2x
 ; CHECK-P9UP-DAG: stxv
-  store <4 x float> %3, <4 x float>* @res_vf, align 16
+  store <4 x float> %3, ptr @res_vf, align 16
 ; CHECK: lxvd2x
 ; CHECK-P9UP-DAG: lxv
-  %4 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x i64>* @vsll to i8*))
+  %4 = call <2 x double> @llvm.ppc.vsx.lxvd2x(ptr @vsll)
   %5 = bitcast <2 x double> %4 to <2 x i64>
 ; CHECK: stxvd2x
 ; CHECK-P9UP-DAG: stxv
-  store <2 x i64> %5, <2 x i64>* @res_vsll, align 16
+  store <2 x i64> %5, ptr @res_vsll, align 16
 ; CHECK: lxvd2x
 ; CHECK-P9UP-DAG: lxv
-  %6 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x i64>* @vull to i8*))
+  %6 = call <2 x double> @llvm.ppc.vsx.lxvd2x(ptr @vull)
   %7 = bitcast <2 x double> %6 to <2 x i64>
 ; CHECK: stxvd2x
 ; CHECK-P9UP-DAG: stxv
-  store <2 x i64> %7, <2 x i64>* @res_vull, align 16
+  store <2 x i64> %7, ptr @res_vull, align 16
 ; CHECK: lxvd2x
 ; CHECK-P9UP-DAG: lxv
-  %8 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x double>* @vd to i8*))
+  %8 = call <2 x double> @llvm.ppc.vsx.lxvd2x(ptr @vd)
 ; CHECK: stxvd2x
 ; CHECK-P9UP-DAG: stxv
-  store <2 x double> %8, <2 x double>* @res_vd, align 16
+  store <2 x double> %8, ptr @res_vd, align 16
 ; CHECK: lxvd2x
 ; CHECK-P9UP-DAG: lxv
-  %9 = load <4 x i32>, <4 x i32>* @vsi, align 16
+  %9 = load <4 x i32>, ptr @vsi, align 16
 ; CHECK: stxvd2x
 ; CHECK-P9UP-DAG: stxv
-  call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %9, i8* bitcast (<4 x i32>* @res_vsi to i8*))
+  call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %9, ptr @res_vsi)
 ; CHECK: lxvd2x
 ; CHECK-P9UP-DAG: lxv
-  %10 = load <4 x i32>, <4 x i32>* @vui, align 16
+  %10 = load <4 x i32>, ptr @vui, align 16
 ; CHECK: stxvd2x
 ; CHECK-P9UP-DAG: stxv
-  call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %10, i8* bitcast (<4 x i32>* @res_vui to i8*))
+  call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %10, ptr @res_vui)
 ; CHECK: lxvd2x
 ; CHECK-P9UP-DAG: lxv
-  %11 = load <4 x float>, <4 x float>* @vf, align 16
+  %11 = load <4 x float>, ptr @vf, align 16
   %12 = bitcast <4 x float> %11 to <4 x i32>
 ; CHECK: stxvd2x
 ; CHECK-P9UP-DAG: stxv
-  call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %12, i8* bitcast (<4 x float>* @res_vf to i8*))
+  call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %12, ptr @res_vf)
 ; CHECK: lxvd2x
 ; CHECK-P9UP-DAG: lxv
-  %13 = load <2 x i64>, <2 x i64>* @vsll, align 16
+  %13 = load <2 x i64>, ptr @vsll, align 16
   %14 = bitcast <2 x i64> %13 to <2 x double>
 ; CHECK: stxvd2x
 ; CHECK-P9UP-DAG: stxv
-  call void @llvm.ppc.vsx.stxvd2x(<2 x double> %14, i8* bitcast (<2 x i64>* @res_vsll to i8*))
+  call void @llvm.ppc.vsx.stxvd2x(<2 x double> %14, ptr @res_vsll)
 ; CHECK: lxvd2x
 ; CHECK-P9UP-DAG: lxv
-  %15 = load <2 x i64>, <2 x i64>* @vull, align 16
+  %15 = load <2 x i64>, ptr @vull, align 16
   %16 = bitcast <2 x i64> %15 to <2 x double>
 ; CHECK: stxvd2x
 ; CHECK-P9UP-DAG: stxv
-  call void @llvm.ppc.vsx.stxvd2x(<2 x double> %16, i8* bitcast (<2 x i64>* @res_vull to i8*))
+  call void @llvm.ppc.vsx.stxvd2x(<2 x double> %16, ptr @res_vull)
 ; CHECK: lxvd2x
 ; CHECK-P9UP-DAG: lxv
-  %17 = load <2 x double>, <2 x double>* @vd, align 16
+  %17 = load <2 x double>, ptr @vd, align 16
 ; CHECK: stxvd2x
 ; CHECK-P9UP-DAG: stxv
-  call void @llvm.ppc.vsx.stxvd2x(<2 x double> %17, i8* bitcast (<2 x double>* @res_vd to i8*))
+  call void @llvm.ppc.vsx.stxvd2x(<2 x double> %17, ptr @res_vd)
   ret void
 }
 
-declare void @llvm.ppc.vsx.stxvd2x(<2 x double>, i8*)
-declare void @llvm.ppc.vsx.stxvw4x(<4 x i32>, i8*)
-declare <2 x double> @llvm.ppc.vsx.lxvd2x(i8*)
-declare <4 x i32> @llvm.ppc.vsx.lxvw4x(i8*)
+declare void @llvm.ppc.vsx.stxvd2x(<2 x double>, ptr)
+declare void @llvm.ppc.vsx.stxvw4x(<4 x i32>, ptr)
+declare <2 x double> @llvm.ppc.vsx.lxvd2x(ptr)
+declare <4 x i32> @llvm.ppc.vsx.lxvw4x(ptr)

diff  --git a/llvm/test/CodeGen/PowerPC/vsx-ldst.ll b/llvm/test/CodeGen/PowerPC/vsx-ldst.ll
index 0f3fadc32507..b7fc44d0b0e8 100644
--- a/llvm/test/CodeGen/PowerPC/vsx-ldst.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx-ldst.ll
@@ -39,17 +39,17 @@
 ; Function Attrs: nounwind
 define void @test1() {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @vsi, align 16
-  %1 = load <4 x i32>, <4 x i32>* @vui, align 16
-  %2 = load <4 x i32>, <4 x i32>* bitcast (<4 x float>* @vf to <4 x i32>*), align 16
-  %3 = load <2 x double>, <2 x double>* bitcast (<2 x i64>* @vsll to <2 x double>*), align 16
-  %4 = load <2 x double>, <2 x double>* bitcast (<2 x i64>* @vull to <2 x double>*), align 16
-  %5 = load <2 x double>, <2 x double>* @vd, align 16
-  store <4 x i32> %0, <4 x i32>* @res_vsi, align 16
-  store <4 x i32> %1, <4 x i32>* @res_vui, align 16
-  store <4 x i32> %2, <4 x i32>* bitcast (<4 x float>* @res_vf to <4 x i32>*), align 16
-  store <2 x double> %3, <2 x double>* bitcast (<2 x i64>* @res_vsll to <2 x double>*), align 16
-  store <2 x double> %4, <2 x double>* bitcast (<2 x i64>* @res_vull to <2 x double>*), align 16
-  store <2 x double> %5, <2 x double>* @res_vd, align 16
+  %0 = load <4 x i32>, ptr @vsi, align 16
+  %1 = load <4 x i32>, ptr @vui, align 16
+  %2 = load <4 x i32>, ptr @vf, align 16
+  %3 = load <2 x double>, ptr @vsll, align 16
+  %4 = load <2 x double>, ptr @vull, align 16
+  %5 = load <2 x double>, ptr @vd, align 16
+  store <4 x i32> %0, ptr @res_vsi, align 16
+  store <4 x i32> %1, ptr @res_vui, align 16
+  store <4 x i32> %2, ptr @res_vf, align 16
+  store <2 x double> %3, ptr @res_vsll, align 16
+  store <2 x double> %4, ptr @res_vull, align 16
+  store <2 x double> %5, ptr @res_vd, align 16
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vsx-minmax.ll b/llvm/test/CodeGen/PowerPC/vsx-minmax.ll
index 51dbe056f784..3d60edad1925 100644
--- a/llvm/test/CodeGen/PowerPC/vsx-minmax.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx-minmax.ll
@@ -18,41 +18,41 @@ target triple = "powerpc64-unknown-linux-gnu"
 define void @test1() #0 {
 ; CHECK-LABEL: @test1
 entry:
-  %0 = load volatile <4 x float>, <4 x float>* @vf, align 16
-  %1 = load volatile <4 x float>, <4 x float>* @vf, align 16
+  %0 = load volatile <4 x float>, ptr @vf, align 16
+  %1 = load volatile <4 x float>, ptr @vf, align 16
   %2 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %0, <4 x float> %1)
 ; CHECK: xvmaxsp
-  store <4 x float> %2, <4 x float>* @vf1, align 16
-  %3 = load <2 x double>, <2 x double>* @vd, align 16
+  store <4 x float> %2, ptr @vf1, align 16
+  %3 = load <2 x double>, ptr @vd, align 16
   %4 = tail call <2 x double> @llvm.ppc.vsx.xvmaxdp(<2 x double> %3, <2 x double> %3)
 ; CHECK: xvmaxdp
-  store <2 x double> %4, <2 x double>* @vd1, align 16
-  %5 = load volatile <4 x float>, <4 x float>* @vf, align 16
-  %6 = load volatile <4 x float>, <4 x float>* @vf, align 16
+  store <2 x double> %4, ptr @vd1, align 16
+  %5 = load volatile <4 x float>, ptr @vf, align 16
+  %6 = load volatile <4 x float>, ptr @vf, align 16
   %7 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %5, <4 x float> %6)
 ; CHECK: xvmaxsp
-  store <4 x float> %7, <4 x float>* @vf2, align 16
-  %8 = load volatile <4 x float>, <4 x float>* @vf, align 16
-  %9 = load volatile <4 x float>, <4 x float>* @vf, align 16
+  store <4 x float> %7, ptr @vf2, align 16
+  %8 = load volatile <4 x float>, ptr @vf, align 16
+  %9 = load volatile <4 x float>, ptr @vf, align 16
   %10 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %8, <4 x float> %9)
 ; CHECK: xvminsp
-  store <4 x float> %10, <4 x float>* @vf3, align 16
-  %11 = load <2 x double>, <2 x double>* @vd, align 16
+  store <4 x float> %10, ptr @vf3, align 16
+  %11 = load <2 x double>, ptr @vd, align 16
   %12 = tail call <2 x double> @llvm.ppc.vsx.xvmindp(<2 x double> %11, <2 x double> %11)
 ; CHECK: xvmindp
-  store <2 x double> %12, <2 x double>* @vd2, align 16
-  %13 = load volatile <4 x float>, <4 x float>* @vf, align 16
-  %14 = load volatile <4 x float>, <4 x float>* @vf, align 16
+  store <2 x double> %12, ptr @vd2, align 16
+  %13 = load volatile <4 x float>, ptr @vf, align 16
+  %14 = load volatile <4 x float>, ptr @vf, align 16
   %15 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %13, <4 x float> %14)
 ; CHECK: xvminsp
-  store <4 x float> %15, <4 x float>* @vf4, align 16
-  %16 = load double, double* @d, align 8
+  store <4 x float> %15, ptr @vf4, align 16
+  %16 = load double, ptr @d, align 8
   %17 = tail call double @llvm.ppc.vsx.xsmaxdp(double %16, double %16)
 ; CHECK: xsmaxdp
-  store double %17, double* @d1, align 8
+  store double %17, ptr @d1, align 8
   %18 = tail call double @llvm.ppc.vsx.xsmindp(double %16, double %16)
 ; CHECK: xsmindp
-  store double %18, double* @d2, align 8
+  store double %18, ptr @d2, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/vsx-p8.ll b/llvm/test/CodeGen/PowerPC/vsx-p8.ll
index f42eb6ab0d74..a0cd7991b693 100644
--- a/llvm/test/CodeGen/PowerPC/vsx-p8.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx-p8.ll
@@ -7,8 +7,8 @@ target triple = "powerpc64-unknown-linux-gnu"
 
 ; Unaligned loads/stores on P8 and later should use VSX where possible.
 
-define <2 x double> @test28u(<2 x double>* %a) {
-  %v = load <2 x double>, <2 x double>* %a, align 8
+define <2 x double> @test28u(ptr %a) {
+  %v = load <2 x double>, ptr %a, align 8
   ret <2 x double> %v
 
 ; CHECK-LABEL: @test28u
@@ -16,8 +16,8 @@ define <2 x double> @test28u(<2 x double>* %a) {
 ; CHECK: blr
 }
 
-define void @test29u(<2 x double>* %a, <2 x double> %b) {
-  store <2 x double> %b, <2 x double>* %a, align 8
+define void @test29u(ptr %a, <2 x double> %b) {
+  store <2 x double> %b, ptr %a, align 8
   ret void
 
 ; CHECK-LABEL: @test29u
@@ -25,8 +25,8 @@ define void @test29u(<2 x double>* %a, <2 x double> %b) {
 ; CHECK: blr
 }
 
-define <4 x float> @test32u(<4 x float>* %a) {
-  %v = load <4 x float>, <4 x float>* %a, align 8
+define <4 x float> @test32u(ptr %a) {
+  %v = load <4 x float>, ptr %a, align 8
   ret <4 x float> %v
 
 ; CHECK-REG-LABEL: @test32u
@@ -38,8 +38,8 @@ define <4 x float> @test32u(<4 x float>* %a) {
 ; CHECK-FISL: blr
 }
 
-define void @test33u(<4 x float>* %a, <4 x float> %b) {
-  store <4 x float> %b, <4 x float>* %a, align 8
+define void @test33u(ptr %a, <4 x float> %b) {
+  store <4 x float> %b, ptr %a, align 8
   ret void
 
 ; CHECK-REG-LABEL: @test33u

diff  --git a/llvm/test/CodeGen/PowerPC/vsx-p9.ll b/llvm/test/CodeGen/PowerPC/vsx-p9.ll
index 636a53a81149..b7598c6de4c1 100644
--- a/llvm/test/CodeGen/PowerPC/vsx-p9.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx-p9.ll
@@ -32,8 +32,8 @@
 define void @_Z4testv() {
 entry:
 ; CHECK-LABEL: @_Z4testv
-  %0 = load <16 x i8>, <16 x i8>* @uca, align 16
-  %1 = load <16 x i8>, <16 x i8>* @ucb, align 16
+  %0 = load <16 x i8>, ptr @uca, align 16
+  %1 = load <16 x i8>, ptr @ucb, align 16
   %add.i = add <16 x i8> %1, %0
   tail call void (...) @sink(<16 x i8> %add.i)
 ; CHECK: lxv 34, 0(3)
@@ -41,8 +41,8 @@ entry:
 ; CHECK: vaddubm 2, 3, 2
 ; CHECK: stxv 34,
 ; CHECK: bl sink
-  %2 = load <16 x i8>, <16 x i8>* @sca, align 16
-  %3 = load <16 x i8>, <16 x i8>* @scb, align 16
+  %2 = load <16 x i8>, ptr @sca, align 16
+  %3 = load <16 x i8>, ptr @scb, align 16
   %add.i22 = add <16 x i8> %3, %2
   tail call void (...) @sink(<16 x i8> %add.i22)
 ; CHECK: lxv 34, 0(3)
@@ -50,8 +50,8 @@ entry:
 ; CHECK: vaddubm 2, 3, 2
 ; CHECK: stxv 34,
 ; CHECK: bl sink
-  %4 = load <8 x i16>, <8 x i16>* @usa, align 16
-  %5 = load <8 x i16>, <8 x i16>* @usb, align 16
+  %4 = load <8 x i16>, ptr @usa, align 16
+  %5 = load <8 x i16>, ptr @usb, align 16
   %add.i21 = add <8 x i16> %5, %4
   tail call void (...) @sink(<8 x i16> %add.i21)
 ; CHECK: lxv 34, 0(3)
@@ -59,8 +59,8 @@ entry:
 ; CHECK: vadduhm 2, 3, 2
 ; CHECK: stxv 34,
 ; CHECK: bl sink
-  %6 = load <8 x i16>, <8 x i16>* @ssa, align 16
-  %7 = load <8 x i16>, <8 x i16>* @ssb, align 16
+  %6 = load <8 x i16>, ptr @ssa, align 16
+  %7 = load <8 x i16>, ptr @ssb, align 16
   %add.i20 = add <8 x i16> %7, %6
   tail call void (...) @sink(<8 x i16> %add.i20)
 ; CHECK: lxv 34, 0(3)
@@ -68,8 +68,8 @@ entry:
 ; CHECK: vadduhm 2, 3, 2
 ; CHECK: stxv 34,
 ; CHECK: bl sink
-  %8 = load <4 x i32>, <4 x i32>* @uia, align 16
-  %9 = load <4 x i32>, <4 x i32>* @uib, align 16
+  %8 = load <4 x i32>, ptr @uia, align 16
+  %9 = load <4 x i32>, ptr @uib, align 16
   %add.i19 = add <4 x i32> %9, %8
   tail call void (...) @sink(<4 x i32> %add.i19)
 ; CHECK: lxv 34, 0(3)
@@ -77,8 +77,8 @@ entry:
 ; CHECK: vadduwm 2, 3, 2
 ; CHECK: stxv 34,
 ; CHECK: bl sink
-  %10 = load <4 x i32>, <4 x i32>* @sia, align 16
-  %11 = load <4 x i32>, <4 x i32>* @sib, align 16
+  %10 = load <4 x i32>, ptr @sia, align 16
+  %11 = load <4 x i32>, ptr @sib, align 16
   %add.i18 = add <4 x i32> %11, %10
   tail call void (...) @sink(<4 x i32> %add.i18)
 ; CHECK: lxv 34, 0(3)
@@ -86,8 +86,8 @@ entry:
 ; CHECK: vadduwm 2, 3, 2
 ; CHECK: stxv 34,
 ; CHECK: bl sink
-  %12 = load <2 x i64>, <2 x i64>* @ulla, align 16
-  %13 = load <2 x i64>, <2 x i64>* @ullb, align 16
+  %12 = load <2 x i64>, ptr @ulla, align 16
+  %13 = load <2 x i64>, ptr @ullb, align 16
   %add.i17 = add <2 x i64> %13, %12
   tail call void (...) @sink(<2 x i64> %add.i17)
 ; CHECK: lxv 34, 0(3)
@@ -95,8 +95,8 @@ entry:
 ; CHECK: vaddudm 2, 3, 2
 ; CHECK: stxv 34,
 ; CHECK: bl sink
-  %14 = load <2 x i64>, <2 x i64>* @slla, align 16
-  %15 = load <2 x i64>, <2 x i64>* @sllb, align 16
+  %14 = load <2 x i64>, ptr @slla, align 16
+  %15 = load <2 x i64>, ptr @sllb, align 16
   %add.i16 = add <2 x i64> %15, %14
   tail call void (...) @sink(<2 x i64> %add.i16)
 ; CHECK: lxv 34, 0(3)
@@ -104,8 +104,8 @@ entry:
 ; CHECK: vaddudm 2, 3, 2
 ; CHECK: stxv 34,
 ; CHECK: bl sink
-  %16 = load <1 x i128>, <1 x i128>* @uxa, align 16
-  %17 = load <1 x i128>, <1 x i128>* @uxb, align 16
+  %16 = load <1 x i128>, ptr @uxa, align 16
+  %17 = load <1 x i128>, ptr @uxb, align 16
   %add.i15 = add <1 x i128> %17, %16
   tail call void (...) @sink(<1 x i128> %add.i15)
 ; CHECK: lxv 34, 0(3)
@@ -113,8 +113,8 @@ entry:
 ; CHECK: vadduqm 2, 3, 2
 ; CHECK: stxv 34,
 ; CHECK: bl sink
-  %18 = load <1 x i128>, <1 x i128>* @sxa, align 16
-  %19 = load <1 x i128>, <1 x i128>* @sxb, align 16
+  %18 = load <1 x i128>, ptr @sxa, align 16
+  %19 = load <1 x i128>, ptr @sxb, align 16
   %add.i14 = add <1 x i128> %19, %18
   tail call void (...) @sink(<1 x i128> %add.i14)
 ; CHECK: lxv 34, 0(3)
@@ -122,8 +122,8 @@ entry:
 ; CHECK: vadduqm 2, 3, 2
 ; CHECK: stxv 34,
 ; CHECK: bl sink
-  %20 = load <4 x float>, <4 x float>* @vfa, align 16
-  %21 = load <4 x float>, <4 x float>* @vfb, align 16
+  %20 = load <4 x float>, ptr @vfa, align 16
+  %21 = load <4 x float>, ptr @vfb, align 16
   %add.i13 = fadd <4 x float> %20, %21
   tail call void (...) @sink(<4 x float> %add.i13)
 ; CHECK: lxv 0, 0(3)
@@ -131,8 +131,8 @@ entry:
 ; CHECK: xvaddsp 34, 0, 1
 ; CHECK: stxv 34,
 ; CHECK: bl sink
-  %22 = load <2 x double>, <2 x double>* @vda, align 16
-  %23 = load <2 x double>, <2 x double>* @vdb, align 16
+  %22 = load <2 x double>, ptr @vda, align 16
+  %23 = load <2 x double>, ptr @vdb, align 16
   %add.i12 = fadd <2 x double> %22, %23
   tail call void (...) @sink(<2 x double> %add.i12)
 ; CHECK: lxv 0, 0(3)
@@ -343,50 +343,50 @@ entry:
 declare <4 x float>@llvm.ppc.vsx.xvcvhpsp(<8 x i16>)
 
 ; Function Attrs: nounwind readnone
-define <4 x i32> @testLXVL(i8* %a, i64 %b) {
+define <4 x i32> @testLXVL(ptr %a, i64 %b) {
 entry:
-  %0 = tail call <4 x i32> @llvm.ppc.vsx.lxvl(i8* %a, i64 %b)
+  %0 = tail call <4 x i32> @llvm.ppc.vsx.lxvl(ptr %a, i64 %b)
   ret <4 x i32> %0
 ; CHECK-LABEL: testLXVL
 ; CHECK: lxvl 34, 3, 4
 ; CHECK: blr
 }
 ; Function Attrs: nounwind readnone
-declare <4 x i32> @llvm.ppc.vsx.lxvl(i8*, i64)
+declare <4 x i32> @llvm.ppc.vsx.lxvl(ptr, i64)
 
-define void @testSTXVL(<4 x i32> %a, i8* %b, i64 %c) {
+define void @testSTXVL(<4 x i32> %a, ptr %b, i64 %c) {
 entry:
-  tail call void @llvm.ppc.vsx.stxvl(<4 x i32> %a, i8* %b, i64 %c)
+  tail call void @llvm.ppc.vsx.stxvl(<4 x i32> %a, ptr %b, i64 %c)
   ret void
 ; CHECK-LABEL: testSTXVL
 ; CHECK: stxvl 34, 5, 6
 ; CHECK: blr
 }
 ; Function Attrs: nounwind readnone
-declare void @llvm.ppc.vsx.stxvl(<4 x i32>, i8*, i64)
+declare void @llvm.ppc.vsx.stxvl(<4 x i32>, ptr, i64)
 
 ; Function Attrs: nounwind readnone
-define <4 x i32> @testLXVLL(i8* %a, i64 %b) {
+define <4 x i32> @testLXVLL(ptr %a, i64 %b) {
 entry:
-  %0 = tail call <4 x i32> @llvm.ppc.vsx.lxvll(i8* %a, i64 %b)
+  %0 = tail call <4 x i32> @llvm.ppc.vsx.lxvll(ptr %a, i64 %b)
   ret <4 x i32> %0
 ; CHECK-LABEL: testLXVLL
 ; CHECK: lxvll 34, 3, 4
 ; CHECK: blr
 }
 ; Function Attrs: nounwind readnone
-declare <4 x i32> @llvm.ppc.vsx.lxvll(i8*, i64)
+declare <4 x i32> @llvm.ppc.vsx.lxvll(ptr, i64)
 
-define void @testSTXVLL(<4 x i32> %a, i8* %b, i64 %c) {
+define void @testSTXVLL(<4 x i32> %a, ptr %b, i64 %c) {
 entry:
-  tail call void @llvm.ppc.vsx.stxvll(<4 x i32> %a, i8* %b, i64 %c)
+  tail call void @llvm.ppc.vsx.stxvll(<4 x i32> %a, ptr %b, i64 %c)
   ret void
 ; CHECK-LABEL: testSTXVLL
 ; CHECK: stxvll 34, 5, 6
 ; CHECK: blr
 }
 ; Function Attrs: nounwind readnone
-declare void @llvm.ppc.vsx.stxvll(<4 x i32>, i8*, i64)
+declare void @llvm.ppc.vsx.stxvll(<4 x i32>, ptr, i64)
 
 define <4 x i32> @test0(<4 x i32> %a) local_unnamed_addr #0 {
 entry:
@@ -421,10 +421,8 @@ define signext i32 @func1() {
 ; CHECK: blr
 entry:
   %a = alloca [4 x i32], align 4
-  %0 = bitcast [4 x i32]* %a to i8*
-  call void @llvm.memset.p0i8.i64(i8* nonnull align 4 %0, i8 0, i64 16, i1 false)
-  %arraydecay = getelementptr inbounds [4 x i32], [4 x i32]* %a, i64 0, i64 0
-  %call = call signext i32 @callee(i32* nonnull %arraydecay) #3
+  call void @llvm.memset.p0.i64(ptr nonnull align 4 %a, i8 0, i64 16, i1 false)
+  %call = call signext i32 @callee(ptr nonnull %a) #3
   ret i32 %call
 }
 
@@ -440,12 +438,10 @@ define signext i32 @func2() {
 ; CHECK: blr
 entry:
   %a = alloca [16 x i32], align 4
-  %0 = bitcast [16 x i32]* %a to i8*
-  call void @llvm.memset.p0i8.i64(i8* nonnull align 4 %0, i8 0, i64 64, i1 false)
-  %arraydecay = getelementptr inbounds [16 x i32], [16 x i32]* %a, i64 0, i64 0
-  %call = call signext i32 @callee(i32* nonnull %arraydecay) #3
+  call void @llvm.memset.p0.i64(ptr nonnull align 4 %a, i8 0, i64 64, i1 false)
+  %call = call signext i32 @callee(ptr nonnull %a) #3
   ret i32 %call
 }
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #1
-declare signext i32 @callee(i32*) local_unnamed_addr #2
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1) #1
+declare signext i32 @callee(ptr) local_unnamed_addr #2

diff  --git a/llvm/test/CodeGen/PowerPC/vsx-partword-int-loads-and-stores.ll b/llvm/test/CodeGen/PowerPC/vsx-partword-int-loads-and-stores.ll
index a9f5ba69dd4f..8ade21e7b40f 100644
--- a/llvm/test/CodeGen/PowerPC/vsx-partword-int-loads-and-stores.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx-partword-int-loads-and-stores.ll
@@ -2,9 +2,9 @@
 ; RUN: llc -mcpu=pwr9 -mtriple=powerpc64-unknown-unknown < %s | FileCheck %s \
 ; RUN:   --check-prefix=CHECK-BE
 ; Function Attrs: norecurse nounwind readonly
-define <16 x i8> @vecucuc(i8* nocapture readonly %ptr) {
+define <16 x i8> @vecucuc(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i8, i8* %ptr, align 1
+  %0 = load i8, ptr %ptr, align 1
   %splat.splatinsert = insertelement <16 x i8> undef, i8 %0, i32 0
   %splat.splat = shufflevector <16 x i8> %splat.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
   ret <16 x i8> %splat.splat
@@ -17,9 +17,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <8 x i16> @vecusuc(i8* nocapture readonly %ptr) {
+define <8 x i16> @vecusuc(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i8, i8* %ptr, align 1
+  %0 = load i8, ptr %ptr, align 1
   %conv = zext i8 %0 to i16
   %splat.splatinsert = insertelement <8 x i16> undef, i16 %conv, i32 0
   %splat.splat = shufflevector <8 x i16> %splat.splatinsert, <8 x i16> undef, <8 x i32> zeroinitializer
@@ -33,9 +33,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @vecuiuc(i8* nocapture readonly %ptr) {
+define <4 x i32> @vecuiuc(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i8, i8* %ptr, align 1
+  %0 = load i8, ptr %ptr, align 1
   %conv = zext i8 %0 to i32
   %splat.splatinsert = insertelement <4 x i32> undef, i32 %conv, i32 0
   %splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
@@ -49,9 +49,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @veculuc(i8* nocapture readonly %ptr) {
+define <2 x i64> @veculuc(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i8, i8* %ptr, align 1
+  %0 = load i8, ptr %ptr, align 1
   %conv = zext i8 %0 to i64
   %splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
   %splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
@@ -65,9 +65,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <16 x i8> @vecscuc(i8* nocapture readonly %ptr) {
+define <16 x i8> @vecscuc(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i8, i8* %ptr, align 1
+  %0 = load i8, ptr %ptr, align 1
   %splat.splatinsert = insertelement <16 x i8> undef, i8 %0, i32 0
   %splat.splat = shufflevector <16 x i8> %splat.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
   ret <16 x i8> %splat.splat
@@ -80,9 +80,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <8 x i16> @vecssuc(i8* nocapture readonly %ptr) {
+define <8 x i16> @vecssuc(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i8, i8* %ptr, align 1
+  %0 = load i8, ptr %ptr, align 1
   %conv = zext i8 %0 to i16
   %splat.splatinsert = insertelement <8 x i16> undef, i16 %conv, i32 0
   %splat.splat = shufflevector <8 x i16> %splat.splatinsert, <8 x i16> undef, <8 x i32> zeroinitializer
@@ -96,9 +96,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @vecsiuc(i8* nocapture readonly %ptr) {
+define <4 x i32> @vecsiuc(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i8, i8* %ptr, align 1
+  %0 = load i8, ptr %ptr, align 1
   %conv = zext i8 %0 to i32
   %splat.splatinsert = insertelement <4 x i32> undef, i32 %conv, i32 0
   %splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
@@ -112,9 +112,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @vecsluc(i8* nocapture readonly %ptr) {
+define <2 x i64> @vecsluc(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i8, i8* %ptr, align 1
+  %0 = load i8, ptr %ptr, align 1
   %conv = zext i8 %0 to i64
   %splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
   %splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
@@ -128,9 +128,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <4 x float> @vecfuc(i8* nocapture readonly %ptr) {
+define <4 x float> @vecfuc(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i8, i8* %ptr, align 1
+  %0 = load i8, ptr %ptr, align 1
   %conv = uitofp i8 %0 to float
   %splat.splatinsert = insertelement <4 x float> undef, float %conv, i32 0
   %splat.splat = shufflevector <4 x float> %splat.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
@@ -148,9 +148,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x double> @vecduc(i8* nocapture readonly %ptr) {
+define <2 x double> @vecduc(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i8, i8* %ptr, align 1
+  %0 = load i8, ptr %ptr, align 1
   %conv = uitofp i8 %0 to double
   %splat.splatinsert = insertelement <2 x double> undef, double %conv, i32 0
   %splat.splat = shufflevector <2 x double> %splat.splatinsert, <2 x double> undef, <2 x i32> zeroinitializer
@@ -166,9 +166,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <16 x i8> @vecucsc(i8* nocapture readonly %ptr) {
+define <16 x i8> @vecucsc(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i8, i8* %ptr, align 1
+  %0 = load i8, ptr %ptr, align 1
   %splat.splatinsert = insertelement <16 x i8> undef, i8 %0, i32 0
   %splat.splat = shufflevector <16 x i8> %splat.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
   ret <16 x i8> %splat.splat
@@ -181,9 +181,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @vecuisc(i8* nocapture readonly %ptr) {
+define <4 x i32> @vecuisc(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i8, i8* %ptr, align 1
+  %0 = load i8, ptr %ptr, align 1
   %conv = sext i8 %0 to i32
   %splat.splatinsert = insertelement <4 x i32> undef, i32 %conv, i32 0
   %splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
@@ -199,9 +199,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @veculsc(i8* nocapture readonly %ptr) {
+define <2 x i64> @veculsc(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i8, i8* %ptr, align 1
+  %0 = load i8, ptr %ptr, align 1
   %conv = sext i8 %0 to i64
   %splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
   %splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
@@ -217,9 +217,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <16 x i8> @vecscsc(i8* nocapture readonly %ptr) {
+define <16 x i8> @vecscsc(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i8, i8* %ptr, align 1
+  %0 = load i8, ptr %ptr, align 1
   %splat.splatinsert = insertelement <16 x i8> undef, i8 %0, i32 0
   %splat.splat = shufflevector <16 x i8> %splat.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
   ret <16 x i8> %splat.splat
@@ -232,9 +232,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @vecsisc(i8* nocapture readonly %ptr) {
+define <4 x i32> @vecsisc(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i8, i8* %ptr, align 1
+  %0 = load i8, ptr %ptr, align 1
   %conv = sext i8 %0 to i32
   %splat.splatinsert = insertelement <4 x i32> undef, i32 %conv, i32 0
   %splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
@@ -250,9 +250,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @vecslsc(i8* nocapture readonly %ptr) {
+define <2 x i64> @vecslsc(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i8, i8* %ptr, align 1
+  %0 = load i8, ptr %ptr, align 1
   %conv = sext i8 %0 to i64
   %splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
   %splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
@@ -268,9 +268,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <4 x float> @vecfsc(i8* nocapture readonly %ptr) {
+define <4 x float> @vecfsc(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i8, i8* %ptr, align 1
+  %0 = load i8, ptr %ptr, align 1
   %conv = sitofp i8 %0 to float
   %splat.splatinsert = insertelement <4 x float> undef, float %conv, i32 0
   %splat.splat = shufflevector <4 x float> %splat.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
@@ -290,9 +290,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x double> @vecdsc(i8* nocapture readonly %ptr) {
+define <2 x double> @vecdsc(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i8, i8* %ptr, align 1
+  %0 = load i8, ptr %ptr, align 1
   %conv = sitofp i8 %0 to double
   %splat.splatinsert = insertelement <2 x double> undef, double %conv, i32 0
   %splat.splat = shufflevector <2 x double> %splat.splatinsert, <2 x double> undef, <2 x i32> zeroinitializer
@@ -310,9 +310,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <16 x i8> @vecucus(i16* nocapture readonly %ptr) {
+define <16 x i8> @vecucus(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i16, i16* %ptr, align 2
+  %0 = load i16, ptr %ptr, align 2
   %conv = trunc i16 %0 to i8
   %splat.splatinsert = insertelement <16 x i8> undef, i8 %conv, i32 0
   %splat.splat = shufflevector <16 x i8> %splat.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
@@ -327,9 +327,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <8 x i16> @vecusus(i16* nocapture readonly %ptr) {
+define <8 x i16> @vecusus(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i16, i16* %ptr, align 2
+  %0 = load i16, ptr %ptr, align 2
   %splat.splatinsert = insertelement <8 x i16> undef, i16 %0, i32 0
   %splat.splat = shufflevector <8 x i16> %splat.splatinsert, <8 x i16> undef, <8 x i32> zeroinitializer
   ret <8 x i16> %splat.splat
@@ -342,9 +342,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @vecuius(i16* nocapture readonly %ptr) {
+define <4 x i32> @vecuius(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i16, i16* %ptr, align 2
+  %0 = load i16, ptr %ptr, align 2
   %conv = zext i16 %0 to i32
   %splat.splatinsert = insertelement <4 x i32> undef, i32 %conv, i32 0
   %splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
@@ -358,9 +358,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @veculus(i16* nocapture readonly %ptr) {
+define <2 x i64> @veculus(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i16, i16* %ptr, align 2
+  %0 = load i16, ptr %ptr, align 2
   %conv = zext i16 %0 to i64
   %splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
   %splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
@@ -374,9 +374,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <16 x i8> @vecscus(i16* nocapture readonly %ptr) {
+define <16 x i8> @vecscus(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i16, i16* %ptr, align 2
+  %0 = load i16, ptr %ptr, align 2
   %conv = trunc i16 %0 to i8
   %splat.splatinsert = insertelement <16 x i8> undef, i8 %conv, i32 0
   %splat.splat = shufflevector <16 x i8> %splat.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
@@ -391,9 +391,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <8 x i16> @vecssus(i16* nocapture readonly %ptr) {
+define <8 x i16> @vecssus(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i16, i16* %ptr, align 2
+  %0 = load i16, ptr %ptr, align 2
   %splat.splatinsert = insertelement <8 x i16> undef, i16 %0, i32 0
   %splat.splat = shufflevector <8 x i16> %splat.splatinsert, <8 x i16> undef, <8 x i32> zeroinitializer
   ret <8 x i16> %splat.splat
@@ -406,9 +406,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @vecsius(i16* nocapture readonly %ptr) {
+define <4 x i32> @vecsius(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i16, i16* %ptr, align 2
+  %0 = load i16, ptr %ptr, align 2
   %conv = zext i16 %0 to i32
   %splat.splatinsert = insertelement <4 x i32> undef, i32 %conv, i32 0
   %splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
@@ -422,9 +422,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @vecslus(i16* nocapture readonly %ptr) {
+define <2 x i64> @vecslus(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i16, i16* %ptr, align 2
+  %0 = load i16, ptr %ptr, align 2
   %conv = zext i16 %0 to i64
   %splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
   %splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
@@ -438,9 +438,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <4 x float> @vecfus(i16* nocapture readonly %ptr) {
+define <4 x float> @vecfus(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i16, i16* %ptr, align 2
+  %0 = load i16, ptr %ptr, align 2
   %conv = uitofp i16 %0 to float
   %splat.splatinsert = insertelement <4 x float> undef, float %conv, i32 0
   %splat.splat = shufflevector <4 x float> %splat.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
@@ -458,9 +458,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x double> @vecdus(i16* nocapture readonly %ptr) {
+define <2 x double> @vecdus(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i16, i16* %ptr, align 2
+  %0 = load i16, ptr %ptr, align 2
   %conv = uitofp i16 %0 to double
   %splat.splatinsert = insertelement <2 x double> undef, double %conv, i32 0
   %splat.splat = shufflevector <2 x double> %splat.splatinsert, <2 x double> undef, <2 x i32> zeroinitializer
@@ -476,9 +476,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <16 x i8> @vecucss(i16* nocapture readonly %ptr) {
+define <16 x i8> @vecucss(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i16, i16* %ptr, align 2
+  %0 = load i16, ptr %ptr, align 2
   %conv = trunc i16 %0 to i8
   %splat.splatinsert = insertelement <16 x i8> undef, i8 %conv, i32 0
   %splat.splat = shufflevector <16 x i8> %splat.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
@@ -493,9 +493,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @vecuiss(i16* nocapture readonly %ptr) {
+define <4 x i32> @vecuiss(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i16, i16* %ptr, align 2
+  %0 = load i16, ptr %ptr, align 2
   %conv = sext i16 %0 to i32
   %splat.splatinsert = insertelement <4 x i32> undef, i32 %conv, i32 0
   %splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
@@ -511,9 +511,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @veculss(i16* nocapture readonly %ptr) {
+define <2 x i64> @veculss(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i16, i16* %ptr, align 2
+  %0 = load i16, ptr %ptr, align 2
   %conv = sext i16 %0 to i64
   %splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
   %splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
@@ -529,9 +529,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <16 x i8> @vecscss(i16* nocapture readonly %ptr) {
+define <16 x i8> @vecscss(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i16, i16* %ptr, align 2
+  %0 = load i16, ptr %ptr, align 2
   %conv = trunc i16 %0 to i8
   %splat.splatinsert = insertelement <16 x i8> undef, i8 %conv, i32 0
   %splat.splat = shufflevector <16 x i8> %splat.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
@@ -546,9 +546,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @vecsiss(i16* nocapture readonly %ptr) {
+define <4 x i32> @vecsiss(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i16, i16* %ptr, align 2
+  %0 = load i16, ptr %ptr, align 2
   %conv = sext i16 %0 to i32
   %splat.splatinsert = insertelement <4 x i32> undef, i32 %conv, i32 0
   %splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
@@ -564,9 +564,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @vecslss(i16* nocapture readonly %ptr) {
+define <2 x i64> @vecslss(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i16, i16* %ptr, align 2
+  %0 = load i16, ptr %ptr, align 2
   %conv = sext i16 %0 to i64
   %splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
   %splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
@@ -582,9 +582,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <4 x float> @vecfss(i16* nocapture readonly %ptr) {
+define <4 x float> @vecfss(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i16, i16* %ptr, align 2
+  %0 = load i16, ptr %ptr, align 2
   %conv = sitofp i16 %0 to float
   %splat.splatinsert = insertelement <4 x float> undef, float %conv, i32 0
   %splat.splat = shufflevector <4 x float> %splat.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
@@ -604,9 +604,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define <2 x double> @vecdss(i16* nocapture readonly %ptr) {
+define <2 x double> @vecdss(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i16, i16* %ptr, align 2
+  %0 = load i16, ptr %ptr, align 2
   %conv = sitofp i16 %0 to double
   %splat.splatinsert = insertelement <2 x double> undef, double %conv, i32 0
   %splat.splat = shufflevector <2 x double> %splat.splatinsert, <2 x double> undef, <2 x i32> zeroinitializer
@@ -624,10 +624,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storefsc(float %f, i8* nocapture %ptr) {
+define void @storefsc(float %f, ptr nocapture %ptr) {
 entry:
   %conv = fptosi float %f to i8
-  store i8 %conv, i8* %ptr, align 1
+  store i8 %conv, ptr %ptr, align 1
   ret void
 ; CHECK-LABEL: storefsc
 ; CHECK: xscvdpsxws 0, 1
@@ -638,10 +638,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storedsc(double %d, i8* nocapture %ptr) {
+define void @storedsc(double %d, ptr nocapture %ptr) {
 entry:
   %conv = fptosi double %d to i8
-  store i8 %conv, i8* %ptr, align 1
+  store i8 %conv, ptr %ptr, align 1
   ret void
 ; CHECK-LABEL: storedsc
 ; CHECK: xscvdpsxws 0, 1
@@ -652,10 +652,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storevcsc0(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc0(<16 x i8> %v, ptr nocapture %ptr) {
 entry:
   %vecext = extractelement <16 x i8> %v, i32 0
-  store i8 %vecext, i8* %ptr, align 1
+  store i8 %vecext, ptr %ptr, align 1
   ret void
 ; CHECK-LABEL: storevcsc0
 ; CHECK: vsldoi 2, 2, 2, 8
@@ -666,10 +666,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storevcsc1(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc1(<16 x i8> %v, ptr nocapture %ptr) {
 entry:
   %vecext = extractelement <16 x i8> %v, i32 1
-  store i8 %vecext, i8* %ptr, align 1
+  store i8 %vecext, ptr %ptr, align 1
   ret void
 ; CHECK-LABEL: storevcsc1
 ; CHECK: vsldoi 2, 2, 2, 7
@@ -680,10 +680,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storevcsc2(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc2(<16 x i8> %v, ptr nocapture %ptr) {
 entry:
   %vecext = extractelement <16 x i8> %v, i32 2
-  store i8 %vecext, i8* %ptr, align 1
+  store i8 %vecext, ptr %ptr, align 1
   ret void
 ; CHECK-LABEL: storevcsc2
 ; CHECK: vsldoi 2, 2, 2, 6
@@ -694,10 +694,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storevcsc3(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc3(<16 x i8> %v, ptr nocapture %ptr) {
 entry:
   %vecext = extractelement <16 x i8> %v, i32 3
-  store i8 %vecext, i8* %ptr, align 1
+  store i8 %vecext, ptr %ptr, align 1
   ret void
 ; CHECK-LABEL: storevcsc3
 ; CHECK: vsldoi 2, 2, 2, 5
@@ -708,10 +708,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storevcsc4(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc4(<16 x i8> %v, ptr nocapture %ptr) {
 entry:
   %vecext = extractelement <16 x i8> %v, i32 4
-  store i8 %vecext, i8* %ptr, align 1
+  store i8 %vecext, ptr %ptr, align 1
   ret void
 ; CHECK-LABEL: storevcsc4
 ; CHECK: vsldoi 2, 2, 2, 4
@@ -722,10 +722,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storevcsc5(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc5(<16 x i8> %v, ptr nocapture %ptr) {
 entry:
   %vecext = extractelement <16 x i8> %v, i32 5
-  store i8 %vecext, i8* %ptr, align 1
+  store i8 %vecext, ptr %ptr, align 1
   ret void
 ; CHECK-LABEL: storevcsc5
 ; CHECK: vsldoi 2, 2, 2, 3
@@ -736,10 +736,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storevcsc6(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc6(<16 x i8> %v, ptr nocapture %ptr) {
 entry:
   %vecext = extractelement <16 x i8> %v, i32 6
-  store i8 %vecext, i8* %ptr, align 1
+  store i8 %vecext, ptr %ptr, align 1
   ret void
 ; CHECK-LABEL: storevcsc6
 ; CHECK: vsldoi 2, 2, 2, 2
@@ -750,10 +750,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storevcsc7(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc7(<16 x i8> %v, ptr nocapture %ptr) {
 entry:
   %vecext = extractelement <16 x i8> %v, i32 7
-  store i8 %vecext, i8* %ptr, align 1
+  store i8 %vecext, ptr %ptr, align 1
   ret void
 ; CHECK-LABEL: storevcsc7
 ; CHECK: vsldoi 2, 2, 2, 1
@@ -763,10 +763,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storevcsc8(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc8(<16 x i8> %v, ptr nocapture %ptr) {
 entry:
   %vecext = extractelement <16 x i8> %v, i32 8
-  store i8 %vecext, i8* %ptr, align 1
+  store i8 %vecext, ptr %ptr, align 1
   ret void
 ; CHECK-LABEL: storevcsc8
 ; CHECK: stxsibx 34, 0, 5
@@ -776,10 +776,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storevcsc9(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc9(<16 x i8> %v, ptr nocapture %ptr) {
 entry:
   %vecext = extractelement <16 x i8> %v, i32 9
-  store i8 %vecext, i8* %ptr, align 1
+  store i8 %vecext, ptr %ptr, align 1
   ret void
 ; CHECK-LABEL: storevcsc9
 ; CHECK: vsldoi 2, 2, 2, 15
@@ -790,10 +790,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storevcsc10(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc10(<16 x i8> %v, ptr nocapture %ptr) {
 entry:
   %vecext = extractelement <16 x i8> %v, i32 10
-  store i8 %vecext, i8* %ptr, align 1
+  store i8 %vecext, ptr %ptr, align 1
   ret void
 ; CHECK-LABEL: storevcsc10
 ; CHECK: vsldoi 2, 2, 2, 14
@@ -804,10 +804,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storevcsc11(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc11(<16 x i8> %v, ptr nocapture %ptr) {
 entry:
   %vecext = extractelement <16 x i8> %v, i32 11
-  store i8 %vecext, i8* %ptr, align 1
+  store i8 %vecext, ptr %ptr, align 1
   ret void
 ; CHECK-LABEL: storevcsc11
 ; CHECK: vsldoi 2, 2, 2, 13
@@ -818,10 +818,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storevcsc12(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc12(<16 x i8> %v, ptr nocapture %ptr) {
 entry:
   %vecext = extractelement <16 x i8> %v, i32 12
-  store i8 %vecext, i8* %ptr, align 1
+  store i8 %vecext, ptr %ptr, align 1
   ret void
 ; CHECK-LABEL: storevcsc12
 ; CHECK: vsldoi 2, 2, 2, 12
@@ -832,10 +832,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storevcsc13(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc13(<16 x i8> %v, ptr nocapture %ptr) {
 entry:
   %vecext = extractelement <16 x i8> %v, i32 13
-  store i8 %vecext, i8* %ptr, align 1
+  store i8 %vecext, ptr %ptr, align 1
   ret void
 ; CHECK-LABEL: storevcsc13
 ; CHECK: vsldoi 2, 2, 2, 11
@@ -846,10 +846,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storevcsc14(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc14(<16 x i8> %v, ptr nocapture %ptr) {
 entry:
   %vecext = extractelement <16 x i8> %v, i32 14
-  store i8 %vecext, i8* %ptr, align 1
+  store i8 %vecext, ptr %ptr, align 1
   ret void
 ; CHECK-LABEL: storevcsc14
 ; CHECK: vsldoi 2, 2, 2, 10
@@ -860,10 +860,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storevcsc15(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc15(<16 x i8> %v, ptr nocapture %ptr) {
 entry:
   %vecext = extractelement <16 x i8> %v, i32 15
-  store i8 %vecext, i8* %ptr, align 1
+  store i8 %vecext, ptr %ptr, align 1
   ret void
 ; CHECK-LABEL: storevcsc15
 ; CHECK: vsldoi 2, 2, 2, 9
@@ -874,10 +874,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storefss(float %f, i16* nocapture %ptr) {
+define void @storefss(float %f, ptr nocapture %ptr) {
 entry:
   %conv = fptosi float %f to i16
-  store i16 %conv, i16* %ptr, align 2
+  store i16 %conv, ptr %ptr, align 2
   ret void
 ; CHECK-LABEL: storefss
 ; CHECK: xscvdpsxws 0, 1
@@ -888,10 +888,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storedss(double %d, i16* nocapture %ptr) {
+define void @storedss(double %d, ptr nocapture %ptr) {
 entry:
   %conv = fptosi double %d to i16
-  store i16 %conv, i16* %ptr, align 2
+  store i16 %conv, ptr %ptr, align 2
   ret void
 ; CHECK-LABEL: storedss
 ; CHECK: xscvdpsxws 0, 1
@@ -902,10 +902,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storevsss0(<8 x i16> %v, i16* nocapture %ptr) {
+define void @storevsss0(<8 x i16> %v, ptr nocapture %ptr) {
 entry:
   %vecext = extractelement <8 x i16> %v, i32 0
-  store i16 %vecext, i16* %ptr, align 2
+  store i16 %vecext, ptr %ptr, align 2
   ret void
 ; CHECK-LABEL: storevsss0
 ; CHECK: vsldoi 2, 2, 2, 8
@@ -916,10 +916,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storevsss1(<8 x i16> %v, i16* nocapture %ptr) {
+define void @storevsss1(<8 x i16> %v, ptr nocapture %ptr) {
 entry:
   %vecext = extractelement <8 x i16> %v, i32 1
-  store i16 %vecext, i16* %ptr, align 2
+  store i16 %vecext, ptr %ptr, align 2
   ret void
 ; CHECK-LABEL: storevsss1
 ; CHECK: vsldoi 2, 2, 2, 6
@@ -930,10 +930,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storevsss2(<8 x i16> %v, i16* nocapture %ptr) {
+define void @storevsss2(<8 x i16> %v, ptr nocapture %ptr) {
 entry:
   %vecext = extractelement <8 x i16> %v, i32 2
-  store i16 %vecext, i16* %ptr, align 2
+  store i16 %vecext, ptr %ptr, align 2
   ret void
 ; CHECK-LABEL: storevsss2
 ; CHECK: vsldoi 2, 2, 2, 4
@@ -944,10 +944,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storevsss3(<8 x i16> %v, i16* nocapture %ptr) {
+define void @storevsss3(<8 x i16> %v, ptr nocapture %ptr) {
 entry:
   %vecext = extractelement <8 x i16> %v, i32 3
-  store i16 %vecext, i16* %ptr, align 2
+  store i16 %vecext, ptr %ptr, align 2
   ret void
 ; CHECK-LABEL: storevsss3
 ; CHECK: vsldoi 2, 2, 2, 2
@@ -957,10 +957,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storevsss4(<8 x i16> %v, i16* nocapture %ptr) {
+define void @storevsss4(<8 x i16> %v, ptr nocapture %ptr) {
 entry:
   %vecext = extractelement <8 x i16> %v, i32 4
-  store i16 %vecext, i16* %ptr, align 2
+  store i16 %vecext, ptr %ptr, align 2
   ret void
 ; CHECK-LABEL: storevsss4
 ; CHECK: stxsihx 34, 0, 5
@@ -970,10 +970,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storevsss5(<8 x i16> %v, i16* nocapture %ptr) {
+define void @storevsss5(<8 x i16> %v, ptr nocapture %ptr) {
 entry:
   %vecext = extractelement <8 x i16> %v, i32 5
-  store i16 %vecext, i16* %ptr, align 2
+  store i16 %vecext, ptr %ptr, align 2
   ret void
 ; CHECK-LABEL: storevsss5
 ; CHECK: vsldoi 2, 2, 2, 14
@@ -984,10 +984,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storevsss6(<8 x i16> %v, i16* nocapture %ptr) {
+define void @storevsss6(<8 x i16> %v, ptr nocapture %ptr) {
 entry:
   %vecext = extractelement <8 x i16> %v, i32 6
-  store i16 %vecext, i16* %ptr, align 2
+  store i16 %vecext, ptr %ptr, align 2
   ret void
 ; CHECK-LABEL: storevsss6
 ; CHECK: vsldoi 2, 2, 2, 12
@@ -998,10 +998,10 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind
-define void @storevsss7(<8 x i16> %v, i16* nocapture %ptr) {
+define void @storevsss7(<8 x i16> %v, ptr nocapture %ptr) {
 entry:
   %vecext = extractelement <8 x i16> %v, i32 7
-  store i16 %vecext, i16* %ptr, align 2
+  store i16 %vecext, ptr %ptr, align 2
   ret void
 ; CHECK-LABEL: storevsss7
 ; CHECK: vsldoi 2, 2, 2, 10
@@ -1012,9 +1012,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define float @convscf(i8* nocapture readonly %ptr) {
+define float @convscf(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i8, i8* %ptr, align 1
+  %0 = load i8, ptr %ptr, align 1
   %conv = sitofp i8 %0 to float
   ret float %conv
 ; CHECK-LABEL: convscf
@@ -1028,9 +1028,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define float @convucf(i8* nocapture readonly %ptr) {
+define float @convucf(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i8, i8* %ptr, align 1
+  %0 = load i8, ptr %ptr, align 1
   %conv = uitofp i8 %0 to float
   ret float %conv
 ; CHECK-LABEL: convucf
@@ -1042,9 +1042,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define double @convscd(i8* nocapture readonly %ptr) {
+define double @convscd(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i8, i8* %ptr, align 1
+  %0 = load i8, ptr %ptr, align 1
   %conv = sitofp i8 %0 to double
 ; CHECK-LABEL: convscd
 ; CHECK: lxsibzx 34, 0, 3
@@ -1058,9 +1058,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define double @convucd(i8* nocapture readonly %ptr) {
+define double @convucd(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i8, i8* %ptr, align 1
+  %0 = load i8, ptr %ptr, align 1
   %conv = uitofp i8 %0 to double
   ret double %conv
 ; CHECK-LABEL: convucd
@@ -1072,9 +1072,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define float @convssf(i16* nocapture readonly %ptr) {
+define float @convssf(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i16, i16* %ptr, align 2
+  %0 = load i16, ptr %ptr, align 2
   %conv = sitofp i16 %0 to float
   ret float %conv
 ; CHECK-LABEL: convssf
@@ -1088,9 +1088,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define float @convusf(i16* nocapture readonly %ptr) {
+define float @convusf(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i16, i16* %ptr, align 2
+  %0 = load i16, ptr %ptr, align 2
   %conv = uitofp i16 %0 to float
   ret float %conv
 ; CHECK-LABEL: convusf
@@ -1102,9 +1102,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define double @convssd(i16* nocapture readonly %ptr) {
+define double @convssd(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i16, i16* %ptr, align 2
+  %0 = load i16, ptr %ptr, align 2
   %conv = sitofp i16 %0 to double
   ret double %conv
 ; CHECK-LABEL: convssd
@@ -1118,9 +1118,9 @@ entry:
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define double @convusd(i16* nocapture readonly %ptr) {
+define double @convusd(ptr nocapture readonly %ptr) {
 entry:
-  %0 = load i16, i16* %ptr, align 2
+  %0 = load i16, ptr %ptr, align 2
   %conv = uitofp i16 %0 to double
   ret double %conv
 ; CHECK-LABEL: convusd

diff  --git a/llvm/test/CodeGen/PowerPC/vsx-recip-est.ll b/llvm/test/CodeGen/PowerPC/vsx-recip-est.ll
index bab1dcb791c1..4b9d17c26d01 100644
--- a/llvm/test/CodeGen/PowerPC/vsx-recip-est.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx-recip-est.ll
@@ -8,8 +8,8 @@
 ; Function Attrs: nounwind
 define float @emit_xsresp() {
 entry:
-  %0 = load float, float* @a, align 4
-  %1 = load float, float* @b, align 4
+  %0 = load float, ptr @a, align 4
+  %1 = load float, ptr @b, align 4
   %div = fdiv arcp ninf float %0, %1
   ret float %div
 ; CHECK-LABEL: @emit_xsresp
@@ -20,9 +20,9 @@ entry:
 define float @emit_xsrsqrtesp(float %f) {
 entry:
   %f.addr = alloca float, align 4
-  store float %f, float* %f.addr, align 4
-  %0 = load float, float* %f.addr, align 4
-  %1 = load float, float* @b, align 4
+  store float %f, ptr %f.addr, align 4
+  %0 = load float, ptr %f.addr, align 4
+  %1 = load float, ptr @b, align 4
   %2 = call float @llvm.sqrt.f32(float %1)
   %div = fdiv arcp float %0, %2
   ret float %div
@@ -36,8 +36,8 @@ declare float @llvm.sqrt.f32(float)
 ; Function Attrs: nounwind
 define double @emit_xsredp() {
 entry:
-  %0 = load double, double* @c, align 8
-  %1 = load double, double* @d, align 8
+  %0 = load double, ptr @c, align 8
+  %1 = load double, ptr @d, align 8
   %div = fdiv arcp ninf double %0, %1
   ret double %div
 ; CHECK-LABEL: @emit_xsredp
@@ -48,9 +48,9 @@ entry:
 define double @emit_xsrsqrtedp(double %f) {
 entry:
   %f.addr = alloca double, align 8
-  store double %f, double* %f.addr, align 8
-  %0 = load double, double* %f.addr, align 8
-  %1 = load double, double* @d, align 8
+  store double %f, ptr %f.addr, align 8
+  %0 = load double, ptr %f.addr, align 8
+  %1 = load double, ptr @d, align 8
   %2 = call double @llvm.sqrt.f64(double %1)
   %div = fdiv arcp double %0, %2
   ret double %div

diff  --git a/llvm/test/CodeGen/PowerPC/vsx-shuffle-le-load.ll b/llvm/test/CodeGen/PowerPC/vsx-shuffle-le-load.ll
index 15391f3a2494..879dd323b1ee 100644
--- a/llvm/test/CodeGen/PowerPC/vsx-shuffle-le-load.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx-shuffle-le-load.ll
@@ -2,15 +2,15 @@
 ; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mattr=+vsx \
 ; RUN:   -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
 
-define <2 x double> @loadChainHasUser(<2 x double>* %p1, <2 x double> %v2) {
+define <2 x double> @loadChainHasUser(ptr %p1, <2 x double> %v2) {
 ; CHECK-LABEL: loadChainHasUser:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lxvd2x 0, 0, 3
 ; CHECK-NEXT:    stxv 34, 0(3)
 ; CHECK-NEXT:    xxlor 34, 0, 0
 ; CHECK-NEXT:    blr
-  %v1 = load <2 x double>, <2 x double>* %p1
-  store <2 x double> %v2, <2 x double>* %p1, align 16
+  %v1 = load <2 x double>, ptr %p1
+  store <2 x double> %v2, ptr %p1, align 16
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v1, <2 x i32> < i32 1, i32 0>
   ret <2 x double> %v3
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vsx-shuffle-le-multiple-uses.ll b/llvm/test/CodeGen/PowerPC/vsx-shuffle-le-multiple-uses.ll
index d771b80bb9a9..392aa685d906 100644
--- a/llvm/test/CodeGen/PowerPC/vsx-shuffle-le-multiple-uses.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx-shuffle-le-multiple-uses.ll
@@ -3,28 +3,28 @@
 ; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mattr=+vsx \
 ; RUN:   -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
 
-define <2 x double> @loadHasMultipleUses(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @loadHasMultipleUses(ptr %p1, ptr %p2) {
 ; CHECK-LABEL: loadHasMultipleUses:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lxv 0, 0(3)
 ; CHECK-NEXT:    xxswapd 34, 0
 ; CHECK-NEXT:    stxv 0, 0(4)
 ; CHECK-NEXT:    blr
-  %v1 = load <2 x double>, <2 x double>* %p1
-  store <2 x double> %v1, <2 x double>* %p2, align 16
+  %v1 = load <2 x double>, ptr %p1
+  store <2 x double> %v1, ptr %p2, align 16
   %v2 = shufflevector <2 x double> %v1, <2 x double> %v1, <2 x i32> < i32 1, i32 0>
   ret <2 x double> %v2
 }
 
-define <2 x double> @storeHasMultipleUses(<2 x double> %v, <2 x double>* %p) {
+define <2 x double> @storeHasMultipleUses(<2 x double> %v, ptr %p) {
 ; CHECK-LABEL: storeHasMultipleUses:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xxswapd 34, 34
 ; CHECK-NEXT:    stxv 34, 256(5)
 ; CHECK-NEXT:    blr
   %v1 = shufflevector <2 x double> %v, <2 x double> %v, <2 x i32> < i32 1, i32 0>
-  %addr = getelementptr inbounds <2 x double>, <2 x double>* %p, i64 16
-  store <2 x double> %v1, <2 x double>* %addr, align 16
+  %addr = getelementptr inbounds <2 x double>, ptr %p, i64 16
+  store <2 x double> %v1, ptr %addr, align 16
   %v2 = shufflevector <2 x double> %v, <2 x double> %v, <2 x i32> < i32 1, i32 2>
   ret <2 x double> %v2
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vsx-spill-norwstore.ll b/llvm/test/CodeGen/PowerPC/vsx-spill-norwstore.ll
index c194ad5d4bf1..1e0a53877ba7 100644
--- a/llvm/test/CodeGen/PowerPC/vsx-spill-norwstore.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx-spill-norwstore.ll
@@ -13,7 +13,7 @@ define void @main() #0 {
 ; CHECK: stxvd2x
 
 entry:
-  %val = load <2 x double>, <2 x double>* @.v2f64, align 16
+  %val = load <2 x double>, ptr @.v2f64, align 16
   %0 = tail call <8 x i16> @llvm.ppc.altivec.vupkhsb(<16 x i8> <i8 0, i8 -1, i8 -1, i8 0, i8 0, i8 0, i8 -1, i8 0, i8 -1, i8 0, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1>) #0
   %1 = tail call <8 x i16> @llvm.ppc.altivec.vupklsb(<16 x i8> <i8 0, i8 -1, i8 -1, i8 0, i8 0, i8 0, i8 -1, i8 0, i8 -1, i8 0, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1>) #0
   br i1 false, label %if.then.i68.i, label %check.exit69.i
@@ -25,7 +25,7 @@ check.exit69.i:                                   ; preds = %entry
   br i1 undef, label %if.then.i63.i, label %check.exit64.i
 
 if.then.i63.i:                                    ; preds = %check.exit69.i
-  tail call void (i8*, ...) @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str10, i64 0, i64 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str1, i64 0, i64 0), <2 x double> %val) #0
+  tail call void (ptr, ...) @printf(ptr @.str10, ptr @.str1, <2 x double> %val) #0
   br label %check.exit64.i
 
 check.exit64.i:                                   ; preds = %if.then.i63.i, %check.exit69.i
@@ -55,7 +55,7 @@ declare <8 x i16> @llvm.ppc.altivec.vupkhsb(<16 x i8>) #1
 declare <8 x i16> @llvm.ppc.altivec.vupklsb(<16 x i8>) #1
 
 ; Function Attrs: nounwind
-declare void @printf(i8* nocapture readonly, ...) #0
+declare void @printf(ptr nocapture readonly, ...) #0
 
 ; Function Attrs: nounwind readnone
 declare i32 @llvm.ppc.altivec.vcmpequh.p(i32, <8 x i16>, <8 x i16>) #1

diff  --git a/llvm/test/CodeGen/PowerPC/vsx.ll b/llvm/test/CodeGen/PowerPC/vsx.ll
index 18e621de905e..3f056970e703 100644
--- a/llvm/test/CodeGen/PowerPC/vsx.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx.ll
@@ -857,7 +857,7 @@ define <2 x i64> @test27(<2 x i64> %a, <2 x i64> %b) {
 
 }
 
-define <2 x double> @test28(<2 x double>* %a) {
+define <2 x double> @test28(ptr %a) {
 ; CHECK-LABEL: test28:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lxvd2x v2, 0, r3
@@ -878,13 +878,13 @@ define <2 x double> @test28(<2 x double>* %a) {
 ; CHECK-LE-NEXT:    lxvd2x vs0, 0, r3
 ; CHECK-LE-NEXT:    xxswapd v2, vs0
 ; CHECK-LE-NEXT:    blr
-  %v = load <2 x double>, <2 x double>* %a, align 16
+  %v = load <2 x double>, ptr %a, align 16
   ret <2 x double> %v
 
 
 }
 
-define void @test29(<2 x double>* %a, <2 x double> %b) {
+define void @test29(ptr %a, <2 x double> %b) {
 ; CHECK-LABEL: test29:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    stxvd2x v2, 0, r3
@@ -905,13 +905,13 @@ define void @test29(<2 x double>* %a, <2 x double> %b) {
 ; CHECK-LE-NEXT:    xxswapd vs0, v2
 ; CHECK-LE-NEXT:    stxvd2x vs0, 0, r3
 ; CHECK-LE-NEXT:    blr
-  store <2 x double> %b, <2 x double>* %a, align 16
+  store <2 x double> %b, ptr %a, align 16
   ret void
 
 
 }
 
-define <2 x double> @test28u(<2 x double>* %a) {
+define <2 x double> @test28u(ptr %a) {
 ; CHECK-LABEL: test28u:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lxvd2x v2, 0, r3
@@ -932,13 +932,13 @@ define <2 x double> @test28u(<2 x double>* %a) {
 ; CHECK-LE-NEXT:    lxvd2x vs0, 0, r3
 ; CHECK-LE-NEXT:    xxswapd v2, vs0
 ; CHECK-LE-NEXT:    blr
-  %v = load <2 x double>, <2 x double>* %a, align 8
+  %v = load <2 x double>, ptr %a, align 8
   ret <2 x double> %v
 
 
 }
 
-define void @test29u(<2 x double>* %a, <2 x double> %b) {
+define void @test29u(ptr %a, <2 x double> %b) {
 ; CHECK-LABEL: test29u:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    stxvd2x v2, 0, r3
@@ -959,13 +959,13 @@ define void @test29u(<2 x double>* %a, <2 x double> %b) {
 ; CHECK-LE-NEXT:    xxswapd vs0, v2
 ; CHECK-LE-NEXT:    stxvd2x vs0, 0, r3
 ; CHECK-LE-NEXT:    blr
-  store <2 x double> %b, <2 x double>* %a, align 8
+  store <2 x double> %b, ptr %a, align 8
   ret void
 
 
 }
 
-define <2 x i64> @test30(<2 x i64>* %a) {
+define <2 x i64> @test30(ptr %a) {
 ; CHECK-LABEL: test30:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lxvd2x v2, 0, r3
@@ -986,14 +986,14 @@ define <2 x i64> @test30(<2 x i64>* %a) {
 ; CHECK-LE-NEXT:    lxvd2x vs0, 0, r3
 ; CHECK-LE-NEXT:    xxswapd v2, vs0
 ; CHECK-LE-NEXT:    blr
-  %v = load <2 x i64>, <2 x i64>* %a, align 16
+  %v = load <2 x i64>, ptr %a, align 16
   ret <2 x i64> %v
 
 
 
 }
 
-define void @test31(<2 x i64>* %a, <2 x i64> %b) {
+define void @test31(ptr %a, <2 x i64> %b) {
 ; CHECK-LABEL: test31:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    stxvd2x v2, 0, r3
@@ -1014,13 +1014,13 @@ define void @test31(<2 x i64>* %a, <2 x i64> %b) {
 ; CHECK-LE-NEXT:    xxswapd vs0, v2
 ; CHECK-LE-NEXT:    stxvd2x vs0, 0, r3
 ; CHECK-LE-NEXT:    blr
-  store <2 x i64> %b, <2 x i64>* %a, align 16
+  store <2 x i64> %b, ptr %a, align 16
   ret void
 
 
 }
 
-define <4 x float> @test32(<4 x float>* %a) {
+define <4 x float> @test32(ptr %a) {
 ; CHECK-LABEL: test32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lxvw4x v2, 0, r3
@@ -1041,14 +1041,14 @@ define <4 x float> @test32(<4 x float>* %a) {
 ; CHECK-LE-NEXT:    lxvd2x vs0, 0, r3
 ; CHECK-LE-NEXT:    xxswapd v2, vs0
 ; CHECK-LE-NEXT:    blr
-  %v = load <4 x float>, <4 x float>* %a, align 16
+  %v = load <4 x float>, ptr %a, align 16
   ret <4 x float> %v
 
 
 
 }
 
-define void @test33(<4 x float>* %a, <4 x float> %b) {
+define void @test33(ptr %a, <4 x float> %b) {
 ; CHECK-LABEL: test33:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    stxvw4x v2, 0, r3
@@ -1069,14 +1069,14 @@ define void @test33(<4 x float>* %a, <4 x float> %b) {
 ; CHECK-LE-NEXT:    xxswapd vs0, v2
 ; CHECK-LE-NEXT:    stxvd2x vs0, 0, r3
 ; CHECK-LE-NEXT:    blr
-  store <4 x float> %b, <4 x float>* %a, align 16
+  store <4 x float> %b, ptr %a, align 16
   ret void
 
 
 
 }
 
-define <4 x float> @test32u(<4 x float>* %a) {
+define <4 x float> @test32u(ptr %a) {
 ; CHECK-LABEL: test32u:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li r4, 15
@@ -1109,13 +1109,13 @@ define <4 x float> @test32u(<4 x float>* %a) {
 ; CHECK-LE-NEXT:    lxvd2x vs0, 0, r3
 ; CHECK-LE-NEXT:    xxswapd v2, vs0
 ; CHECK-LE-NEXT:    blr
-  %v = load <4 x float>, <4 x float>* %a, align 8
+  %v = load <4 x float>, ptr %a, align 8
   ret <4 x float> %v
 
 
 }
 
-define void @test33u(<4 x float>* %a, <4 x float> %b) {
+define void @test33u(ptr %a, <4 x float> %b) {
 ; CHECK-LABEL: test33u:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    stxvw4x v2, 0, r3
@@ -1136,14 +1136,14 @@ define void @test33u(<4 x float>* %a, <4 x float> %b) {
 ; CHECK-LE-NEXT:    xxswapd vs0, v2
 ; CHECK-LE-NEXT:    stxvd2x vs0, 0, r3
 ; CHECK-LE-NEXT:    blr
-  store <4 x float> %b, <4 x float>* %a, align 8
+  store <4 x float> %b, ptr %a, align 8
   ret void
 
 
 
 }
 
-define <4 x i32> @test34(<4 x i32>* %a) {
+define <4 x i32> @test34(ptr %a) {
 ; CHECK-LABEL: test34:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lxvw4x v2, 0, r3
@@ -1164,14 +1164,14 @@ define <4 x i32> @test34(<4 x i32>* %a) {
 ; CHECK-LE-NEXT:    lxvd2x vs0, 0, r3
 ; CHECK-LE-NEXT:    xxswapd v2, vs0
 ; CHECK-LE-NEXT:    blr
-  %v = load <4 x i32>, <4 x i32>* %a, align 16
+  %v = load <4 x i32>, ptr %a, align 16
   ret <4 x i32> %v
 
 
 
 }
 
-define void @test35(<4 x i32>* %a, <4 x i32> %b) {
+define void @test35(ptr %a, <4 x i32> %b) {
 ; CHECK-LABEL: test35:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    stxvw4x v2, 0, r3
@@ -1192,7 +1192,7 @@ define void @test35(<4 x i32>* %a, <4 x i32> %b) {
 ; CHECK-LE-NEXT:    xxswapd vs0, v2
 ; CHECK-LE-NEXT:    stxvd2x vs0, 0, r3
 ; CHECK-LE-NEXT:    blr
-  store <4 x i32> %b, <4 x i32>* %a, align 16
+  store <4 x i32> %b, ptr %a, align 16
   ret void
 
 
@@ -1595,7 +1595,7 @@ define <2 x i64> @test47(<2 x float> %a) {
 ; FIXME: The code quality here looks pretty bad.
 }
 
-define <2 x double> @test50(double* %a) {
+define <2 x double> @test50(ptr %a) {
 ; CHECK-LABEL: test50:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lxvdsx v2, 0, r3
@@ -1615,7 +1615,7 @@ define <2 x double> @test50(double* %a) {
 ; CHECK-LE:       # %bb.0:
 ; CHECK-LE-NEXT:    lxvdsx v2, 0, r3
 ; CHECK-LE-NEXT:    blr
-  %v = load double, double* %a, align 8
+  %v = load double, ptr %a, align 8
   %w = insertelement <2 x double> undef, double %v, i32 0
   %x = insertelement <2 x double> %w, double %v, i32 1
   ret <2 x double> %x

diff  --git a/llvm/test/CodeGen/PowerPC/vsx_builtins.ll b/llvm/test/CodeGen/PowerPC/vsx_builtins.ll
index d039779d2652..694981b67a6c 100644
--- a/llvm/test/CodeGen/PowerPC/vsx_builtins.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx_builtins.ll
@@ -33,56 +33,56 @@
 ; RUN:     --check-prefixes=CHECK,CHECK-P9UP
 
 ; Function Attrs: nounwind readnone
-define <4 x i32> @test1(i8* %a) {
+define <4 x i32> @test1(ptr %a) {
 ; CHECK-LABEL: test1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvw4x v2, 0, r3
 ; CHECK-NEXT:    blr
   entry:
-    %0 = tail call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(i8* %a)
+    %0 = tail call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(ptr %a)
       ret <4 x i32> %0
 }
 ; Function Attrs: nounwind readnone
-declare <4 x i32> @llvm.ppc.vsx.lxvw4x.be(i8*)
+declare <4 x i32> @llvm.ppc.vsx.lxvw4x.be(ptr)
 
 ; Function Attrs: nounwind readnone
-define <2 x double> @test2(i8* %a) {
+define <2 x double> @test2(ptr %a) {
 ; CHECK-LABEL: test2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lxvd2x v2, 0, r3
 ; CHECK-NEXT:    blr
   entry:
-    %0 = tail call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %a)
+    %0 = tail call <2 x double> @llvm.ppc.vsx.lxvd2x.be(ptr %a)
       ret <2 x double> %0
 }
 ; Function Attrs: nounwind readnone
-declare <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8*)
+declare <2 x double> @llvm.ppc.vsx.lxvd2x.be(ptr)
 
 ; Function Attrs: nounwind readnone
-define void @test3(<4 x i32> %a, i8* %b) {
+define void @test3(<4 x i32> %a, ptr %b) {
 ; CHECK-LABEL: test3:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stxvw4x v2, 0, r5
 ; CHECK-NEXT:    blr
   entry:
-    tail call void @llvm.ppc.vsx.stxvw4x.be(<4 x i32> %a, i8* %b)
+    tail call void @llvm.ppc.vsx.stxvw4x.be(<4 x i32> %a, ptr %b)
     ret void
 }
 ; Function Attrs: nounwind readnone
-declare void @llvm.ppc.vsx.stxvw4x.be(<4 x i32>, i8*)
+declare void @llvm.ppc.vsx.stxvw4x.be(<4 x i32>, ptr)
 
 ; Function Attrs: nounwind readnone
-define void @test4(<2 x double> %a, i8* %b) {
+define void @test4(<2 x double> %a, ptr %b) {
 ; CHECK-LABEL: test4:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    stxvd2x v2, 0, r5
 ; CHECK-NEXT:    blr
   entry:
-    tail call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %a, i8* %b)
+    tail call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %a, ptr %b)
     ret void
 }
 ; Function Attrs: nounwind readnone
-declare void @llvm.ppc.vsx.stxvd2x.be(<2 x double>, i8*)
+declare void @llvm.ppc.vsx.stxvd2x.be(<2 x double>, ptr)
 
 define i32 @test_vec_test_swdiv(<2 x double> %a, <2 x double> %b) {
 ; CHECK-LABEL: test_vec_test_swdiv:
@@ -171,7 +171,7 @@ entry:
 }
 
 ; Function Attrs: nounwind readnone
-define <2 x double> @test_lxvd2x(i8* %a) {
+define <2 x double> @test_lxvd2x(ptr %a) {
 ; CHECK-P9UP-LABEL: test_lxvd2x:
 ; CHECK-P9UP:       # %bb.0: # %entry
 ; CHECK-P9UP-NEXT:    lxv v2, 0(r3)
@@ -188,14 +188,14 @@ define <2 x double> @test_lxvd2x(i8* %a) {
 ; CHECK-INTRIN-NEXT:    lxvd2x v2, 0, r3
 ; CHECK-INTRIN-NEXT:    blr
 entry:
-  %0 = tail call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %a)
+  %0 = tail call <2 x double> @llvm.ppc.vsx.lxvd2x(ptr %a)
   ret <2 x double> %0
 }
 ; Function Attrs: nounwind readnone
-declare <2 x double> @llvm.ppc.vsx.lxvd2x(i8*)
+declare <2 x double> @llvm.ppc.vsx.lxvd2x(ptr)
 
 ; Function Attrs: nounwind readnone
-define void @test_stxvd2x(<2 x double> %a, i8* %b) {
+define void @test_stxvd2x(<2 x double> %a, ptr %b) {
 ; CHECK-P9UP-LABEL: test_stxvd2x:
 ; CHECK-P9UP:       # %bb.0: # %entry
 ; CHECK-P9UP-NEXT:    stxv v2, 0(r5)
@@ -212,8 +212,8 @@ define void @test_stxvd2x(<2 x double> %a, i8* %b) {
 ; CHECK-INTRIN-NEXT:    stxvd2x v2, 0, r5
 ; CHECK-INTRIN-NEXT:    blr
 entry:
-  tail call void @llvm.ppc.vsx.stxvd2x(<2 x double> %a, i8* %b)
+  tail call void @llvm.ppc.vsx.stxvd2x(<2 x double> %a, ptr %b)
   ret void
 }
 ; Function Attrs: nounwind readnone
-declare void @llvm.ppc.vsx.stxvd2x(<2 x double>, i8*)
+declare void @llvm.ppc.vsx.stxvd2x(<2 x double>, ptr)

diff  --git a/llvm/test/CodeGen/PowerPC/vsx_insert_extract_le.ll b/llvm/test/CodeGen/PowerPC/vsx_insert_extract_le.ll
index 929b65dc76d4..5be5e05dabf9 100644
--- a/llvm/test/CodeGen/PowerPC/vsx_insert_extract_le.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx_insert_extract_le.ll
@@ -15,7 +15,7 @@
 ; RUN:   -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s \
 ; RUN:   --check-prefix=CHECK-P9 --implicit-check-not xxswapd
 
-define <2 x double> @testi0(<2 x double>* %p1, double* %p2) {
+define <2 x double> @testi0(ptr %p1, ptr %p2) {
 ; CHECK-LABEL: testi0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lxvd2x vs0, 0, r3
@@ -45,15 +45,15 @@ define <2 x double> @testi0(<2 x double>* %p1, double* %p2) {
 ; CHECK-P9-NEXT:    lfd f1, 0(r4)
 ; CHECK-P9-NEXT:    xxmrghd v2, vs0, vs1
 ; CHECK-P9-NEXT:    blr
-  %v = load <2 x double>, <2 x double>* %p1
-  %s = load double, double* %p2
+  %v = load <2 x double>, ptr %p1
+  %s = load double, ptr %p2
   %r = insertelement <2 x double> %v, double %s, i32 0
   ret <2 x double> %r
 
 
 }
 
-define <2 x double> @testi1(<2 x double>* %p1, double* %p2) {
+define <2 x double> @testi1(ptr %p1, ptr %p2) {
 ; CHECK-LABEL: testi1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lxvd2x vs0, 0, r3
@@ -83,15 +83,15 @@ define <2 x double> @testi1(<2 x double>* %p1, double* %p2) {
 ; CHECK-P9-NEXT:    lfd f1, 0(r4)
 ; CHECK-P9-NEXT:    xxpermdi v2, vs1, vs0, 1
 ; CHECK-P9-NEXT:    blr
-  %v = load <2 x double>, <2 x double>* %p1
-  %s = load double, double* %p2
+  %v = load <2 x double>, ptr %p1
+  %s = load double, ptr %p2
   %r = insertelement <2 x double> %v, double %s, i32 1
   ret <2 x double> %r
 
 
 }
 
-define double @teste0(<2 x double>* %p1) {
+define double @teste0(ptr %p1) {
 ; CHECK-LABEL: teste0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lfd f1, 0(r3)
@@ -111,14 +111,14 @@ define double @teste0(<2 x double>* %p1) {
 ; CHECK-P9:       # %bb.0:
 ; CHECK-P9-NEXT:    lfd f1, 0(r3)
 ; CHECK-P9-NEXT:    blr
-  %v = load <2 x double>, <2 x double>* %p1
+  %v = load <2 x double>, ptr %p1
   %r = extractelement <2 x double> %v, i32 0
   ret double %r
 
 
 }
 
-define double @teste1(<2 x double>* %p1) {
+define double @teste1(ptr %p1) {
 ; CHECK-LABEL: teste1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lfd f1, 8(r3)
@@ -138,7 +138,7 @@ define double @teste1(<2 x double>* %p1) {
 ; CHECK-P9:       # %bb.0:
 ; CHECK-P9-NEXT:    lfd f1, 8(r3)
 ; CHECK-P9-NEXT:    blr
-  %v = load <2 x double>, <2 x double>* %p1
+  %v = load <2 x double>, ptr %p1
   %r = extractelement <2 x double> %v, i32 1
   ret double %r
 

diff  --git a/llvm/test/CodeGen/PowerPC/vsx_scalar_ld_st.ll b/llvm/test/CodeGen/PowerPC/vsx_scalar_ld_st.ll
index 2244523c5705..056e9036715f 100644
--- a/llvm/test/CodeGen/PowerPC/vsx_scalar_ld_st.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx_scalar_ld_st.ll
@@ -14,9 +14,9 @@
 define void @dblToInt() #0 {
 entry:
   %ii = alloca i32, align 4
-  %0 = load double, double* @d, align 8
+  %0 = load double, ptr @d, align 8
   %conv = fptosi double %0 to i32
-  store volatile i32 %conv, i32* %ii, align 4
+  store volatile i32 %conv, ptr %ii, align 4
   ret void
 ; CHECK-LABEL: @dblToInt
 ; CHECK: xscvdpsxws [[REGCONV1:[0-9]+]],
@@ -27,9 +27,9 @@ entry:
 define void @fltToInt() #0 {
 entry:
   %ii = alloca i32, align 4
-  %0 = load float, float* @f, align 4
+  %0 = load float, ptr @f, align 4
   %conv = fptosi float %0 to i32
-  store volatile i32 %conv, i32* %ii, align 4
+  store volatile i32 %conv, ptr %ii, align 4
   ret void
 ; CHECK-LABEL: @fltToInt
 ; CHECK: xscvdpsxws [[REGCONV2:[0-9]+]],
@@ -40,9 +40,9 @@ entry:
 define void @intToDbl() #0 {
 entry:
   %dd = alloca double, align 8
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   %conv = sitofp i32 %0 to double
-  store volatile double %conv, double* %dd, align 8
+  store volatile double %conv, ptr %dd, align 8
   ret void
 ; CHECK-LABEL: @intToDbl
 ; CHECK: lfiwax [[REGLD1:[0-9]+]],
@@ -53,9 +53,9 @@ entry:
 define void @intToFlt() #0 {
 entry:
   %ff = alloca float, align 4
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   %conv = sitofp i32 %0 to float
-  store volatile float %conv, float* %ff, align 4
+  store volatile float %conv, ptr %ff, align 4
   ret void
 ; CHECK-LABEL: @intToFlt
 ; CHECK: lfiwax [[REGLD2:[0-9]+]],
@@ -66,9 +66,9 @@ entry:
 define void @dblToUInt() #0 {
 entry:
   %uiui = alloca i32, align 4
-  %0 = load double, double* @d, align 8
+  %0 = load double, ptr @d, align 8
   %conv = fptoui double %0 to i32
-  store volatile i32 %conv, i32* %uiui, align 4
+  store volatile i32 %conv, ptr %uiui, align 4
   ret void
 ; CHECK-LABEL: @dblToUInt
 ; CHECK: xscvdpuxws [[REGCONV3:[0-9]+]],
@@ -79,9 +79,9 @@ entry:
 define void @fltToUInt() #0 {
 entry:
   %uiui = alloca i32, align 4
-  %0 = load float, float* @f, align 4
+  %0 = load float, ptr @f, align 4
   %conv = fptoui float %0 to i32
-  store volatile i32 %conv, i32* %uiui, align 4
+  store volatile i32 %conv, ptr %uiui, align 4
   ret void
 ; CHECK-LABEL: @fltToUInt
 ; CHECK: xscvdpuxws [[REGCONV4:[0-9]+]],
@@ -92,9 +92,9 @@ entry:
 define void @uIntToDbl() #0 {
 entry:
   %dd = alloca double, align 8
-  %0 = load i32, i32* @ui, align 4
+  %0 = load i32, ptr @ui, align 4
   %conv = uitofp i32 %0 to double
-  store volatile double %conv, double* %dd, align 8
+  store volatile double %conv, ptr %dd, align 8
   ret void
 ; CHECK-LABEL: @uIntToDbl
 ; CHECK: lfiwzx [[REGLD3:[0-9]+]],
@@ -105,9 +105,9 @@ entry:
 define void @uIntToFlt() #0 {
 entry:
   %ff = alloca float, align 4
-  %0 = load i32, i32* @ui, align 4
+  %0 = load i32, ptr @ui, align 4
   %conv = uitofp i32 %0 to float
-  store volatile float %conv, float* %ff, align 4
+  store volatile float %conv, ptr %ff, align 4
   ret void
 ; CHECK-LABEL: @uIntToFlt
 ; CHECK: lfiwzx [[REGLD4:[0-9]+]],
@@ -118,9 +118,9 @@ entry:
 define void @dblToFloat() #0 {
 entry:
   %ff = alloca float, align 4
-  %0 = load double, double* @d, align 8
+  %0 = load double, ptr @d, align 8
   %conv = fptrunc double %0 to float
-  store volatile float %conv, float* %ff, align 4
+  store volatile float %conv, ptr %ff, align 4
   ret void
 ; CHECK-LABEL: @dblToFloat
 ; CHECK: lfd [[REGLD5:[0-9]+]],
@@ -134,9 +134,9 @@ entry:
 define void @floatToDbl() #0 {
 entry:
   %dd = alloca double, align 8
-  %0 = load float, float* @f, align 4
+  %0 = load float, ptr @f, align 4
   %conv = fpext float %0 to double
-  store volatile double %conv, double* %dd, align 8
+  store volatile double %conv, ptr %dd, align 8
   ret void
 ; CHECK-LABEL: @floatToDbl
 ; CHECK: lfs [[REGLD5:[0-9]+]],

diff  --git a/llvm/test/CodeGen/PowerPC/vsx_shuffle_le.ll b/llvm/test/CodeGen/PowerPC/vsx_shuffle_le.ll
index a9415f400a45..66c1b6f6d26d 100644
--- a/llvm/test/CodeGen/PowerPC/vsx_shuffle_le.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx_shuffle_le.ll
@@ -9,7 +9,7 @@
 ; RUN:   -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s \
 ; RUN:   --check-prefix=CHECK-P9 --implicit-check-not xxswapd
 
-define <2 x double> @test00(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test00(ptr %p1, ptr %p2) {
 ; CHECK-LABEL: test00:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lxvdsx 34, 0, 3
@@ -19,13 +19,13 @@ define <2 x double> @test00(<2 x double>* %p1, <2 x double>* %p2) {
 ; CHECK-P9:       # %bb.0:
 ; CHECK-P9-NEXT:    lxvdsx 34, 0, 3
 ; CHECK-P9-NEXT:    blr
-  %v1 = load <2 x double>, <2 x double>* %p1
-  %v2 = load <2 x double>, <2 x double>* %p2
+  %v1 = load <2 x double>, ptr %p1
+  %v2 = load <2 x double>, ptr %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 0, i32 0>
   ret <2 x double> %v3
 }
 
-define <2 x double> @test01(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test01(ptr %p1, ptr %p2) {
 ; CHECK-LABEL: test01:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lxvd2x 0, 0, 3
@@ -36,13 +36,13 @@ define <2 x double> @test01(<2 x double>* %p1, <2 x double>* %p2) {
 ; CHECK-P9:       # %bb.0:
 ; CHECK-P9-NEXT:    lxv 34, 0(3)
 ; CHECK-P9-NEXT:    blr
-  %v1 = load <2 x double>, <2 x double>* %p1
-  %v2 = load <2 x double>, <2 x double>* %p2
+  %v1 = load <2 x double>, ptr %p1
+  %v2 = load <2 x double>, ptr %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 0, i32 1>
   ret <2 x double> %v3
 }
 
-define <2 x double> @test02(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test02(ptr %p1, ptr %p2) {
 ; CHECK-LABEL: test02:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lxvd2x 0, 0, 3
@@ -58,13 +58,13 @@ define <2 x double> @test02(<2 x double>* %p1, <2 x double>* %p2) {
 ; CHECK-P9-NEXT:    lxv 1, 0(4)
 ; CHECK-P9-NEXT:    xxmrgld 34, 1, 0
 ; CHECK-P9-NEXT:    blr
-  %v1 = load <2 x double>, <2 x double>* %p1
-  %v2 = load <2 x double>, <2 x double>* %p2
+  %v1 = load <2 x double>, ptr %p1
+  %v2 = load <2 x double>, ptr %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 0, i32 2>
   ret <2 x double> %v3
 }
 
-define <2 x double> @test03(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test03(ptr %p1, ptr %p2) {
 ; CHECK-LABEL: test03:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lxvd2x 0, 0, 3
@@ -80,13 +80,13 @@ define <2 x double> @test03(<2 x double>* %p1, <2 x double>* %p2) {
 ; CHECK-P9-NEXT:    lxv 1, 0(4)
 ; CHECK-P9-NEXT:    xxpermdi 34, 1, 0, 1
 ; CHECK-P9-NEXT:    blr
-  %v1 = load <2 x double>, <2 x double>* %p1
-  %v2 = load <2 x double>, <2 x double>* %p2
+  %v1 = load <2 x double>, ptr %p1
+  %v2 = load <2 x double>, ptr %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 0, i32 3>
   ret <2 x double> %v3
 }
 
-define <2 x double> @test10(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test10(ptr %p1, ptr %p2) {
 ; CHECK-LABEL: test10:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lxvd2x 34, 0, 3
@@ -96,13 +96,13 @@ define <2 x double> @test10(<2 x double>* %p1, <2 x double>* %p2) {
 ; CHECK-P9:       # %bb.0:
 ; CHECK-P9-NEXT:    lxvd2x 34, 0, 3
 ; CHECK-P9-NEXT:    blr
-  %v1 = load <2 x double>, <2 x double>* %p1
-  %v2 = load <2 x double>, <2 x double>* %p2
+  %v1 = load <2 x double>, ptr %p1
+  %v2 = load <2 x double>, ptr %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 1, i32 0>
   ret <2 x double> %v3
 }
 
-define <2 x double> @test11(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test11(ptr %p1, ptr %p2) {
 ; CHECK-LABEL: test11:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi 3, 3, 8
@@ -114,13 +114,13 @@ define <2 x double> @test11(<2 x double>* %p1, <2 x double>* %p2) {
 ; CHECK-P9-NEXT:    addi 3, 3, 8
 ; CHECK-P9-NEXT:    lxvdsx 34, 0, 3
 ; CHECK-P9-NEXT:    blr
-  %v1 = load <2 x double>, <2 x double>* %p1
-  %v2 = load <2 x double>, <2 x double>* %p2
+  %v1 = load <2 x double>, ptr %p1
+  %v2 = load <2 x double>, ptr %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 1, i32 1>
   ret <2 x double> %v3
 }
 
-define <2 x double> @test12(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test12(ptr %p1, ptr %p2) {
 ; CHECK-LABEL: test12:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lxvd2x 0, 0, 3
@@ -136,13 +136,13 @@ define <2 x double> @test12(<2 x double>* %p1, <2 x double>* %p2) {
 ; CHECK-P9-NEXT:    lxv 1, 0(4)
 ; CHECK-P9-NEXT:    xxpermdi 34, 1, 0, 2
 ; CHECK-P9-NEXT:    blr
-  %v1 = load <2 x double>, <2 x double>* %p1
-  %v2 = load <2 x double>, <2 x double>* %p2
+  %v1 = load <2 x double>, ptr %p1
+  %v2 = load <2 x double>, ptr %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 1, i32 2>
   ret <2 x double> %v3
 }
 
-define <2 x double> @test13(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test13(ptr %p1, ptr %p2) {
 ; CHECK-LABEL: test13:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lxvd2x 0, 0, 3
@@ -158,13 +158,13 @@ define <2 x double> @test13(<2 x double>* %p1, <2 x double>* %p2) {
 ; CHECK-P9-NEXT:    lxv 1, 0(4)
 ; CHECK-P9-NEXT:    xxmrghd 34, 1, 0
 ; CHECK-P9-NEXT:    blr
-  %v1 = load <2 x double>, <2 x double>* %p1
-  %v2 = load <2 x double>, <2 x double>* %p2
+  %v1 = load <2 x double>, ptr %p1
+  %v2 = load <2 x double>, ptr %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 1, i32 3>
   ret <2 x double> %v3
 }
 
-define <2 x double> @test20(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test20(ptr %p1, ptr %p2) {
 ; CHECK-LABEL: test20:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lxvd2x 0, 0, 3
@@ -180,13 +180,13 @@ define <2 x double> @test20(<2 x double>* %p1, <2 x double>* %p2) {
 ; CHECK-P9-NEXT:    lxv 1, 0(4)
 ; CHECK-P9-NEXT:    xxmrgld 34, 0, 1
 ; CHECK-P9-NEXT:    blr
-  %v1 = load <2 x double>, <2 x double>* %p1
-  %v2 = load <2 x double>, <2 x double>* %p2
+  %v1 = load <2 x double>, ptr %p1
+  %v2 = load <2 x double>, ptr %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 2, i32 0>
   ret <2 x double> %v3
 }
 
-define <2 x double> @test21(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test21(ptr %p1, ptr %p2) {
 ; CHECK-LABEL: test21:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lxvd2x 0, 0, 3
@@ -202,13 +202,13 @@ define <2 x double> @test21(<2 x double>* %p1, <2 x double>* %p2) {
 ; CHECK-P9-NEXT:    lxv 1, 0(4)
 ; CHECK-P9-NEXT:    xxpermdi 34, 0, 1, 1
 ; CHECK-P9-NEXT:    blr
-  %v1 = load <2 x double>, <2 x double>* %p1
-  %v2 = load <2 x double>, <2 x double>* %p2
+  %v1 = load <2 x double>, ptr %p1
+  %v2 = load <2 x double>, ptr %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 2, i32 1>
   ret <2 x double> %v3
 }
 
-define <2 x double> @test22(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test22(ptr %p1, ptr %p2) {
 ; CHECK-LABEL: test22:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lxvdsx 34, 0, 4
@@ -218,13 +218,13 @@ define <2 x double> @test22(<2 x double>* %p1, <2 x double>* %p2) {
 ; CHECK-P9:       # %bb.0:
 ; CHECK-P9-NEXT:    lxvdsx 34, 0, 4
 ; CHECK-P9-NEXT:    blr
-  %v1 = load <2 x double>, <2 x double>* %p1
-  %v2 = load <2 x double>, <2 x double>* %p2
+  %v1 = load <2 x double>, ptr %p1
+  %v2 = load <2 x double>, ptr %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 2, i32 2>
   ret <2 x double> %v3
 }
 
-define <2 x double> @test23(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test23(ptr %p1, ptr %p2) {
 ; CHECK-LABEL: test23:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lxvd2x 0, 0, 4
@@ -235,13 +235,13 @@ define <2 x double> @test23(<2 x double>* %p1, <2 x double>* %p2) {
 ; CHECK-P9:       # %bb.0:
 ; CHECK-P9-NEXT:    lxv 34, 0(4)
 ; CHECK-P9-NEXT:    blr
-  %v1 = load <2 x double>, <2 x double>* %p1
-  %v2 = load <2 x double>, <2 x double>* %p2
+  %v1 = load <2 x double>, ptr %p1
+  %v2 = load <2 x double>, ptr %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 2, i32 3>
   ret <2 x double> %v3
 }
 
-define <2 x double> @test30(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test30(ptr %p1, ptr %p2) {
 ; CHECK-LABEL: test30:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lxvd2x 0, 0, 3
@@ -257,13 +257,13 @@ define <2 x double> @test30(<2 x double>* %p1, <2 x double>* %p2) {
 ; CHECK-P9-NEXT:    lxv 1, 0(4)
 ; CHECK-P9-NEXT:    xxpermdi 34, 0, 1, 2
 ; CHECK-P9-NEXT:    blr
-  %v1 = load <2 x double>, <2 x double>* %p1
-  %v2 = load <2 x double>, <2 x double>* %p2
+  %v1 = load <2 x double>, ptr %p1
+  %v2 = load <2 x double>, ptr %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 3, i32 0>
   ret <2 x double> %v3
 }
 
-define <2 x double> @test31(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test31(ptr %p1, ptr %p2) {
 ; CHECK-LABEL: test31:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lxvd2x 0, 0, 3
@@ -279,13 +279,13 @@ define <2 x double> @test31(<2 x double>* %p1, <2 x double>* %p2) {
 ; CHECK-P9-NEXT:    lxv 1, 0(4)
 ; CHECK-P9-NEXT:    xxmrghd 34, 0, 1
 ; CHECK-P9-NEXT:    blr
-  %v1 = load <2 x double>, <2 x double>* %p1
-  %v2 = load <2 x double>, <2 x double>* %p2
+  %v1 = load <2 x double>, ptr %p1
+  %v2 = load <2 x double>, ptr %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 3, i32 1>
   ret <2 x double> %v3
 }
 
-define <2 x double> @test32(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test32(ptr %p1, ptr %p2) {
 ; CHECK-LABEL: test32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lxvd2x 34, 0, 4
@@ -295,13 +295,13 @@ define <2 x double> @test32(<2 x double>* %p1, <2 x double>* %p2) {
 ; CHECK-P9:       # %bb.0:
 ; CHECK-P9-NEXT:    lxvd2x 34, 0, 4
 ; CHECK-P9-NEXT:    blr
-  %v1 = load <2 x double>, <2 x double>* %p1
-  %v2 = load <2 x double>, <2 x double>* %p2
+  %v1 = load <2 x double>, ptr %p1
+  %v2 = load <2 x double>, ptr %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 3, i32 2>
   ret <2 x double> %v3
 }
 
-define <2 x double> @test33(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test33(ptr %p1, ptr %p2) {
 ; CHECK-LABEL: test33:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi 3, 4, 8
@@ -313,8 +313,8 @@ define <2 x double> @test33(<2 x double>* %p1, <2 x double>* %p2) {
 ; CHECK-P9-NEXT:    addi 3, 4, 8
 ; CHECK-P9-NEXT:    lxvdsx 34, 0, 3
 ; CHECK-P9-NEXT:    blr
-  %v1 = load <2 x double>, <2 x double>* %p1
-  %v2 = load <2 x double>, <2 x double>* %p2
+  %v1 = load <2 x double>, ptr %p1
+  %v2 = load <2 x double>, ptr %p2
   %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 3, i32 3>
   ret <2 x double> %v3
 }

diff  --git a/llvm/test/CodeGen/PowerPC/vtable-reloc.ll b/llvm/test/CodeGen/PowerPC/vtable-reloc.ll
index 17ea8e3c47e0..10802e4cf6d5 100644
--- a/llvm/test/CodeGen/PowerPC/vtable-reloc.ll
+++ b/llvm/test/CodeGen/PowerPC/vtable-reloc.ll
@@ -3,7 +3,7 @@
 target datalayout = "E-m:e-i64:64-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
- at _ZTV3foo = linkonce_odr unnamed_addr constant [1 x i8*] [i8* bitcast (void ()* @__cxa_pure_virtual to i8*)]
+ at _ZTV3foo = linkonce_odr unnamed_addr constant [1 x ptr] [ptr @__cxa_pure_virtual]
 declare void @__cxa_pure_virtual()
 
 ; CHECK: .section .data.rel.ro

diff  --git a/llvm/test/CodeGen/PowerPC/weak_def_can_be_hidden.ll b/llvm/test/CodeGen/PowerPC/weak_def_can_be_hidden.ll
index 6bc3400deed2..e2a34d302670 100644
--- a/llvm/test/CodeGen/PowerPC/weak_def_can_be_hidden.ll
+++ b/llvm/test/CodeGen/PowerPC/weak_def_can_be_hidden.ll
@@ -7,29 +7,29 @@
 ; CHECK: .weak v1
 
 define i32 @f1() {
-  %x = load i32 , i32 * @v1
+  %x = load i32 , ptr @v1
   ret i32 %x
 }
 
 @v2 = linkonce_odr constant i32 32
 ; CHECK: .weak v2
 
-define i32* @f2() {
-  ret i32* @v2
+define ptr @f2() {
+  ret ptr @v2
 }
 
 @v3 = linkonce_odr unnamed_addr constant i32 32
 ; CHECK: .section .rodata.cst4,"aM",
 ; CHECK: .weak v3
 
-define i32* @f3() {
-  ret i32* @v3
+define ptr @f3() {
+  ret ptr @v3
 }
 
 @v4 = linkonce_odr unnamed_addr global i32 32
 ; CHECK: .weak v4
 
 define i32 @f4() {
-  %x = load i32 , i32 * @v4
+  %x = load i32 , ptr @v4
   ret i32 %x
 }

diff  --git a/llvm/test/CodeGen/PowerPC/xray-ret-is-terminator.ll b/llvm/test/CodeGen/PowerPC/xray-ret-is-terminator.ll
index 3f15d46290a1..887aff4b6819 100644
--- a/llvm/test/CodeGen/PowerPC/xray-ret-is-terminator.ll
+++ b/llvm/test/CodeGen/PowerPC/xray-ret-is-terminator.ll
@@ -9,12 +9,12 @@ bb:
   br i1 undef, label %bb1, label %bb8
 
 bb1:
-  %tmp = tail call i64 asm sideeffect "", "=&r,=*m,b,r,*m,~{cc}"(i64* elementtype(i64) nonnull undef, i64* nonnull undef, i64 1, i64* elementtype(i64) nonnull undef)
+  %tmp = tail call i64 asm sideeffect "", "=&r,=*m,b,r,*m,~{cc}"(ptr elementtype(i64) nonnull undef, ptr nonnull undef, i64 1, ptr elementtype(i64) nonnull undef)
   %tmp2 = icmp eq i64 %tmp, 0
   br i1 %tmp2, label %bb3, label %bb8
 
 bb3:
-  %tmp4 = tail call i64 asm sideeffect "", "=&r,=*m,b,r,r,*m,~{cc}"(i64* elementtype(i64) undef, i64* undef, i64 0, i64 undef, i64* elementtype(i64) undef)
+  %tmp4 = tail call i64 asm sideeffect "", "=&r,=*m,b,r,r,*m,~{cc}"(ptr elementtype(i64) undef, ptr undef, i64 0, i64 undef, ptr elementtype(i64) undef)
   %tmp5 = icmp eq i64 0, %tmp4
   br i1 %tmp5, label %bb6, label %bb3
 

diff  --git a/llvm/test/CodeGen/PowerPC/xvcmpeqdp-v2f64.ll b/llvm/test/CodeGen/PowerPC/xvcmpeqdp-v2f64.ll
index 263a7590cf99..521bcf2a78dd 100644
--- a/llvm/test/CodeGen/PowerPC/xvcmpeqdp-v2f64.ll
+++ b/llvm/test/CodeGen/PowerPC/xvcmpeqdp-v2f64.ll
@@ -5,7 +5,7 @@ target triple = "powerpc64le-unknown-linux-gnu"
 ; Function Attrs: nounwind
 define void @__fmax_double3_3D_exec(<3 x double> %input1, <3 x i64> %input2, 
                                     <3 x i1> %input3, <3 x i64> %input4,
-                                    <3 x i64> %input5,  <4 x double>* %input6) #0 {
+                                    <3 x i64> %input5,  ptr %input6) #0 {
 entry:
   br i1 undef, label %if.then.i, label %fmax_double3.exit
 
@@ -20,7 +20,7 @@ if.then.i:                                        ; preds = %entry
   %or.i.i.i = or <3 x i64> %and.i.i.i, %and26.i.i.i
   %astype32.i.i.i = bitcast <3 x i64> %or.i.i.i to <3 x double>
   %extractVec33.i.i.i = shufflevector <3 x double> %astype32.i.i.i, <3 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
-  store <4 x double> %extractVec33.i.i.i, <4 x double>* %input6, align 32
+  store <4 x double> %extractVec33.i.i.i, ptr %input6, align 32
   br label %fmax_double3.exit
 
 ; CHECK-LABEL: @__fmax_double3_3D_exec

diff  --git a/llvm/test/CodeGen/PowerPC/zero-not-run.ll b/llvm/test/CodeGen/PowerPC/zero-not-run.ll
index 3e09b3b770a9..6d43191a8c7d 100644
--- a/llvm/test/CodeGen/PowerPC/zero-not-run.ll
+++ b/llvm/test/CodeGen/PowerPC/zero-not-run.ll
@@ -3,14 +3,14 @@ target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
 target triple = "powerpc64-unknown-linux-gnu"
 
 ; Function Attrs: nounwind
-define internal i32* @func_65(i32* %p_66) #0 {
+define internal ptr @func_65(ptr %p_66) #0 {
 entry:
   br i1 undef, label %for.body, label %for.end731
 
 for.body:                                         ; preds = %entry
-  %0 = load i32, i32* undef, align 4
+  %0 = load i32, ptr undef, align 4
   %or31 = or i32 %0, 319143828
-  store i32 %or31, i32* undef, align 4
+  store i32 %or31, ptr undef, align 4
   %cmp32 = icmp eq i32 319143828, %or31
   %conv33 = zext i1 %cmp32 to i32
   %conv34 = sext i32 %conv33 to i64
@@ -18,7 +18,7 @@ for.body:                                         ; preds = %entry
   unreachable
 
 for.end731:                                       ; preds = %entry
-  ret i32* undef
+  ret ptr undef
 }
 
 ; Function Attrs: nounwind

diff  --git a/llvm/test/CodeGen/PowerPC/zext-and-cmp.ll b/llvm/test/CodeGen/PowerPC/zext-and-cmp.ll
index 69474bd0805a..d8926e4952ff 100644
--- a/llvm/test/CodeGen/PowerPC/zext-and-cmp.ll
+++ b/llvm/test/CodeGen/PowerPC/zext-and-cmp.ll
@@ -7,18 +7,18 @@
 @k = local_unnamed_addr global i32 0, align 4
 
 ; Function Attrs: norecurse nounwind
-define signext i32 @cmplwi(i32* nocapture readonly %p, i32* nocapture readonly %q, i32 signext %j, i32 signext %r10) {
+define signext i32 @cmplwi(ptr nocapture readonly %p, ptr nocapture readonly %q, i32 signext %j, i32 signext %r10) {
 entry:
-  %0 = load i32, i32* %q, align 4
+  %0 = load i32, ptr %q, align 4
   %shl = shl i32 %0, %j
-  %1 = load i32, i32* %p, align 4
+  %1 = load i32, ptr %p, align 4
   %and = and i32 %shl, %r10
   %and1 = and i32 %and, %1
   %tobool = icmp eq i32 %and1, 0
   br i1 %tobool, label %cleanup, label %if.then
 
 if.then:
-  store i32 %j, i32* @k, align 4
+  store i32 %j, ptr @k, align 4
   br label %cleanup
 
 cleanup:

diff  --git a/llvm/test/CodeGen/PowerPC/zext-bitperm.ll b/llvm/test/CodeGen/PowerPC/zext-bitperm.ll
index b6d751d6f2fd..2d5a324ddad5 100644
--- a/llvm/test/CodeGen/PowerPC/zext-bitperm.ll
+++ b/llvm/test/CodeGen/PowerPC/zext-bitperm.ll
@@ -5,7 +5,7 @@
 ; Test case for PPCTargetLowering::extendSubTreeForBitPermutation.
 ; We expect mask and rotate are folded into a rlwinm instruction.
 
-define zeroext i32 @func(i32* %p, i32 zeroext %i) {
+define zeroext i32 @func(ptr %p, i32 zeroext %i) {
 ; CHECK-LABEL: @func
 ; CHECK: addi [[REG1:[0-9]+]], 4, 1
 ; CHECK: rlwinm [[REG2:[0-9]+]], [[REG1]], 2, 22, 29
@@ -16,8 +16,8 @@ entry:
   %add = add i32 %i, 1
   %and = and i32 %add, 255
   %idxprom = zext i32 %and to i64
-  %arrayidx = getelementptr inbounds i32, i32* %p, i64 %idxprom
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %p, i64 %idxprom
+  %0 = load i32, ptr %arrayidx, align 4
   ret i32 %0
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/zext-free.ll b/llvm/test/CodeGen/PowerPC/zext-free.ll
index 57fc521da8bd..5482ffd0c21a 100644
--- a/llvm/test/CodeGen/PowerPC/zext-free.ll
+++ b/llvm/test/CodeGen/PowerPC/zext-free.ll
@@ -3,29 +3,29 @@ target datalayout = "E-m:e-i64:64-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
 ; Function Attrs: noreturn nounwind
-define signext i32 @_Z1fRPc(i8** nocapture dereferenceable(8) %p) #0 {
+define signext i32 @_Z1fRPc(ptr nocapture dereferenceable(8) %p) #0 {
 entry:
-  %.pre = load i8*, i8** %p, align 8
+  %.pre = load ptr, ptr %p, align 8
   br label %loop
 
 loop:                                             ; preds = %loop.backedge, %entry
-  %0 = phi i8* [ %.pre, %entry ], [ %.be, %loop.backedge ]
-  %1 = load i8, i8* %0, align 1
+  %0 = phi ptr [ %.pre, %entry ], [ %.be, %loop.backedge ]
+  %1 = load i8, ptr %0, align 1
   %tobool = icmp eq i8 %1, 0
-  %incdec.ptr = getelementptr inbounds i8, i8* %0, i64 1
-  store i8* %incdec.ptr, i8** %p, align 8
-  %2 = load i8, i8* %incdec.ptr, align 1
+  %incdec.ptr = getelementptr inbounds i8, ptr %0, i64 1
+  store ptr %incdec.ptr, ptr %p, align 8
+  %2 = load i8, ptr %incdec.ptr, align 1
   %tobool2 = icmp ne i8 %2, 0
   %or.cond = and i1 %tobool, %tobool2
   br i1 %or.cond, label %if.then3, label %loop.backedge
 
 if.then3:                                         ; preds = %loop
-  %incdec.ptr4 = getelementptr inbounds i8, i8* %0, i64 2
-  store i8* %incdec.ptr4, i8** %p, align 8
+  %incdec.ptr4 = getelementptr inbounds i8, ptr %0, i64 2
+  store ptr %incdec.ptr4, ptr %p, align 8
   br label %loop.backedge
 
 loop.backedge:                                    ; preds = %if.then3, %loop
-  %.be = phi i8* [ %incdec.ptr4, %if.then3 ], [ %incdec.ptr, %loop ]
+  %.be = phi ptr [ %incdec.ptr4, %if.then3 ], [ %incdec.ptr, %loop ]
   br label %loop
 
 ; CHECK-LABEL: @_Z1fRPc


        


More information about the llvm-commits mailing list