[llvm] a96f691 - [Hexagon] Convert some tests to opaque pointers (NFC)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 19 03:54:01 PST 2022


Author: Nikita Popov
Date: 2022-12-19T12:53:32+01:00
New Revision: a96f691985c8546e826012fdc3481c88f034a194

URL: https://github.com/llvm/llvm-project/commit/a96f691985c8546e826012fdc3481c88f034a194
DIFF: https://github.com/llvm/llvm-project/commit/a96f691985c8546e826012fdc3481c88f034a194.diff

LOG: [Hexagon] Convert some tests to opaque pointers (NFC)

Added: 
    

Modified: 
    llvm/test/CodeGen/Hexagon/64bit_tstbit.ll
    llvm/test/CodeGen/Hexagon/Atomics.ll
    llvm/test/CodeGen/Hexagon/BranchPredict.ll
    llvm/test/CodeGen/Hexagon/Halide_vec_cast_trunc1.ll
    llvm/test/CodeGen/Hexagon/Halide_vec_cast_trunc2.ll
    llvm/test/CodeGen/Hexagon/M4_mpyri_addi_global.ll
    llvm/test/CodeGen/Hexagon/M4_mpyrr_addi_global.ll
    llvm/test/CodeGen/Hexagon/NVJumpCmp.ll
    llvm/test/CodeGen/Hexagon/P08214.ll
    llvm/test/CodeGen/Hexagon/PR33749.ll
    llvm/test/CodeGen/Hexagon/S3_2op.ll
    llvm/test/CodeGen/Hexagon/SUnit-boundary-prob.ll
    llvm/test/CodeGen/Hexagon/V60-VDblNew.ll
    llvm/test/CodeGen/Hexagon/abi-padding-2.ll
    llvm/test/CodeGen/Hexagon/abi-padding.ll
    llvm/test/CodeGen/Hexagon/absaddr-store.ll
    llvm/test/CodeGen/Hexagon/absimm.ll
    llvm/test/CodeGen/Hexagon/add_mpi_RRR.ll
    llvm/test/CodeGen/Hexagon/addaddi.ll
    llvm/test/CodeGen/Hexagon/addasl-address.ll
    llvm/test/CodeGen/Hexagon/addr-calc-opt.ll
    llvm/test/CodeGen/Hexagon/addr-mode-opt.ll
    llvm/test/CodeGen/Hexagon/addrmode-align.ll
    llvm/test/CodeGen/Hexagon/addrmode-indoff.ll
    llvm/test/CodeGen/Hexagon/addrmode-keepdeadphis.ll
    llvm/test/CodeGen/Hexagon/addrmode-offset.ll
    llvm/test/CodeGen/Hexagon/addsubcarry.ll
    llvm/test/CodeGen/Hexagon/adjust-latency-stackST.ll
    llvm/test/CodeGen/Hexagon/aggr-antidep-tied.ll
    llvm/test/CodeGen/Hexagon/aggr-copy-order.ll
    llvm/test/CodeGen/Hexagon/aggressive_licm.ll
    llvm/test/CodeGen/Hexagon/align_test.ll
    llvm/test/CodeGen/Hexagon/always-ext.ll
    llvm/test/CodeGen/Hexagon/asr-rnd.ll
    llvm/test/CodeGen/Hexagon/asr-rnd64.ll
    llvm/test/CodeGen/Hexagon/assert-postinc-ptr-not-value.ll
    llvm/test/CodeGen/Hexagon/atomic-opaque-basic.ll
    llvm/test/CodeGen/Hexagon/atomic-rmw-add.ll
    llvm/test/CodeGen/Hexagon/atomic-store-byte.ll
    llvm/test/CodeGen/Hexagon/autohvx/build-vector-float-type.ll
    llvm/test/CodeGen/Hexagon/autohvx/build-vector-i32-type.ll
    llvm/test/CodeGen/Hexagon/autohvx/calling-conv.ll
    llvm/test/CodeGen/Hexagon/autohvx/float-cost.ll
    llvm/test/CodeGen/Hexagon/autohvx/hfinsert.ll
    llvm/test/CodeGen/Hexagon/autohvx/hfnosplat_cp.ll
    llvm/test/CodeGen/Hexagon/autohvx/hvx-idiom-empty-results.ll
    llvm/test/CodeGen/Hexagon/autohvx/interleave.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-anyext-inreg.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-anyext-pair.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-bitcast-vsplat.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-bitcast-vsplat2.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-bool-vector.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-build-undef.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-concat-multiple.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-concat-vectors-bool.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-concat-vectors.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-const-splat-bitcast.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-const-splat-imm.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-const-vector.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-expand-unaligned-loads-noindexed.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-expand-unaligned-loads.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-extractelt-illegal-type.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-hvx-concat-truncate.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-mstore-fp16.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-q-legalization-loop.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-q2v-pair.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-select-const.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-sext-inreg.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-shift-byte.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-shuffle-gather.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-split-masked.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-store-bitcast-v128i1.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-truncate-legal.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-truncate.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-undef-not-zero.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-vec-ext.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-vsplat-pair.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-widen-memop.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-widen-store.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-widen-truncate-illegal-elem.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-widen-truncate-op.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-widen-truncate-pair.ll
    llvm/test/CodeGen/Hexagon/autohvx/isel-widen-truncate.ll
    llvm/test/CodeGen/Hexagon/autohvx/lower-insert-elt.ll
    llvm/test/CodeGen/Hexagon/autohvx/masked-vmem-basic.ll
    llvm/test/CodeGen/Hexagon/autohvx/maximize-bandwidth.ll
    llvm/test/CodeGen/Hexagon/autohvx/non-simple-hvx-type.ll
    llvm/test/CodeGen/Hexagon/autohvx/pred-vmem-128b.ll
    llvm/test/CodeGen/Hexagon/autohvx/pred-vmem-64b.ll
    llvm/test/CodeGen/Hexagon/autohvx/vector-align-addr.ll
    llvm/test/CodeGen/Hexagon/autohvx/vector-align-bad-move.ll
    llvm/test/CodeGen/Hexagon/autohvx/vector-align-basic.ll
    llvm/test/CodeGen/Hexagon/autohvx/vector-align-interleaved.ll
    llvm/test/CodeGen/Hexagon/autohvx/vector-align-only-phi-use.ll
    llvm/test/CodeGen/Hexagon/autohvx/vector-align-rescale-nonint.ll
    llvm/test/CodeGen/Hexagon/autohvx/vector-align-scalar-mask.ll
    llvm/test/CodeGen/Hexagon/autohvx/vector-align-store-mask.ll
    llvm/test/CodeGen/Hexagon/autohvx/vector-align-store.ll
    llvm/test/CodeGen/Hexagon/autohvx/vector-align-use-in-different-block.ll
    llvm/test/CodeGen/Hexagon/autohvx/vector-load-store-basic.ll
    llvm/test/CodeGen/Hexagon/autohvx/widen-ext.ll
    llvm/test/CodeGen/Hexagon/autohvx/widen-setcc.ll
    llvm/test/CodeGen/Hexagon/autohvx/widen-trunc.ll
    llvm/test/CodeGen/Hexagon/avoidVectorLowering.ll
    llvm/test/CodeGen/Hexagon/barrier-flag.ll
    llvm/test/CodeGen/Hexagon/base-offset-addr.ll
    llvm/test/CodeGen/Hexagon/base-offset-post.ll
    llvm/test/CodeGen/Hexagon/base-offset-stv4.ll
    llvm/test/CodeGen/Hexagon/bit-bitsplit-at.ll
    llvm/test/CodeGen/Hexagon/bit-bitsplit-regclass.ll
    llvm/test/CodeGen/Hexagon/bit-bitsplit-src.ll
    llvm/test/CodeGen/Hexagon/bit-bitsplit.ll
    llvm/test/CodeGen/Hexagon/bit-extract-off.ll
    llvm/test/CodeGen/Hexagon/bit-gen-rseq.ll
    llvm/test/CodeGen/Hexagon/bit-has.ll
    llvm/test/CodeGen/Hexagon/bit-loop-rc-mismatch.ll
    llvm/test/CodeGen/Hexagon/bit-loop.ll
    llvm/test/CodeGen/Hexagon/bit-phi.ll
    llvm/test/CodeGen/Hexagon/bit-rie.ll
    llvm/test/CodeGen/Hexagon/bit-skip-byval.ll
    llvm/test/CodeGen/Hexagon/bit-visit-flowq.ll
    llvm/test/CodeGen/Hexagon/bitcast-i128-to-v128i1.ll
    llvm/test/CodeGen/Hexagon/bitconvert-vector.ll
    llvm/test/CodeGen/Hexagon/bkfir.ll
    llvm/test/CodeGen/Hexagon/block-addr.ll
    llvm/test/CodeGen/Hexagon/block-address.ll
    llvm/test/CodeGen/Hexagon/block-ranges-nodef.ll
    llvm/test/CodeGen/Hexagon/blockaddr-fpic.ll
    llvm/test/CodeGen/Hexagon/branch-non-mbb.ll
    llvm/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll
    llvm/test/CodeGen/Hexagon/brcond-setne.ll
    llvm/test/CodeGen/Hexagon/brev_ld.ll
    llvm/test/CodeGen/Hexagon/brev_st.ll
    llvm/test/CodeGen/Hexagon/bss-local.ll
    llvm/test/CodeGen/Hexagon/bug-aa4463-ifconv-vecpred.ll
    llvm/test/CodeGen/Hexagon/bug-allocframe-size.ll
    llvm/test/CodeGen/Hexagon/bug-hcp-tied-kill.ll
    llvm/test/CodeGen/Hexagon/bug14859-iv-cleanup-lpad.ll
    llvm/test/CodeGen/Hexagon/bug14859-split-const-block-addr.ll
    llvm/test/CodeGen/Hexagon/bug15515-shuffle.ll
    llvm/test/CodeGen/Hexagon/bug17276.ll
    llvm/test/CodeGen/Hexagon/bug17386.ll
    llvm/test/CodeGen/Hexagon/bug18008.ll
    llvm/test/CodeGen/Hexagon/bug18491-optsize.ll
    llvm/test/CodeGen/Hexagon/bug19076.ll
    llvm/test/CodeGen/Hexagon/bug19119.ll
    llvm/test/CodeGen/Hexagon/bug19254-ifconv-vec.ll
    llvm/test/CodeGen/Hexagon/bug27085.ll
    llvm/test/CodeGen/Hexagon/bug31839.ll
    llvm/test/CodeGen/Hexagon/bug6757-endloop.ll
    llvm/test/CodeGen/Hexagon/bug9049.ll
    llvm/test/CodeGen/Hexagon/bugAsmHWloop.ll
    llvm/test/CodeGen/Hexagon/build-vector-shuffle.ll
    llvm/test/CodeGen/Hexagon/builtin-prefetch-offset.ll
    llvm/test/CodeGen/Hexagon/builtin-prefetch.ll
    llvm/test/CodeGen/Hexagon/call-long1.ll
    llvm/test/CodeGen/Hexagon/call-ret-i1.ll
    llvm/test/CodeGen/Hexagon/call-v4.ll
    llvm/test/CodeGen/Hexagon/callR_noreturn.ll
    llvm/test/CodeGen/Hexagon/calling-conv-2.ll
    llvm/test/CodeGen/Hexagon/calling-conv.ll
    llvm/test/CodeGen/Hexagon/callr-dep-edge.ll
    llvm/test/CodeGen/Hexagon/cext-check.ll
    llvm/test/CodeGen/Hexagon/cext-ice.ll
    llvm/test/CodeGen/Hexagon/cext-valid-packet2.ll
    llvm/test/CodeGen/Hexagon/cext.ll
    llvm/test/CodeGen/Hexagon/cexti16.ll
    llvm/test/CodeGen/Hexagon/cfgopt-fall-through.ll
    llvm/test/CodeGen/Hexagon/cfi-offset.ll
    llvm/test/CodeGen/Hexagon/cfi_offset.ll
    llvm/test/CodeGen/Hexagon/cfi_offset2.ll
    llvm/test/CodeGen/Hexagon/check-dot-new.ll
    llvm/test/CodeGen/Hexagon/check-subregister-for-latency.ll
    llvm/test/CodeGen/Hexagon/circ-load-isel.ll
    llvm/test/CodeGen/Hexagon/circ_ld.ll
    llvm/test/CodeGen/Hexagon/circ_ldd_bug.ll
    llvm/test/CodeGen/Hexagon/circ_ldw.ll
    llvm/test/CodeGen/Hexagon/circ_new.ll
    llvm/test/CodeGen/Hexagon/circ_pcr_assert.ll
    llvm/test/CodeGen/Hexagon/circ_st.ll
    llvm/test/CodeGen/Hexagon/clr_set_toggle.ll
    llvm/test/CodeGen/Hexagon/cmp-extend.ll
    llvm/test/CodeGen/Hexagon/cmp-promote.ll
    llvm/test/CodeGen/Hexagon/cmp.ll
    llvm/test/CodeGen/Hexagon/cmp_pred2.ll
    llvm/test/CodeGen/Hexagon/cmpb-dec-imm.ll
    llvm/test/CodeGen/Hexagon/cmpb-eq.ll
    llvm/test/CodeGen/Hexagon/cmpb_gtu.ll
    llvm/test/CodeGen/Hexagon/cmpb_pred.ll
    llvm/test/CodeGen/Hexagon/cmpbeq.ll
    llvm/test/CodeGen/Hexagon/cmph-gtu.ll
    llvm/test/CodeGen/Hexagon/coalesce_tfri.ll
    llvm/test/CodeGen/Hexagon/coalescing-hvx-across-calls.ll
    llvm/test/CodeGen/Hexagon/combine-imm-ext.ll
    llvm/test/CodeGen/Hexagon/combine-imm-ext2.ll
    llvm/test/CodeGen/Hexagon/combine.ll
    llvm/test/CodeGen/Hexagon/combine_ir.ll
    llvm/test/CodeGen/Hexagon/combiner-lts.ll
    llvm/test/CodeGen/Hexagon/common-gep-basic.ll
    llvm/test/CodeGen/Hexagon/common-gep-icm.ll
    llvm/test/CodeGen/Hexagon/common-global-addr.ll
    llvm/test/CodeGen/Hexagon/concat-vectors-legalize.ll
    llvm/test/CodeGen/Hexagon/const-combine.ll
    llvm/test/CodeGen/Hexagon/constext-call.ll
    llvm/test/CodeGen/Hexagon/constext-immstore.ll
    llvm/test/CodeGen/Hexagon/constext-replace.ll
    llvm/test/CodeGen/Hexagon/constp-combine-neg.ll
    llvm/test/CodeGen/Hexagon/constp-extract.ll
    llvm/test/CodeGen/Hexagon/convert-to-dot-old.ll
    llvm/test/CodeGen/Hexagon/convert_const_i1_to_i8.ll
    llvm/test/CodeGen/Hexagon/convertdptoint.ll
    llvm/test/CodeGen/Hexagon/convertdptoll.ll
    llvm/test/CodeGen/Hexagon/convertsptoint.ll
    llvm/test/CodeGen/Hexagon/convertsptoll.ll
    llvm/test/CodeGen/Hexagon/copy-to-combine-dbg.ll
    llvm/test/CodeGen/Hexagon/csr-func-usedef.ll
    llvm/test/CodeGen/Hexagon/ctor.ll
    llvm/test/CodeGen/Hexagon/dadd.ll
    llvm/test/CodeGen/Hexagon/dag-indexed.ll
    llvm/test/CodeGen/Hexagon/dccleana.ll
    llvm/test/CodeGen/Hexagon/dead-store-stack.ll
    llvm/test/CodeGen/Hexagon/dealloc-store.ll
    llvm/test/CodeGen/Hexagon/dealloc_return.ll
    llvm/test/CodeGen/Hexagon/debug-line_table_start.ll
    llvm/test/CodeGen/Hexagon/debug-prologue.ll
    llvm/test/CodeGen/Hexagon/def-undef-deps.ll
    llvm/test/CodeGen/Hexagon/default-align.ll
    llvm/test/CodeGen/Hexagon/deflate.ll
    llvm/test/CodeGen/Hexagon/dhry.ll
    llvm/test/CodeGen/Hexagon/dhry_proc8.ll
    llvm/test/CodeGen/Hexagon/dhry_stall.ll
    llvm/test/CodeGen/Hexagon/disable-const64-tinycore.ll
    llvm/test/CodeGen/Hexagon/disable-const64.ll
    llvm/test/CodeGen/Hexagon/dmul.ll
    llvm/test/CodeGen/Hexagon/double.ll
    llvm/test/CodeGen/Hexagon/dsub.ll
    llvm/test/CodeGen/Hexagon/dualstore.ll
    llvm/test/CodeGen/Hexagon/early-if-conversion-bug1.ll
    llvm/test/CodeGen/Hexagon/early-if-merge-loop.ll
    llvm/test/CodeGen/Hexagon/early-if-phi-i1.ll
    llvm/test/CodeGen/Hexagon/early-if-spare.ll
    llvm/test/CodeGen/Hexagon/early-if-vecpi.ll
    llvm/test/CodeGen/Hexagon/early-if-vecpred.ll
    llvm/test/CodeGen/Hexagon/early-if.ll
    llvm/test/CodeGen/Hexagon/eh_return-r30.ll
    llvm/test/CodeGen/Hexagon/eh_return.ll
    llvm/test/CodeGen/Hexagon/eh_save_restore.ll
    llvm/test/CodeGen/Hexagon/ehabi.ll
    llvm/test/CodeGen/Hexagon/eliminate-pred-spill.ll
    llvm/test/CodeGen/Hexagon/expand-condsets-copy-lis.ll
    llvm/test/CodeGen/Hexagon/expand-condsets-dead-bad.ll
    llvm/test/CodeGen/Hexagon/expand-condsets-dead-pred.ll
    llvm/test/CodeGen/Hexagon/expand-condsets-dead.ll
    llvm/test/CodeGen/Hexagon/expand-condsets-extend.ll
    llvm/test/CodeGen/Hexagon/expand-condsets-pred-undef.ll
    llvm/test/CodeGen/Hexagon/expand-condsets-rm-segment.ll
    llvm/test/CodeGen/Hexagon/expand-condsets-undef.ll
    llvm/test/CodeGen/Hexagon/expand-condsets-undef2.ll
    llvm/test/CodeGen/Hexagon/expand-condsets.ll
    llvm/test/CodeGen/Hexagon/expand-vstorerw-undef.ll
    llvm/test/CodeGen/Hexagon/expand-vstorerw-undef2.ll
    llvm/test/CodeGen/Hexagon/extload-combine.ll
    llvm/test/CodeGen/Hexagon/extlow.ll
    llvm/test/CodeGen/Hexagon/extract-basic.ll
    llvm/test/CodeGen/Hexagon/fadd.ll
    llvm/test/CodeGen/Hexagon/fcmp.ll
    llvm/test/CodeGen/Hexagon/feature-memops.ll
    llvm/test/CodeGen/Hexagon/find-loop-instr.ll
    llvm/test/CodeGen/Hexagon/find-loop.ll
    llvm/test/CodeGen/Hexagon/float-amode.ll
    llvm/test/CodeGen/Hexagon/float-gen-cmpop.ll
    llvm/test/CodeGen/Hexagon/float.ll
    llvm/test/CodeGen/Hexagon/floatconvert-ieee-rnd-near.ll
    llvm/test/CodeGen/Hexagon/fltnvjump.ll
    llvm/test/CodeGen/Hexagon/fmadd.ll
    llvm/test/CodeGen/Hexagon/fmul.ll
    llvm/test/CodeGen/Hexagon/formal-args-i1.ll
    llvm/test/CodeGen/Hexagon/fp16.ll
    llvm/test/CodeGen/Hexagon/fp_latency.ll
    llvm/test/CodeGen/Hexagon/fpelim-basic.ll
    llvm/test/CodeGen/Hexagon/frame-offset-overflow.ll
    llvm/test/CodeGen/Hexagon/fsub.ll
    llvm/test/CodeGen/Hexagon/funnel-shift2.ll
    llvm/test/CodeGen/Hexagon/fusedandshift.ll
    llvm/test/CodeGen/Hexagon/generate-const-buildvector32.ll
    llvm/test/CodeGen/Hexagon/getBlockAddress.ll
    llvm/test/CodeGen/Hexagon/glob-align-volatile.ll
    llvm/test/CodeGen/Hexagon/global-const-gep.ll
    llvm/test/CodeGen/Hexagon/global-ctor-pcrel.ll
    llvm/test/CodeGen/Hexagon/gp-plus-offset-load.ll
    llvm/test/CodeGen/Hexagon/gp-plus-offset-store.ll
    llvm/test/CodeGen/Hexagon/gp-rel.ll
    llvm/test/CodeGen/Hexagon/hasfp-crash1.ll
    llvm/test/CodeGen/Hexagon/hello-world-v55.ll
    llvm/test/CodeGen/Hexagon/hello-world-v60.ll
    llvm/test/CodeGen/Hexagon/hexagon-cond-jumpr31.ll
    llvm/test/CodeGen/Hexagon/hexagon-tfr-add.ll
    llvm/test/CodeGen/Hexagon/hexagon-verify-implicit-use.ll
    llvm/test/CodeGen/Hexagon/hexagon_cfi_offset.ll
    llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse.ll
    llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_commutative.ll
    llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_constant.ll
    llvm/test/CodeGen/Hexagon/hidden-relocation.ll
    llvm/test/CodeGen/Hexagon/honor-optsize.ll
    llvm/test/CodeGen/Hexagon/hrc-stack-coloring.ll
    llvm/test/CodeGen/Hexagon/hvx-bitcast-v64i1.ll
    llvm/test/CodeGen/Hexagon/hvx-byte-store-double.ll
    llvm/test/CodeGen/Hexagon/hvx-byte-store.ll
    llvm/test/CodeGen/Hexagon/hvx-dbl-dual-output.ll
    llvm/test/CodeGen/Hexagon/hvx-double-vzero.ll
    llvm/test/CodeGen/Hexagon/hvx-dual-output.ll
    llvm/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll
    llvm/test/CodeGen/Hexagon/hvx-nontemporal.ll
    llvm/test/CodeGen/Hexagon/hvx-reuse-fi-base.ll
    llvm/test/CodeGen/Hexagon/hvx-vzero.ll
    llvm/test/CodeGen/Hexagon/hwloop-cleanup.ll
    llvm/test/CodeGen/Hexagon/hwloop-const.ll
    llvm/test/CodeGen/Hexagon/hwloop-crit-edge.ll
    llvm/test/CodeGen/Hexagon/hwloop-dbg.ll
    llvm/test/CodeGen/Hexagon/hwloop-le.ll
    llvm/test/CodeGen/Hexagon/hwloop-long.ll
    llvm/test/CodeGen/Hexagon/hwloop-loop1.ll
    llvm/test/CodeGen/Hexagon/hwloop-lt.ll
    llvm/test/CodeGen/Hexagon/hwloop-lt1.ll
    llvm/test/CodeGen/Hexagon/hwloop-missed.ll
    llvm/test/CodeGen/Hexagon/hwloop-ne.ll
    llvm/test/CodeGen/Hexagon/hwloop-noreturn-call.ll
    llvm/test/CodeGen/Hexagon/hwloop-ph-deadcode.ll
    llvm/test/CodeGen/Hexagon/hwloop-phi-subreg.ll
    llvm/test/CodeGen/Hexagon/hwloop-preh.ll
    llvm/test/CodeGen/Hexagon/hwloop-range.ll
    llvm/test/CodeGen/Hexagon/hwloop-recursion.ll
    llvm/test/CodeGen/Hexagon/hwloop-subreg.ll
    llvm/test/CodeGen/Hexagon/hwloop-wrap2.ll
    llvm/test/CodeGen/Hexagon/hwloop1.ll
    llvm/test/CodeGen/Hexagon/hwloop2.ll
    llvm/test/CodeGen/Hexagon/hwloop3.ll
    llvm/test/CodeGen/Hexagon/hwloop4.ll
    llvm/test/CodeGen/Hexagon/hwloop5.ll
    llvm/test/CodeGen/Hexagon/hx_V6_lo_hi.ll
    llvm/test/CodeGen/Hexagon/i128-bitop.ll
    llvm/test/CodeGen/Hexagon/i16_VarArg.ll
    llvm/test/CodeGen/Hexagon/i1_VarArg.ll
    llvm/test/CodeGen/Hexagon/i8_VarArg.ll
    llvm/test/CodeGen/Hexagon/idxload-with-zero-offset.ll
    llvm/test/CodeGen/Hexagon/ifcvt-diamond-bad.ll
    llvm/test/CodeGen/Hexagon/ifcvt-diamond-bug-2016-08-26.ll
    llvm/test/CodeGen/Hexagon/ifcvt-edge-weight.ll
    llvm/test/CodeGen/Hexagon/ignore-terminal-mbb.ll
    llvm/test/CodeGen/Hexagon/indirect-br.ll
    llvm/test/CodeGen/Hexagon/initial-exec.ll
    llvm/test/CodeGen/Hexagon/inline-asm-a.ll
    llvm/test/CodeGen/Hexagon/inline-asm-bad-constraint.ll
    llvm/test/CodeGen/Hexagon/inline-asm-clobber-lr.ll
    llvm/test/CodeGen/Hexagon/inline-asm-error.ll
    llvm/test/CodeGen/Hexagon/inline-asm-hexagon.ll
    llvm/test/CodeGen/Hexagon/inline-asm-qv.ll
    llvm/test/CodeGen/Hexagon/inline-asm-vecpred128.ll
    llvm/test/CodeGen/Hexagon/inlineasm-output-template.ll
    llvm/test/CodeGen/Hexagon/insert-basic.ll
    llvm/test/CodeGen/Hexagon/insert.ll
    llvm/test/CodeGen/Hexagon/insert4.ll
    llvm/test/CodeGen/Hexagon/instrprof-custom.ll
    llvm/test/CodeGen/Hexagon/intrinsics-v60-misc.ll
    llvm/test/CodeGen/Hexagon/intrinsics-v60-permute.ll
    llvm/test/CodeGen/Hexagon/intrinsics-v60-shift.ll
    llvm/test/CodeGen/Hexagon/intrinsics-v60-vcmp.ll
    llvm/test/CodeGen/Hexagon/intrinsics-v60-vmpy-acc-128B.ll
    llvm/test/CodeGen/Hexagon/intrinsics-v60-vmpy-acc.ll
    llvm/test/CodeGen/Hexagon/intrinsics-v60-vmpy.ll
    llvm/test/CodeGen/Hexagon/intrinsics/atomic_load.ll
    llvm/test/CodeGen/Hexagon/intrinsics/atomic_store.ll
    llvm/test/CodeGen/Hexagon/intrinsics/atomicrmw_addsub_native.ll
    llvm/test/CodeGen/Hexagon/intrinsics/atomicrmw_bitwise_native.ll
    llvm/test/CodeGen/Hexagon/intrinsics/atomicrmw_nand.ll
    llvm/test/CodeGen/Hexagon/intrinsics/byte-store-double.ll
    llvm/test/CodeGen/Hexagon/intrinsics/byte-store.ll
    llvm/test/CodeGen/Hexagon/intrinsics/llsc_bundling.ll
    llvm/test/CodeGen/Hexagon/intrinsics/system_user.ll
    llvm/test/CodeGen/Hexagon/intrinsics/v65-gather-double.ll
    llvm/test/CodeGen/Hexagon/intrinsics/v65-gather.ll
    llvm/test/CodeGen/Hexagon/intrinsics/v65-scatter-gather.ll
    llvm/test/CodeGen/Hexagon/invalid-memrefs.ll
    llvm/test/CodeGen/Hexagon/is-legal-void.ll
    llvm/test/CodeGen/Hexagon/isel-bitcast-v1i8-i8.ll
    llvm/test/CodeGen/Hexagon/isel-buildvector-v2f16.ll
    llvm/test/CodeGen/Hexagon/isel-dcfetch-intrin-map.ll
    llvm/test/CodeGen/Hexagon/isel-global-offset-alignment.ll
    llvm/test/CodeGen/Hexagon/isel-memory-vNi1.ll
    llvm/test/CodeGen/Hexagon/isel-prefer.ll
    llvm/test/CodeGen/Hexagon/isel-select-v4i8.ll
    llvm/test/CodeGen/Hexagon/isel-setcc-i1.ll
    llvm/test/CodeGen/Hexagon/isel-simplify-crash.ll
    llvm/test/CodeGen/Hexagon/isel-splat-vector-crash.ll
    llvm/test/CodeGen/Hexagon/isel-splat-vector-dag-crash.ll
    llvm/test/CodeGen/Hexagon/isel-uaddo-1.ll
    llvm/test/CodeGen/Hexagon/isel-v3i16.ll
    llvm/test/CodeGen/Hexagon/isel-vacopy.ll
    llvm/test/CodeGen/Hexagon/isel-vlsr-v2i16.ll
    llvm/test/CodeGen/Hexagon/isel/extload-i1.ll
    llvm/test/CodeGen/Hexagon/jt-in-text.ll
    llvm/test/CodeGen/Hexagon/jump-prob.ll
    llvm/test/CodeGen/Hexagon/jump-table-isel.ll
    llvm/test/CodeGen/Hexagon/large-number-of-preds.ll
    llvm/test/CodeGen/Hexagon/late-pred.ll
    llvm/test/CodeGen/Hexagon/late_instr.ll
    llvm/test/CodeGen/Hexagon/lcomm.ll
    llvm/test/CodeGen/Hexagon/load-abs.ll
    llvm/test/CodeGen/Hexagon/loadi1-G0.ll
    llvm/test/CodeGen/Hexagon/loadi1-v4-G0.ll
    llvm/test/CodeGen/Hexagon/loadi1-v4.ll
    llvm/test/CodeGen/Hexagon/loadi1.ll
    llvm/test/CodeGen/Hexagon/local-exec.ll
    llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll
    llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll
    llvm/test/CodeGen/Hexagon/loop-idiom/lcssa.ll
    llvm/test/CodeGen/Hexagon/loop-idiom/memmove-rt-check.ll
    llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll
    llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-shiftconv-fail.ll
    llvm/test/CodeGen/Hexagon/loop-prefetch.ll
    llvm/test/CodeGen/Hexagon/loop-rotate-bug.ll
    llvm/test/CodeGen/Hexagon/loop-rotate-liveins.ll
    llvm/test/CodeGen/Hexagon/loop_correctness.ll
    llvm/test/CodeGen/Hexagon/lower-extract-subvector.ll
    llvm/test/CodeGen/Hexagon/lsr-post-inc-cross-use-offsets.ll
    llvm/test/CodeGen/Hexagon/lsr-postinc-nested-loop.ll
    llvm/test/CodeGen/Hexagon/machine-sink.ll
    llvm/test/CodeGen/Hexagon/macint.ll
    llvm/test/CodeGen/Hexagon/mem-fi-add.ll
    llvm/test/CodeGen/Hexagon/mem-load-circ.ll
    llvm/test/CodeGen/Hexagon/mem-ops-sub.ll
    llvm/test/CodeGen/Hexagon/mem-ops-sub_01.ll
    llvm/test/CodeGen/Hexagon/mem-ops-sub_i16.ll
    llvm/test/CodeGen/Hexagon/mem-ops-sub_i16_01.ll
    llvm/test/CodeGen/Hexagon/memcmp.ll
    llvm/test/CodeGen/Hexagon/memcpy-likely-aligned.ll
    llvm/test/CodeGen/Hexagon/memcpy-memmove-inline.ll
    llvm/test/CodeGen/Hexagon/memop-bit18.ll
    llvm/test/CodeGen/Hexagon/memops-stack.ll
    llvm/test/CodeGen/Hexagon/memops.ll
    llvm/test/CodeGen/Hexagon/memops1.ll
    llvm/test/CodeGen/Hexagon/memops2.ll
    llvm/test/CodeGen/Hexagon/memops3.ll
    llvm/test/CodeGen/Hexagon/memops_global.ll
    llvm/test/CodeGen/Hexagon/memset-inline.ll
    llvm/test/CodeGen/Hexagon/minu-zext-16.ll
    llvm/test/CodeGen/Hexagon/minu-zext-8.ll
    llvm/test/CodeGen/Hexagon/mipi-double-small.ll
    llvm/test/CodeGen/Hexagon/misaligned-access.ll
    llvm/test/CodeGen/Hexagon/misaligned-const-load.ll
    llvm/test/CodeGen/Hexagon/misaligned-const-store.ll
    llvm/test/CodeGen/Hexagon/misaligned_double_vector_store_not_fast.ll
    llvm/test/CodeGen/Hexagon/misched-top-rptracker-sync.ll
    llvm/test/CodeGen/Hexagon/mpy.ll
    llvm/test/CodeGen/Hexagon/mpysin-imm.ll
    llvm/test/CodeGen/Hexagon/mul64-sext.ll
    llvm/test/CodeGen/Hexagon/mul64.ll
    llvm/test/CodeGen/Hexagon/mulhs.ll
    llvm/test/CodeGen/Hexagon/multi-cycle.ll
    llvm/test/CodeGen/Hexagon/mux-basic.ll
    llvm/test/CodeGen/Hexagon/muxii-crash.ll
    llvm/test/CodeGen/Hexagon/nbench1.ll
    llvm/test/CodeGen/Hexagon/newvalueSameReg.ll
    llvm/test/CodeGen/Hexagon/newvaluejump-kill.ll
    llvm/test/CodeGen/Hexagon/newvaluejump-postinc.ll
    llvm/test/CodeGen/Hexagon/newvaluejump.ll
    llvm/test/CodeGen/Hexagon/newvaluejump2.ll
    llvm/test/CodeGen/Hexagon/newvaluejump3.ll
    llvm/test/CodeGen/Hexagon/newvaluestore.ll
    llvm/test/CodeGen/Hexagon/newvaluestore2.ll
    llvm/test/CodeGen/Hexagon/no-packets-gather.ll
    llvm/test/CodeGen/Hexagon/no-packets.ll
    llvm/test/CodeGen/Hexagon/no_struct_element.ll
    llvm/test/CodeGen/Hexagon/noreturn-noepilog.ll
    llvm/test/CodeGen/Hexagon/noreturn-notail.ll
    llvm/test/CodeGen/Hexagon/noreturn-stack-elim.ll
    llvm/test/CodeGen/Hexagon/not-op.ll
    llvm/test/CodeGen/Hexagon/notcheap.ll
    llvm/test/CodeGen/Hexagon/ntstbit.ll
    llvm/test/CodeGen/Hexagon/nv_store_vec.ll
    llvm/test/CodeGen/Hexagon/opt-addr-mode-subreg-use.ll
    llvm/test/CodeGen/Hexagon/opt-addr-mode.ll
    llvm/test/CodeGen/Hexagon/opt-fabs.ll
    llvm/test/CodeGen/Hexagon/opt-fneg.ll
    llvm/test/CodeGen/Hexagon/opt-glob-addrs-000.ll
    llvm/test/CodeGen/Hexagon/opt-glob-addrs-001.ll
    llvm/test/CodeGen/Hexagon/opt-glob-addrs-003.ll
    llvm/test/CodeGen/Hexagon/opt-sext-intrinsics.ll
    llvm/test/CodeGen/Hexagon/opt-spill-volatile.ll
    llvm/test/CodeGen/Hexagon/packed-store.ll
    llvm/test/CodeGen/Hexagon/packetize-allocframe.ll
    llvm/test/CodeGen/Hexagon/packetize-call-r29.ll
    llvm/test/CodeGen/Hexagon/packetize-cfi-location.ll
    llvm/test/CodeGen/Hexagon/packetize-impdef-1.ll
    llvm/test/CodeGen/Hexagon/packetize-impdef.ll
    llvm/test/CodeGen/Hexagon/packetize-l2fetch.ll
    llvm/test/CodeGen/Hexagon/packetize-return-arg.ll
    llvm/test/CodeGen/Hexagon/packetize-tailcall-arg.ll
    llvm/test/CodeGen/Hexagon/packetize-volatiles.ll
    llvm/test/CodeGen/Hexagon/partword-cmpxchg.ll
    llvm/test/CodeGen/Hexagon/peephole-kill-flags.ll
    llvm/test/CodeGen/Hexagon/peephole-move-phi.ll
    llvm/test/CodeGen/Hexagon/pic-jt-big.ll
    llvm/test/CodeGen/Hexagon/pic-jumptables.ll
    llvm/test/CodeGen/Hexagon/pic-local.ll
    llvm/test/CodeGen/Hexagon/pic-regusage.ll
    llvm/test/CodeGen/Hexagon/pic-sdata.ll
    llvm/test/CodeGen/Hexagon/pic-simple.ll
    llvm/test/CodeGen/Hexagon/pic-static.ll
    llvm/test/CodeGen/Hexagon/plt-rel.ll
    llvm/test/CodeGen/Hexagon/pmpyw_acc.ll
    llvm/test/CodeGen/Hexagon/post-inc-aa-metadata.ll
    llvm/test/CodeGen/Hexagon/postinc-aggr-dag-cycle.ll
    llvm/test/CodeGen/Hexagon/postinc-float.ll
    llvm/test/CodeGen/Hexagon/postinc-load.ll
    llvm/test/CodeGen/Hexagon/postinc-offset.ll
    llvm/test/CodeGen/Hexagon/postinc-order.ll
    llvm/test/CodeGen/Hexagon/postinc-store.ll
    llvm/test/CodeGen/Hexagon/pred-absolute-store.ll
    llvm/test/CodeGen/Hexagon/pred-gp.ll
    llvm/test/CodeGen/Hexagon/pred-instrs.ll
    llvm/test/CodeGen/Hexagon/pred-taken-jump.ll
    llvm/test/CodeGen/Hexagon/predicate-logical.ll
    llvm/test/CodeGen/Hexagon/predicate-rcmp.ll
    llvm/test/CodeGen/Hexagon/predtfrs.ll
    llvm/test/CodeGen/Hexagon/prefetch-intr.ll
    llvm/test/CodeGen/Hexagon/prefetch-shuffler-ice.ll
    llvm/test/CodeGen/Hexagon/prob-types.ll
    llvm/test/CodeGen/Hexagon/prof-early-if.ll
    llvm/test/CodeGen/Hexagon/propagate-vcombine.ll
    llvm/test/CodeGen/Hexagon/ps_call_nr.ll
    llvm/test/CodeGen/Hexagon/rdf-copy-undef.ll
    llvm/test/CodeGen/Hexagon/rdf-copy-undef2.ll
    llvm/test/CodeGen/Hexagon/rdf-copy.ll
    llvm/test/CodeGen/Hexagon/rdf-dead-loop.ll
    llvm/test/CodeGen/Hexagon/rdf-def-mask.ll
    llvm/test/CodeGen/Hexagon/rdf-extra-livein.ll
    llvm/test/CodeGen/Hexagon/rdf-filter-defs.ll
    llvm/test/CodeGen/Hexagon/rdf-ignore-undef.ll
    llvm/test/CodeGen/Hexagon/rdf-inline-asm-fixed.ll
    llvm/test/CodeGen/Hexagon/rdf-inline-asm.ll
    llvm/test/CodeGen/Hexagon/rdf-kill-last-op.ll
    llvm/test/CodeGen/Hexagon/rdf-multiple-phis-up.ll
    llvm/test/CodeGen/Hexagon/rdf-phi-shadows.ll
    llvm/test/CodeGen/Hexagon/rdf-phi-up.ll
    llvm/test/CodeGen/Hexagon/redundant-branching2.ll
    llvm/test/CodeGen/Hexagon/reg-eq-cmp.ll
    llvm/test/CodeGen/Hexagon/reg-scav-imp-use-dbl-vec.ll
    llvm/test/CodeGen/Hexagon/reg-scavengebug-2.ll
    llvm/test/CodeGen/Hexagon/reg-scavengebug-3.ll
    llvm/test/CodeGen/Hexagon/reg-scavengebug-4.ll
    llvm/test/CodeGen/Hexagon/reg-scavengebug-5.ll
    llvm/test/CodeGen/Hexagon/reg-scavengebug.ll
    llvm/test/CodeGen/Hexagon/reg-scavenger-valid-slot.ll
    llvm/test/CodeGen/Hexagon/reg_seq.ll
    llvm/test/CodeGen/Hexagon/regalloc-block-overlap.ll
    llvm/test/CodeGen/Hexagon/registerscav-missing-spill-slot.ll
    llvm/test/CodeGen/Hexagon/registerscavenger-fail1.ll
    llvm/test/CodeGen/Hexagon/regp-underflow.ll
    llvm/test/CodeGen/Hexagon/regscav-wrong-super-sub-regs.ll
    llvm/test/CodeGen/Hexagon/regscavenger_fail_hwloop.ll
    llvm/test/CodeGen/Hexagon/regscavengerbug.ll
    llvm/test/CodeGen/Hexagon/remove-endloop.ll
    llvm/test/CodeGen/Hexagon/remove_lsr.ll
    llvm/test/CodeGen/Hexagon/retval-redundant-copy.ll
    llvm/test/CodeGen/Hexagon/rotl-i64.ll
    llvm/test/CodeGen/Hexagon/runtime-stkchk.ll
    llvm/test/CodeGen/Hexagon/save-kill-csr.ll
    llvm/test/CodeGen/Hexagon/save-regs-thresh.ll
    llvm/test/CodeGen/Hexagon/sdata-array.ll
    llvm/test/CodeGen/Hexagon/sdata-basic.ll
    llvm/test/CodeGen/Hexagon/sdata-expand-const.ll
    llvm/test/CodeGen/Hexagon/sdata-load-size.ll
    llvm/test/CodeGen/Hexagon/sdata-opaque-type.ll
    llvm/test/CodeGen/Hexagon/sdata-stack-guard.ll
    llvm/test/CodeGen/Hexagon/sdr-nosplit1.ll
    llvm/test/CodeGen/Hexagon/sdr-reg-profit.ll
    llvm/test/CodeGen/Hexagon/section_7275.ll
    llvm/test/CodeGen/Hexagon/select-instr-align.ll
    llvm/test/CodeGen/Hexagon/select-vector-pred.ll
    llvm/test/CodeGen/Hexagon/setmemrefs.ll
    llvm/test/CodeGen/Hexagon/sffms.ll
    llvm/test/CodeGen/Hexagon/sfmpyacc_scale.ll
    llvm/test/CodeGen/Hexagon/shrink-frame-basic.ll
    llvm/test/CodeGen/Hexagon/signed_immediates.ll
    llvm/test/CodeGen/Hexagon/simpletailcall.ll
    llvm/test/CodeGen/Hexagon/simplify64bitops_7223.ll
    llvm/test/CodeGen/Hexagon/split-const32-const64.ll
    llvm/test/CodeGen/Hexagon/split-muxii.ll
    llvm/test/CodeGen/Hexagon/split-vecpred.ll
    llvm/test/CodeGen/Hexagon/stack-align-reset.ll
    llvm/test/CodeGen/Hexagon/stack-align1.ll
    llvm/test/CodeGen/Hexagon/stack-align2.ll
    llvm/test/CodeGen/Hexagon/stack-alloca1.ll
    llvm/test/CodeGen/Hexagon/stack-alloca2.ll
    llvm/test/CodeGen/Hexagon/stack-guard-acceptable-type.ll
    llvm/test/CodeGen/Hexagon/static.ll
    llvm/test/CodeGen/Hexagon/store-AbsSet.ll
    llvm/test/CodeGen/Hexagon/store-abs.ll
    llvm/test/CodeGen/Hexagon/store-constant.ll
    llvm/test/CodeGen/Hexagon/store-imm-amode.ll
    llvm/test/CodeGen/Hexagon/store-imm-byte.ll
    llvm/test/CodeGen/Hexagon/store-imm-halword.ll
    llvm/test/CodeGen/Hexagon/store-imm-large-stack.ll
    llvm/test/CodeGen/Hexagon/store-imm-stack-object.ll
    llvm/test/CodeGen/Hexagon/store-imm-word.ll
    llvm/test/CodeGen/Hexagon/store-shift.ll
    llvm/test/CodeGen/Hexagon/store-vector-pred.ll
    llvm/test/CodeGen/Hexagon/store-widen-aliased-load.ll
    llvm/test/CodeGen/Hexagon/store-widen-negv.ll
    llvm/test/CodeGen/Hexagon/store-widen-negv2.ll
    llvm/test/CodeGen/Hexagon/store-widen-subreg.ll
    llvm/test/CodeGen/Hexagon/store-widen.ll
    llvm/test/CodeGen/Hexagon/store1.ll
    llvm/test/CodeGen/Hexagon/store_abs.ll
    llvm/test/CodeGen/Hexagon/storerd-io-over-rr.ll
    llvm/test/CodeGen/Hexagon/storerinewabs.ll
    llvm/test/CodeGen/Hexagon/struct-const.ll
    llvm/test/CodeGen/Hexagon/struct_args.ll
    llvm/test/CodeGen/Hexagon/struct_args_large.ll
    llvm/test/CodeGen/Hexagon/struct_copy.ll
    llvm/test/CodeGen/Hexagon/struct_copy_sched_r16.ll
    llvm/test/CodeGen/Hexagon/sub-add.ll
    llvm/test/CodeGen/Hexagon/subi-asl.ll
    llvm/test/CodeGen/Hexagon/switch-lut-explicit-section.ll
    llvm/test/CodeGen/Hexagon/switch-lut-function-section.ll
    llvm/test/CodeGen/Hexagon/switch-lut-multiple-functions.ll
    llvm/test/CodeGen/Hexagon/switch-lut-text-section.ll
    llvm/test/CodeGen/Hexagon/swp-art-deps-rec.ll
    llvm/test/CodeGen/Hexagon/swp-bad-sched.ll
    llvm/test/CodeGen/Hexagon/swp-badorder.ll
    llvm/test/CodeGen/Hexagon/swp-carried-1.ll
    llvm/test/CodeGen/Hexagon/swp-chain-refs.ll
    llvm/test/CodeGen/Hexagon/swp-change-dep-cycle.ll
    llvm/test/CodeGen/Hexagon/swp-change-dep.ll
    llvm/test/CodeGen/Hexagon/swp-change-dep1.ll
    llvm/test/CodeGen/Hexagon/swp-change-deps.ll
    llvm/test/CodeGen/Hexagon/swp-check-offset.ll
    llvm/test/CodeGen/Hexagon/swp-const-tc.ll
    llvm/test/CodeGen/Hexagon/swp-const-tc1.ll
    llvm/test/CodeGen/Hexagon/swp-const-tc2.ll
    llvm/test/CodeGen/Hexagon/swp-const-tc3.ll
    llvm/test/CodeGen/Hexagon/swp-conv3x3-nested.ll
    llvm/test/CodeGen/Hexagon/swp-copytophi-dag.ll
    llvm/test/CodeGen/Hexagon/swp-cse-phi.ll
    llvm/test/CodeGen/Hexagon/swp-dag-phi.ll
    llvm/test/CodeGen/Hexagon/swp-dag-phi1.ll
    llvm/test/CodeGen/Hexagon/swp-dead-regseq.ll
    llvm/test/CodeGen/Hexagon/swp-dep-neg-offset.ll
    llvm/test/CodeGen/Hexagon/swp-disable-Os.ll
    llvm/test/CodeGen/Hexagon/swp-epilog-numphis.ll
    llvm/test/CodeGen/Hexagon/swp-epilog-phi10.ll
    llvm/test/CodeGen/Hexagon/swp-epilog-phi11.ll
    llvm/test/CodeGen/Hexagon/swp-epilog-phi12.ll
    llvm/test/CodeGen/Hexagon/swp-epilog-phi13.ll
    llvm/test/CodeGen/Hexagon/swp-epilog-phi2.ll
    llvm/test/CodeGen/Hexagon/swp-epilog-phi4.ll
    llvm/test/CodeGen/Hexagon/swp-epilog-phi5.ll
    llvm/test/CodeGen/Hexagon/swp-epilog-phi6.ll
    llvm/test/CodeGen/Hexagon/swp-epilog-phi8.ll
    llvm/test/CodeGen/Hexagon/swp-epilog-phi9.ll
    llvm/test/CodeGen/Hexagon/swp-epilog-reuse-1.ll
    llvm/test/CodeGen/Hexagon/swp-epilog-reuse.ll
    llvm/test/CodeGen/Hexagon/swp-epilog-reuse2.ll
    llvm/test/CodeGen/Hexagon/swp-epilog-reuse3.ll
    llvm/test/CodeGen/Hexagon/swp-epilog-reuse4.ll
    llvm/test/CodeGen/Hexagon/swp-exit-fixup.ll
    llvm/test/CodeGen/Hexagon/swp-fix-last-use.ll
    llvm/test/CodeGen/Hexagon/swp-fix-last-use1.ll
    llvm/test/CodeGen/Hexagon/swp-kernel-last-use.ll
    llvm/test/CodeGen/Hexagon/swp-kernel-phi1.ll
    llvm/test/CodeGen/Hexagon/swp-large-rec.ll
    llvm/test/CodeGen/Hexagon/swp-listen-loop3.ll
    llvm/test/CodeGen/Hexagon/swp-loop-carried-crash.ll
    llvm/test/CodeGen/Hexagon/swp-loop-carried-unknown.ll
    llvm/test/CodeGen/Hexagon/swp-loopval.ll
    llvm/test/CodeGen/Hexagon/swp-lots-deps.ll
    llvm/test/CodeGen/Hexagon/swp-matmul-bitext.ll
    llvm/test/CodeGen/Hexagon/swp-max-stage3.ll
    llvm/test/CodeGen/Hexagon/swp-max.ll
    llvm/test/CodeGen/Hexagon/swp-maxstart.ll
    llvm/test/CodeGen/Hexagon/swp-more-phi.ll
    llvm/test/CodeGen/Hexagon/swp-multi-loops.ll
    llvm/test/CodeGen/Hexagon/swp-multi-phi-refs.ll
    llvm/test/CodeGen/Hexagon/swp-node-order.ll
    llvm/test/CodeGen/Hexagon/swp-order-carried.ll
    llvm/test/CodeGen/Hexagon/swp-order-copies.ll
    llvm/test/CodeGen/Hexagon/swp-order-deps1.ll
    llvm/test/CodeGen/Hexagon/swp-order-deps3.ll
    llvm/test/CodeGen/Hexagon/swp-order-deps4.ll
    llvm/test/CodeGen/Hexagon/swp-order-deps5.ll
    llvm/test/CodeGen/Hexagon/swp-order-deps6.ll
    llvm/test/CodeGen/Hexagon/swp-order-deps7.ll
    llvm/test/CodeGen/Hexagon/swp-order-prec.ll
    llvm/test/CodeGen/Hexagon/swp-order.ll
    llvm/test/CodeGen/Hexagon/swp-order1.ll
    llvm/test/CodeGen/Hexagon/swp-phi-ch-offset.ll
    llvm/test/CodeGen/Hexagon/swp-phi-chains.ll
    llvm/test/CodeGen/Hexagon/swp-phi-def-use.ll
    llvm/test/CodeGen/Hexagon/swp-phi-dep.ll
    llvm/test/CodeGen/Hexagon/swp-phi-dep1.ll
    llvm/test/CodeGen/Hexagon/swp-phi-order.ll
    llvm/test/CodeGen/Hexagon/swp-phi-ref.ll
    llvm/test/CodeGen/Hexagon/swp-phi-ref1.ll
    llvm/test/CodeGen/Hexagon/swp-phi-start.ll
    llvm/test/CodeGen/Hexagon/swp-phi.ll
    llvm/test/CodeGen/Hexagon/swp-physreg.ll
    llvm/test/CodeGen/Hexagon/swp-pragma-disable-bug.ll
    llvm/test/CodeGen/Hexagon/swp-prolog-phi4.ll
    llvm/test/CodeGen/Hexagon/swp-regseq.ll
    llvm/test/CodeGen/Hexagon/swp-remove-dep-ice.ll
    llvm/test/CodeGen/Hexagon/swp-rename-dead-phi.ll
    llvm/test/CodeGen/Hexagon/swp-rename.ll
    llvm/test/CodeGen/Hexagon/swp-replace-uses1.ll
    llvm/test/CodeGen/Hexagon/swp-resmii-1.ll
    llvm/test/CodeGen/Hexagon/swp-resmii.ll
    llvm/test/CodeGen/Hexagon/swp-reuse-phi-1.ll
    llvm/test/CodeGen/Hexagon/swp-reuse-phi-2.ll
    llvm/test/CodeGen/Hexagon/swp-reuse-phi-4.ll
    llvm/test/CodeGen/Hexagon/swp-reuse-phi-5.ll
    llvm/test/CodeGen/Hexagon/swp-reuse-phi-6.ll
    llvm/test/CodeGen/Hexagon/swp-reuse-phi.ll
    llvm/test/CodeGen/Hexagon/swp-sigma.ll
    llvm/test/CodeGen/Hexagon/swp-stages.ll
    llvm/test/CodeGen/Hexagon/swp-stages3.ll
    llvm/test/CodeGen/Hexagon/swp-stages4.ll
    llvm/test/CodeGen/Hexagon/swp-stages5.ll
    llvm/test/CodeGen/Hexagon/swp-swap.ll
    llvm/test/CodeGen/Hexagon/swp-tfri.ll
    llvm/test/CodeGen/Hexagon/swp-vect-dotprod.ll
    llvm/test/CodeGen/Hexagon/swp-vmult.ll
    llvm/test/CodeGen/Hexagon/swp-vsum.ll
    llvm/test/CodeGen/Hexagon/swp-xxh2.ll
    llvm/test/CodeGen/Hexagon/tail-call-mem-intrinsics.ll
    llvm/test/CodeGen/Hexagon/tail-dup-subreg-map.ll
    llvm/test/CodeGen/Hexagon/tc_duplex.ll
    llvm/test/CodeGen/Hexagon/tc_duplex_asm.ll
    llvm/test/CodeGen/Hexagon/tc_sched.ll
    llvm/test/CodeGen/Hexagon/tc_sched1.ll
    llvm/test/CodeGen/Hexagon/tfr-to-combine.ll
    llvm/test/CodeGen/Hexagon/tied_oper.ll
    llvm/test/CodeGen/Hexagon/tiny_bkfir_artdeps.ll
    llvm/test/CodeGen/Hexagon/tiny_bkfir_loop_align.ll
    llvm/test/CodeGen/Hexagon/tinycore.ll
    llvm/test/CodeGen/Hexagon/tls_gd.ll
    llvm/test/CodeGen/Hexagon/tls_pic.ll
    llvm/test/CodeGen/Hexagon/tls_static.ll
    llvm/test/CodeGen/Hexagon/trivialmemaliascheck.ll
    llvm/test/CodeGen/Hexagon/trunc-mpy.ll
    llvm/test/CodeGen/Hexagon/twoaddressbug.ll
    llvm/test/CodeGen/Hexagon/undef-ret.ll
    llvm/test/CodeGen/Hexagon/undo-dag-shift.ll
    llvm/test/CodeGen/Hexagon/union-1.ll
    llvm/test/CodeGen/Hexagon/upper-mpy.ll
    llvm/test/CodeGen/Hexagon/v6-inlasm1.ll
    llvm/test/CodeGen/Hexagon/v6-inlasm2.ll
    llvm/test/CodeGen/Hexagon/v6-inlasm3.ll
    llvm/test/CodeGen/Hexagon/v6-inlasm4.ll
    llvm/test/CodeGen/Hexagon/v6-shuffl.ll
    llvm/test/CodeGen/Hexagon/v6-spill1.ll
    llvm/test/CodeGen/Hexagon/v6-unaligned-spill.ll
    llvm/test/CodeGen/Hexagon/v6-vecpred-copy.ll
    llvm/test/CodeGen/Hexagon/v60-align.ll
    llvm/test/CodeGen/Hexagon/v60-cur.ll
    llvm/test/CodeGen/Hexagon/v60-haar-postinc.ll
    llvm/test/CodeGen/Hexagon/v60-halide-vcombinei8.ll
    llvm/test/CodeGen/Hexagon/v60-vec-128b-1.ll
    llvm/test/CodeGen/Hexagon/v60-vecpred-spill.ll
    llvm/test/CodeGen/Hexagon/v60-vsel1.ll
    llvm/test/CodeGen/Hexagon/v60-vsel2.ll
    llvm/test/CodeGen/Hexagon/v60Intrins.ll
    llvm/test/CodeGen/Hexagon/v60_Q6_P_rol_PI.ll
    llvm/test/CodeGen/Hexagon/v60_sort16.ll
    llvm/test/CodeGen/Hexagon/v60rol-instrs.ll
    llvm/test/CodeGen/Hexagon/v60small.ll
    llvm/test/CodeGen/Hexagon/v62-CJAllSlots.ll
    llvm/test/CodeGen/Hexagon/v62-inlasm4.ll
    llvm/test/CodeGen/Hexagon/v6vassignp.ll
    llvm/test/CodeGen/Hexagon/v6vec-vmemu1.ll
    llvm/test/CodeGen/Hexagon/v6vec-vmemu2.ll
    llvm/test/CodeGen/Hexagon/v6vec-vprint.ll
    llvm/test/CodeGen/Hexagon/v6vec-vshuff.ll
    llvm/test/CodeGen/Hexagon/v6vec_inc1.ll
    llvm/test/CodeGen/Hexagon/v6vec_zero.ll
    llvm/test/CodeGen/Hexagon/v6vect-dbl-fail1.ll
    llvm/test/CodeGen/Hexagon/v6vect-dbl-spill.ll
    llvm/test/CodeGen/Hexagon/v6vect-dbl.ll
    llvm/test/CodeGen/Hexagon/v6vect-dh1.ll
    llvm/test/CodeGen/Hexagon/v6vect-locals1.ll
    llvm/test/CodeGen/Hexagon/v6vect-no-sideeffects.ll
    llvm/test/CodeGen/Hexagon/v6vect-pred2.ll
    llvm/test/CodeGen/Hexagon/v6vect-spill-kill.ll
    llvm/test/CodeGen/Hexagon/v6vect-vmem1.ll
    llvm/test/CodeGen/Hexagon/v6vect-vsplat.ll
    llvm/test/CodeGen/Hexagon/vacopy.ll
    llvm/test/CodeGen/Hexagon/vadd1.ll
    llvm/test/CodeGen/Hexagon/vaddh.ll
    llvm/test/CodeGen/Hexagon/validate-offset.ll
    llvm/test/CodeGen/Hexagon/vararg-linux-abi.ll
    llvm/test/CodeGen/Hexagon/vararg.ll
    llvm/test/CodeGen/Hexagon/vararg_align_check.ll
    llvm/test/CodeGen/Hexagon/vararg_double_onstack.ll
    llvm/test/CodeGen/Hexagon/vararg_named.ll
    llvm/test/CodeGen/Hexagon/varargs-memv.ll
    llvm/test/CodeGen/Hexagon/vassign-to-combine.ll
    llvm/test/CodeGen/Hexagon/vcombine128_to_req_seq.ll
    llvm/test/CodeGen/Hexagon/vcombine_subreg.ll
    llvm/test/CodeGen/Hexagon/vcombine_to_req_seq.ll
    llvm/test/CodeGen/Hexagon/vcombine_zero_diff_ptrs.ll
    llvm/test/CodeGen/Hexagon/vdotprod.ll
    llvm/test/CodeGen/Hexagon/vec-align.ll
    llvm/test/CodeGen/Hexagon/vec-pred-spill1.ll
    llvm/test/CodeGen/Hexagon/vec-vararg-align.ll
    llvm/test/CodeGen/Hexagon/vecPred2Vec.ll
    llvm/test/CodeGen/Hexagon/vect-any_extend.ll
    llvm/test/CodeGen/Hexagon/vect-dbl-post-inc.ll
    llvm/test/CodeGen/Hexagon/vect-downscale.ll
    llvm/test/CodeGen/Hexagon/vect-set_cc_v2i32.ll
    llvm/test/CodeGen/Hexagon/vect-vd0.ll
    llvm/test/CodeGen/Hexagon/vect-zero_extend.ll
    llvm/test/CodeGen/Hexagon/vect/extract-v4i1.ll
    llvm/test/CodeGen/Hexagon/vect/setcc-v2i32.ll
    llvm/test/CodeGen/Hexagon/vect/vect-anyextend.ll
    llvm/test/CodeGen/Hexagon/vect/vect-apint-truncate.ll
    llvm/test/CodeGen/Hexagon/vect/vect-bad-bitcast.ll
    llvm/test/CodeGen/Hexagon/vect/vect-bitcast-1.ll
    llvm/test/CodeGen/Hexagon/vect/vect-bitcast.ll
    llvm/test/CodeGen/Hexagon/vect/vect-bool-isel-crash.ll
    llvm/test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll
    llvm/test/CodeGen/Hexagon/vect/vect-cst-v4i8.ll
    llvm/test/CodeGen/Hexagon/vect/vect-cst.ll
    llvm/test/CodeGen/Hexagon/vect/vect-extract.ll
    llvm/test/CodeGen/Hexagon/vect/vect-fma.ll
    llvm/test/CodeGen/Hexagon/vect/vect-illegal-type.ll
    llvm/test/CodeGen/Hexagon/vect/vect-infloop.ll
    llvm/test/CodeGen/Hexagon/vect/vect-insert-extract-elt.ll
    llvm/test/CodeGen/Hexagon/vect/vect-load-1.ll
    llvm/test/CodeGen/Hexagon/vect/vect-load-v4i16.ll
    llvm/test/CodeGen/Hexagon/vect/vect-load.ll
    llvm/test/CodeGen/Hexagon/vect/vect-shuffle.ll
    llvm/test/CodeGen/Hexagon/vect/vect-splat.ll
    llvm/test/CodeGen/Hexagon/vect/vect-store-v2i16.ll
    llvm/test/CodeGen/Hexagon/vect/vect-truncate.ll
    llvm/test/CodeGen/Hexagon/vect/vect-v4i16.ll
    llvm/test/CodeGen/Hexagon/vect/vect-vaslw.ll
    llvm/test/CodeGen/Hexagon/vect/vect-vshifts.ll
    llvm/test/CodeGen/Hexagon/vect/vect-vsplatb.ll
    llvm/test/CodeGen/Hexagon/vect/vect-vsplath.ll
    llvm/test/CodeGen/Hexagon/vect/vect-xor.ll
    llvm/test/CodeGen/Hexagon/vect/vect-zeroextend.ll
    llvm/test/CodeGen/Hexagon/vect/zext-v4i1.ll
    llvm/test/CodeGen/Hexagon/vect_setcc.ll
    llvm/test/CodeGen/Hexagon/vector-align.ll
    llvm/test/CodeGen/Hexagon/vector-ext-load.ll
    llvm/test/CodeGen/Hexagon/vector-sint-to-fp.ll
    llvm/test/CodeGen/Hexagon/verify-sink-code.ll
    llvm/test/CodeGen/Hexagon/verify-undef.ll
    llvm/test/CodeGen/Hexagon/vgather-opt-addr.ll
    llvm/test/CodeGen/Hexagon/vload-postinc-sel.ll
    llvm/test/CodeGen/Hexagon/vmemu-128.ll
    llvm/test/CodeGen/Hexagon/vpack_eo.ll
    llvm/test/CodeGen/Hexagon/vrcmpys.ll
    llvm/test/CodeGen/Hexagon/vselect-pseudo.ll
    llvm/test/CodeGen/Hexagon/wcsrtomb.ll
    llvm/test/CodeGen/Hexagon/zextloadi1.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/Hexagon/64bit_tstbit.ll b/llvm/test/CodeGen/Hexagon/64bit_tstbit.ll
index b265d1bef6fe3..81bc2f95ae886 100644
--- a/llvm/test/CodeGen/Hexagon/64bit_tstbit.ll
+++ b/llvm/test/CodeGen/Hexagon/64bit_tstbit.ll
@@ -8,27 +8,26 @@
 
 target triple = "hexagon-unknown-unknown-elf"
 
-%struct.hlist_node.45.966.3115.3729.4036.4650.4957.6492.6799.7413.7720.9562.10790.11097.11404.11711.14474.17192 = type { %struct.hlist_node.45.966.3115.3729.4036.4650.4957.6492.6799.7413.7720.9562.10790.11097.11404.11711.14474.17192*, %struct.hlist_node.45.966.3115.3729.4036.4650.4957.6492.6799.7413.7720.9562.10790.11097.11404.11711.14474.17192** }
+%struct.hlist_node.45.966.3115.3729.4036.4650.4957.6492.6799.7413.7720.9562.10790.11097.11404.11711.14474.17192 = type { ptr, ptr }
 
 @.str.8 = external dso_local unnamed_addr constant [5 x i8], align 1
 
-declare dso_local void @panic(i8*, ...) local_unnamed_addr
+declare dso_local void @panic(ptr, ...) local_unnamed_addr
 
 define dso_local fastcc void @elv_rqhash_find() unnamed_addr {
 entry:
-  %cmd_flags = getelementptr inbounds %struct.hlist_node.45.966.3115.3729.4036.4650.4957.6492.6799.7413.7720.9562.10790.11097.11404.11711.14474.17192, %struct.hlist_node.45.966.3115.3729.4036.4650.4957.6492.6799.7413.7720.9562.10790.11097.11404.11711.14474.17192* null, i32 -5
-  %0 = bitcast %struct.hlist_node.45.966.3115.3729.4036.4650.4957.6492.6799.7413.7720.9562.10790.11097.11404.11711.14474.17192* %cmd_flags to i64*
-  %1 = load i64, i64* %0, align 8
-  %2 = and i64 %1, 4294967296
-  %tobool10 = icmp eq i64 %2, 0
+  %cmd_flags = getelementptr inbounds %struct.hlist_node.45.966.3115.3729.4036.4650.4957.6492.6799.7413.7720.9562.10790.11097.11404.11711.14474.17192, ptr null, i32 -5
+  %0 = load i64, ptr %cmd_flags, align 8
+  %1 = and i64 %0, 4294967296
+  %tobool10 = icmp eq i64 %1, 0
   br i1 %tobool10, label %do.body11, label %do.end14
 
 do.body11:                                        ; preds = %entry
-  tail call void (i8*, ...) @panic(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.8, i32 0, i32 0)) #1
+  tail call void (ptr, ...) @panic(ptr @.str.8) #1
   unreachable
 
 do.end14:                                         ; preds = %entry
-  %and.i = and i64 %1, -4294967297
-  store i64 %and.i, i64* %0, align 8
+  %and.i = and i64 %0, -4294967297
+  store i64 %and.i, ptr %cmd_flags, align 8
   ret void
 }

diff --git a/llvm/test/CodeGen/Hexagon/Atomics.ll b/llvm/test/CodeGen/Hexagon/Atomics.ll
index cedf9a48754be..886ddd08bd278 100644
--- a/llvm/test/CodeGen/Hexagon/Atomics.ll
+++ b/llvm/test/CodeGen/Hexagon/Atomics.ll
@@ -5,18 +5,18 @@
 
 define void @test_op_ignore() nounwind {
 entry:
-  %t00 = atomicrmw add i32* @si, i32 1 monotonic
-  %t01 = atomicrmw add i64* @sll, i64 1 monotonic
-  %t10 = atomicrmw sub i32* @si, i32 1 monotonic
-  %t11 = atomicrmw sub i64* @sll, i64 1 monotonic
-  %t20 = atomicrmw or i32* @si, i32 1 monotonic
-  %t21 = atomicrmw or i64* @sll, i64 1 monotonic
-  %t30 = atomicrmw xor i32* @si, i32 1 monotonic
-  %t31 = atomicrmw xor i64* @sll, i64 1 monotonic
-  %t40 = atomicrmw and i32* @si, i32 1 monotonic
-  %t41 = atomicrmw and i64* @sll, i64 1 monotonic
-  %t50 = atomicrmw nand i32* @si, i32 1 monotonic
-  %t51 = atomicrmw nand i64* @sll, i64 1 monotonic
+  %t00 = atomicrmw add ptr @si, i32 1 monotonic
+  %t01 = atomicrmw add ptr @sll, i64 1 monotonic
+  %t10 = atomicrmw sub ptr @si, i32 1 monotonic
+  %t11 = atomicrmw sub ptr @sll, i64 1 monotonic
+  %t20 = atomicrmw or ptr @si, i32 1 monotonic
+  %t21 = atomicrmw or ptr @sll, i64 1 monotonic
+  %t30 = atomicrmw xor ptr @si, i32 1 monotonic
+  %t31 = atomicrmw xor ptr @sll, i64 1 monotonic
+  %t40 = atomicrmw and ptr @si, i32 1 monotonic
+  %t41 = atomicrmw and ptr @sll, i64 1 monotonic
+  %t50 = atomicrmw nand ptr @si, i32 1 monotonic
+  %t51 = atomicrmw nand ptr @sll, i64 1 monotonic
   br label %return
 
 return:                                           ; preds = %entry
@@ -25,30 +25,30 @@ return:                                           ; preds = %entry
 
 define void @test_fetch_and_op() nounwind {
 entry:
-  %t00 = atomicrmw add i32* @si, i32 11 monotonic
-  store i32 %t00, i32* @si, align 4
-  %t01 = atomicrmw add i64* @sll, i64 11 monotonic
-  store i64 %t01, i64* @sll, align 8
-  %t10 = atomicrmw sub i32* @si, i32 11 monotonic
-  store i32 %t10, i32* @si, align 4
-  %t11 = atomicrmw sub i64* @sll, i64 11 monotonic
-  store i64 %t11, i64* @sll, align 8
-  %t20 = atomicrmw or i32* @si, i32 11 monotonic
-  store i32 %t20, i32* @si, align 4
-  %t21 = atomicrmw or i64* @sll, i64 11 monotonic
-  store i64 %t21, i64* @sll, align 8
-  %t30 = atomicrmw xor i32* @si, i32 11 monotonic
-  store i32 %t30, i32* @si, align 4
-  %t31 = atomicrmw xor i64* @sll, i64 11 monotonic
-  store i64 %t31, i64* @sll, align 8
-  %t40 = atomicrmw and i32* @si, i32 11 monotonic
-  store i32 %t40, i32* @si, align 4
-  %t41 = atomicrmw and i64* @sll, i64 11 monotonic
-  store i64 %t41, i64* @sll, align 8
-  %t50 = atomicrmw nand i32* @si, i32 11 monotonic
-  store i32 %t50, i32* @si, align 4
-  %t51 = atomicrmw nand i64* @sll, i64 11 monotonic
-  store i64 %t51, i64* @sll, align 8
+  %t00 = atomicrmw add ptr @si, i32 11 monotonic
+  store i32 %t00, ptr @si, align 4
+  %t01 = atomicrmw add ptr @sll, i64 11 monotonic
+  store i64 %t01, ptr @sll, align 8
+  %t10 = atomicrmw sub ptr @si, i32 11 monotonic
+  store i32 %t10, ptr @si, align 4
+  %t11 = atomicrmw sub ptr @sll, i64 11 monotonic
+  store i64 %t11, ptr @sll, align 8
+  %t20 = atomicrmw or ptr @si, i32 11 monotonic
+  store i32 %t20, ptr @si, align 4
+  %t21 = atomicrmw or ptr @sll, i64 11 monotonic
+  store i64 %t21, ptr @sll, align 8
+  %t30 = atomicrmw xor ptr @si, i32 11 monotonic
+  store i32 %t30, ptr @si, align 4
+  %t31 = atomicrmw xor ptr @sll, i64 11 monotonic
+  store i64 %t31, ptr @sll, align 8
+  %t40 = atomicrmw and ptr @si, i32 11 monotonic
+  store i32 %t40, ptr @si, align 4
+  %t41 = atomicrmw and ptr @sll, i64 11 monotonic
+  store i64 %t41, ptr @sll, align 8
+  %t50 = atomicrmw nand ptr @si, i32 11 monotonic
+  store i32 %t50, ptr @si, align 4
+  %t51 = atomicrmw nand ptr @sll, i64 11 monotonic
+  store i64 %t51, ptr @sll, align 8
   br label %return
 
 return:                                           ; preds = %entry
@@ -57,13 +57,13 @@ return:                                           ; preds = %entry
 
 define void @test_lock() nounwind {
 entry:
-  %t00 = atomicrmw xchg i32* @si, i32 1 monotonic
-  store i32 %t00, i32* @si, align 4
-  %t01 = atomicrmw xchg i64* @sll, i64 1 monotonic
-  store i64 %t01, i64* @sll, align 8
+  %t00 = atomicrmw xchg ptr @si, i32 1 monotonic
+  store i32 %t00, ptr @si, align 4
+  %t01 = atomicrmw xchg ptr @sll, i64 1 monotonic
+  store i64 %t01, ptr @sll, align 8
   fence seq_cst
-  store volatile i32 0, i32* @si, align 4
-  store volatile i64 0, i64* @sll, align 8
+  store volatile i32 0, ptr @si, align 4
+  store volatile i64 0, ptr @sll, align 8
   br label %return
 
 return:                                           ; preds = %entry
@@ -73,9 +73,9 @@ return:                                           ; preds = %entry
 
 define i64 @fred() nounwind {
 entry:
-  %s0 = cmpxchg i32* undef, i32 undef, i32 undef seq_cst seq_cst
+  %s0 = cmpxchg ptr undef, i32 undef, i32 undef seq_cst seq_cst
   %s1 = extractvalue { i32, i1 } %s0, 0
-  %t0 = cmpxchg i64* undef, i64 undef, i64 undef seq_cst seq_cst
+  %t0 = cmpxchg ptr undef, i64 undef, i64 undef seq_cst seq_cst
   %t1 = extractvalue { i64, i1 } %t0, 0
   %u0 = zext i32 %s1 to i64
   %u1 = add i64 %u0, %t1

diff --git a/llvm/test/CodeGen/Hexagon/BranchPredict.ll b/llvm/test/CodeGen/Hexagon/BranchPredict.ll
index 60d12df180799..ba60a4e2c4557 100644
--- a/llvm/test/CodeGen/Hexagon/BranchPredict.ll
+++ b/llvm/test/CodeGen/Hexagon/BranchPredict.ll
@@ -16,11 +16,11 @@ entry:
 
 if.then:                                          ; preds = %entry
   %add = add nsw i32 %a, 10
-  %call = tail call i32 bitcast (i32 (...)* @foobar to i32 (i32)*)(i32 %add) nounwind
+  %call = tail call i32 @foobar(i32 %add) nounwind
   br label %return
 
 if.else:                                          ; preds = %entry
-  %call2 = tail call i32 bitcast (i32 (...)* @foobar to i32 (i32)*)(i32 4) nounwind
+  %call2 = tail call i32 @foobar(i32 4) nounwind
   br label %return
 
 return:                                           ; preds = %if.else, %if.then
@@ -38,11 +38,11 @@ entry:
 
 if.then:                                          ; preds = %entry
   %add = add nsw i32 %a, 10
-  %call = tail call i32 bitcast (i32 (...)* @foobar to i32 (i32)*)(i32 %add) nounwind
+  %call = tail call i32 @foobar(i32 %add) nounwind
   br label %return
 
 if.else:                                          ; preds = %entry
-  %call2 = tail call i32 bitcast (i32 (...)* @foobar to i32 (i32)*)(i32 4) nounwind
+  %call2 = tail call i32 @foobar(i32 4) nounwind
   br label %return
 
 return:                                           ; preds = %if.else, %if.then
@@ -53,18 +53,18 @@ return:                                           ; preds = %if.else, %if.then
 define i32 @foo_bar(i32 %a, i16 signext %b) nounwind {
 ; CHECK: if (!cmp.eq(r{{[0-9]*}}.new,#0)) jump:nt
 entry:
-  %0 = load i32, i32* @j, align 4
+  %0 = load i32, ptr @j, align 4
   %tobool = icmp eq i32 %0, 0
   br i1 %tobool, label %if.else, label %if.then, !prof !0
 
 if.then:                                          ; preds = %entry
   %add = add nsw i32 %a, 10
-  %call = tail call i32 bitcast (i32 (...)* @foobar to i32 (i32)*)(i32 %add) nounwind
+  %call = tail call i32 @foobar(i32 %add) nounwind
   br label %return
 
 if.else:                                          ; preds = %entry
   %add1 = add nsw i32 %a, 4
-  %call2 = tail call i32 bitcast (i32 (...)* @foobar to i32 (i32)*)(i32 %add1) nounwind
+  %call2 = tail call i32 @foobar(i32 %add1) nounwind
   br label %return
 
 return:                                           ; preds = %if.else, %if.then

diff --git a/llvm/test/CodeGen/Hexagon/Halide_vec_cast_trunc1.ll b/llvm/test/CodeGen/Hexagon/Halide_vec_cast_trunc1.ll
index 430452b006d05..14aef485526b7 100644
--- a/llvm/test/CodeGen/Hexagon/Halide_vec_cast_trunc1.ll
+++ b/llvm/test/CodeGen/Hexagon/Halide_vec_cast_trunc1.ll
@@ -12,7 +12,7 @@ b1:                                               ; preds = %b3, %b0
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
-  %v0 = load <32 x i32>, <32 x i32>* undef, align 512, !tbaa !4
+  %v0 = load <32 x i32>, ptr undef, align 512, !tbaa !4
   %v1 = shufflevector <32 x i32> %v0, <32 x i32> undef, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
   %v2 = shufflevector <64 x i32> %v1, <64 x i32> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
   %v3 = trunc <128 x i32> %v2 to <128 x i16>
@@ -25,7 +25,7 @@ b2:                                               ; preds = %b2, %b1
 b3:                                               ; preds = %b2
   %v8 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v7)
   %v9 = tail call <32 x i32> @llvm.hexagon.V6.vasrhubsat.128B(<32 x i32> %v8, <32 x i32> undef, i32 4)
-  store <32 x i32> %v9, <32 x i32>* undef, align 1, !tbaa !7
+  store <32 x i32> %v9, ptr undef, align 1, !tbaa !7
   br label %b1
 
 b4:                                               ; preds = %b0

diff --git a/llvm/test/CodeGen/Hexagon/Halide_vec_cast_trunc2.ll b/llvm/test/CodeGen/Hexagon/Halide_vec_cast_trunc2.ll
index da1af063555f7..7ff3c357c79df 100644
--- a/llvm/test/CodeGen/Hexagon/Halide_vec_cast_trunc2.ll
+++ b/llvm/test/CodeGen/Hexagon/Halide_vec_cast_trunc2.ll
@@ -18,7 +18,7 @@ b3:                                               ; preds = %b1
   br label %b4
 
 b4:                                               ; preds = %b4, %b3
-  %v0 = load <32 x i32>, <32 x i32>* undef, align 512, !tbaa !4
+  %v0 = load <32 x i32>, ptr undef, align 512, !tbaa !4
   %v1 = shufflevector <32 x i32> %v0, <32 x i32> undef, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
   %v2 = shufflevector <64 x i32> undef, <64 x i32> %v1, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
   %v3 = trunc <128 x i32> %v2 to <128 x i16>
@@ -30,7 +30,7 @@ b4:                                               ; preds = %b4, %b3
   br i1 undef, label %b5, label %b4
 
 b5:                                               ; preds = %b4
-  store <64 x i16> %v8, <64 x i16>* undef, align 1024, !tbaa !7
+  store <64 x i16> %v8, ptr undef, align 1024, !tbaa !7
   br label %b6
 
 b6:                                               ; preds = %b5, %b0

diff --git a/llvm/test/CodeGen/Hexagon/M4_mpyri_addi_global.ll b/llvm/test/CodeGen/Hexagon/M4_mpyri_addi_global.ll
index 72aeb022cf079..ac4083ba37598 100644
--- a/llvm/test/CodeGen/Hexagon/M4_mpyri_addi_global.ll
+++ b/llvm/test/CodeGen/Hexagon/M4_mpyri_addi_global.ll
@@ -5,13 +5,13 @@
 
 @g0 = common global [2 x %s.0] zeroinitializer, align 8
 
-declare void @f0(%s.0*)
+declare void @f0(ptr)
 
 ; Function Attrs: nounwind readnone
 define void @f1(i32 %a0) #0 {
 b0:
-  %v0 = getelementptr inbounds [2 x %s.0], [2 x %s.0]* @g0, i32 0, i32 %a0
-  call void @f0(%s.0* %v0) #1
+  %v0 = getelementptr inbounds [2 x %s.0], ptr @g0, i32 0, i32 %a0
+  call void @f0(ptr %v0) #1
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/M4_mpyrr_addi_global.ll b/llvm/test/CodeGen/Hexagon/M4_mpyrr_addi_global.ll
index fddaeb1b05811..9247a6cbe8366 100644
--- a/llvm/test/CodeGen/Hexagon/M4_mpyrr_addi_global.ll
+++ b/llvm/test/CodeGen/Hexagon/M4_mpyrr_addi_global.ll
@@ -1,20 +1,20 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s
 ; CHECK: r{{[0-9]+}} = add(##g0{{.*}},mpyi(r{{[0-9]+}},r{{[0-9]+}}))
 
-%s.0 = type { %s.1, %s.1* }
+%s.0 = type { %s.1, ptr }
 %s.1 = type { i8, i8, i8, i8, i16, i16, i8, [3 x i8], [20 x %s.2] }
 %s.2 = type { i8, i8, [2 x i8], [2 x i8] }
 
 @g0 = external global [2 x %s.0]
 
-declare void @f0(%s.1**)
+declare void @f0(ptr)
 
 ; Function Attrs: nounwind readnone
 define void @f1(i32 %a0) #0 {
 b0:
-  %v0 = getelementptr inbounds [2 x %s.0], [2 x %s.0]* @g0, i32 0, i32 %a0
-  %v1 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 1
-  call void @f0(%s.1** %v1) #1
+  %v0 = getelementptr inbounds [2 x %s.0], ptr @g0, i32 0, i32 %a0
+  %v1 = getelementptr inbounds %s.0, ptr %v0, i32 0, i32 1
+  call void @f0(ptr %v1) #1
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/NVJumpCmp.ll b/llvm/test/CodeGen/Hexagon/NVJumpCmp.ll
index 57d1824fa6cb6..2ef35172279f3 100644
--- a/llvm/test/CodeGen/Hexagon/NVJumpCmp.ll
+++ b/llvm/test/CodeGen/Hexagon/NVJumpCmp.ll
@@ -26,8 +26,8 @@ if.then17:                                        ; preds = %while.end
   unreachable
 
 if.end:                                           ; preds = %while.end
-  %arrayidx21 = getelementptr inbounds [4096 x i8], [4096 x i8]* undef, i32 0, i32 8
-  store i8 undef, i8* %arrayidx21, align 4, !tbaa !1
+  %arrayidx21 = getelementptr inbounds [4096 x i8], ptr undef, i32 0, i32 8
+  store i8 undef, ptr %arrayidx21, align 4, !tbaa !1
   br i1 undef, label %for.body42.preheader6, label %min.iters.checked
 
 for.body42.preheader6:                            ; preds = %vector.body.preheader, %min.iters.checked, %if.end
@@ -49,8 +49,8 @@ min.iters.checked595:                             ; preds = %for.body61.lr.ph
   br i1 undef, label %for.body61, label %vector.memcheck608
 
 vector.memcheck608:                               ; preds = %min.iters.checked595
-  %scevgep600 = getelementptr [4096 x i8], [4096 x i8]* undef, i32 0, i32 %add
-  %bound0604 = icmp ule i8* %scevgep600, undef
+  %scevgep600 = getelementptr [4096 x i8], ptr undef, i32 0, i32 %add
+  %bound0604 = icmp ule ptr %scevgep600, undef
   %memcheck.conflict607 = and i1 undef, %bound0604
   br i1 %memcheck.conflict607, label %for.body61, label %vector.body590
 

diff --git a/llvm/test/CodeGen/Hexagon/P08214.ll b/llvm/test/CodeGen/Hexagon/P08214.ll
index ea2ca90071950..c06aa2c5a7293 100644
--- a/llvm/test/CodeGen/Hexagon/P08214.ll
+++ b/llvm/test/CodeGen/Hexagon/P08214.ll
@@ -4,11 +4,11 @@
 
 target triple = "hexagon-unknown--elf"
 
-%s.0 = type { i32 (...)** }
+%s.0 = type { ptr }
 %s.1 = type { i32 }
 %s.2 = type { %s.1 }
 
-@g0 = global { i32, i32 } { i32 ptrtoint (i32 (%s.1*)* @f0 to i32), i32 0 }, align 4
+@g0 = global { i32, i32 } { i32 ptrtoint (ptr @f0 to i32), i32 0 }, align 4
 @g1 = global i32 0, align 4
 @g2 = global %s.0 zeroinitializer, align 4
 @g3 = global { i32, i32 } { i32 1, i32 0 }, align 4
@@ -25,54 +25,51 @@ target triple = "hexagon-unknown--elf"
 @g14 = private unnamed_addr constant [7 x i8] c"%s\0A%s\0A\00", align 1
 @g15 = private unnamed_addr constant [51 x i8] c"Testing dereferencing a pointer to member function\00", align 1
 @g16 = private unnamed_addr constant [24 x i8] c"in a complex expression\00", align 1
-@g17 = linkonce_odr unnamed_addr constant [3 x i8*] [i8* null, i8* bitcast ({ i8*, i8* }* @g20 to i8*), i8* bitcast (i32 (%s.0*)* @f9 to i8*)]
-@g18 = external global i8*
+@g17 = linkonce_odr unnamed_addr constant [3 x ptr] [ptr null, ptr @g20, ptr @f9]
+@g18 = external global ptr
 @g19 = linkonce_odr constant [3 x i8] c"1S\00"
-@g20 = linkonce_odr constant { i8*, i8* } { i8* bitcast (i8** getelementptr inbounds (i8*, i8** @g18, i32 2) to i8*), i8* getelementptr inbounds ([3 x i8], [3 x i8]* @g19, i32 0, i32 0) }
+@g20 = linkonce_odr constant { ptr, ptr } { ptr getelementptr inbounds (ptr, ptr @g18, i32 2), ptr @g19 }
 
 ; Function Attrs: nounwind readnone
-define linkonce_odr i32 @f0(%s.1* nocapture readnone %a0) #0 align 2 {
+define linkonce_odr i32 @f0(ptr nocapture readnone %a0) #0 align 2 {
 b0:
   ret i32 11
 }
 
 ; Function Attrs: nounwind readnone
-define %s.0* @f1() #0 {
+define ptr @f1() #0 {
 b0:
-  ret %s.0* @g2
+  ret ptr @g2
 }
 
 define internal fastcc void @f2() {
 b0:
-  %v0 = load i32, i32* @g5, align 4, !tbaa !0
+  %v0 = load i32, ptr @g5, align 4, !tbaa !0
   %v1 = add nsw i32 %v0, 5
-  store i32 %v1, i32* @g5, align 4, !tbaa !0
-  %v2 = load { i32, i32 }, { i32, i32 }* @g3, align 4, !tbaa !4
+  store i32 %v1, ptr @g5, align 4, !tbaa !0
+  %v2 = load { i32, i32 }, ptr @g3, align 4, !tbaa !4
   %v3 = extractvalue { i32, i32 } %v2, 1
-  %v4 = getelementptr inbounds i8, i8* bitcast (%s.0* @g2 to i8*), i32 %v3
-  %v5 = bitcast i8* %v4 to %s.0*
+  %v4 = getelementptr inbounds i8, ptr @g2, i32 %v3
   %v6 = extractvalue { i32, i32 } %v2, 0
   %v7 = and i32 %v6, 1
   %v8 = icmp eq i32 %v7, 0
   br i1 %v8, label %b2, label %b1
 
 b1:                                               ; preds = %b0
-  %v9 = bitcast i8* %v4 to i8**
-  %v10 = load i8*, i8** %v9, align 4, !tbaa !5
+  %v10 = load ptr, ptr %v4, align 4, !tbaa !5
   %v11 = add i32 %v6, -1
-  %v12 = getelementptr i8, i8* %v10, i32 %v11
-  %v13 = bitcast i8* %v12 to i32 (%s.0*)**
-  %v14 = load i32 (%s.0*)*, i32 (%s.0*)** %v13, align 4
+  %v12 = getelementptr i8, ptr %v10, i32 %v11
+  %v14 = load ptr, ptr %v12, align 4
   br label %b3
 
 b2:                                               ; preds = %b0
-  %v15 = inttoptr i32 %v6 to i32 (%s.0*)*
+  %v15 = inttoptr i32 %v6 to ptr
   br label %b3
 
 b3:                                               ; preds = %b2, %b1
-  %v16 = phi i32 (%s.0*)* [ %v14, %b1 ], [ %v15, %b2 ]
-  %v17 = tail call i32 %v16(%s.0* %v5)
-  store i32 %v17, i32* @g6, align 4, !tbaa !0
+  %v16 = phi ptr [ %v14, %b1 ], [ %v15, %b2 ]
+  %v17 = tail call i32 %v16(ptr %v4)
+  store i32 %v17, ptr @g6, align 4, !tbaa !0
   ret void
 }
 
@@ -83,72 +80,67 @@ b0:
   %v1 = alloca %s.2, align 4
   tail call void @f4()
   tail call void @f5()
-  tail call void (i8*, ...) @f6(i8* getelementptr inbounds ([53 x i8], [53 x i8]* @g7, i32 0, i32 0))
-  tail call void (i8*, ...) @f6(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @g8, i32 0, i32 0), i8* getelementptr inbounds ([43 x i8], [43 x i8]* @g9, i32 0, i32 0), i8* getelementptr inbounds ([49 x i8], [49 x i8]* @g10, i32 0, i32 0))
-  %v2 = load { i32, i32 }, { i32, i32 }* @g0, align 4, !tbaa !4
+  tail call void (ptr, ...) @f6(ptr @g7)
+  tail call void (ptr, ...) @f6(ptr @g8, ptr @g9, ptr @g10)
+  %v2 = load { i32, i32 }, ptr @g0, align 4, !tbaa !4
   %v3 = extractvalue { i32, i32 } %v2, 1
-  %v4 = bitcast %s.2* %v0 to i8*
-  %v5 = getelementptr inbounds i8, i8* %v4, i32 %v3
-  %v6 = bitcast i8* %v5 to %s.2*
+  %v5 = getelementptr inbounds i8, ptr %v0, i32 %v3
   %v7 = extractvalue { i32, i32 } %v2, 0
   %v8 = and i32 %v7, 1
   %v9 = icmp eq i32 %v8, 0
   br i1 %v9, label %b1, label %b2
 
 b1:                                               ; preds = %b0
-  %v10 = inttoptr i32 %v7 to i32 (%s.2*)*
+  %v10 = inttoptr i32 %v7 to ptr
   br label %b2
 
 b2:                                               ; preds = %b1, %b0
-  %v11 = phi i32 (%s.2*)* [ %v10, %b1 ], [ undef, %b0 ]
-  %v12 = call i32 %v11(%s.2* %v6)
+  %v11 = phi ptr [ %v10, %b1 ], [ undef, %b0 ]
+  %v12 = call i32 %v11(ptr %v5)
   %v13 = icmp eq i32 %v12, 11
   br i1 %v13, label %b4, label %b3
 
 b3:                                               ; preds = %b2
-  store i32 1, i32* @g11, align 4, !tbaa !0
+  store i32 1, ptr @g11, align 4, !tbaa !0
   br label %b4
 
 b4:                                               ; preds = %b3, %b2
   %v14 = call i32 @f7()
   call void @f5()
-  call void (i8*, ...) @f6(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @g8, i32 0, i32 0), i8* getelementptr inbounds ([46 x i8], [46 x i8]* @g12, i32 0, i32 0), i8* getelementptr inbounds ([29 x i8], [29 x i8]* @g13, i32 0, i32 0))
-  %v15 = getelementptr inbounds %s.2, %s.2* %v1, i32 0, i32 0, i32 0
-  store i32 11, i32* %v15, align 4, !tbaa !7
-  %v16 = load i32, i32* @g1, align 4, !tbaa !4
-  %v17 = bitcast %s.2* %v1 to i8*
-  %v18 = getelementptr inbounds i8, i8* %v17, i32 %v16
-  %v19 = bitcast i8* %v18 to i32*
-  %v20 = load i32, i32* %v19, align 4, !tbaa !0
+  call void (ptr, ...) @f6(ptr @g8, ptr @g12, ptr @g13)
+  store i32 11, ptr %v1, align 4, !tbaa !7
+  %v16 = load i32, ptr @g1, align 4, !tbaa !4
+  %v18 = getelementptr inbounds i8, ptr %v1, i32 %v16
+  %v20 = load i32, ptr %v18, align 4, !tbaa !0
   %v21 = icmp eq i32 %v20, 11
   br i1 %v21, label %b6, label %b5
 
 b5:                                               ; preds = %b4
-  store i32 1, i32* @g11, align 4, !tbaa !0
+  store i32 1, ptr @g11, align 4, !tbaa !0
   br label %b6
 
 b6:                                               ; preds = %b5, %b4
   %v22 = call i32 @f7()
   call void @f5()
-  call void (i8*, ...) @f6(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @g14, i32 0, i32 0), i8* getelementptr inbounds ([51 x i8], [51 x i8]* @g15, i32 0, i32 0), i8* getelementptr inbounds ([24 x i8], [24 x i8]* @g16, i32 0, i32 0))
-  %v23 = load i32, i32* @g4, align 4, !tbaa !0
+  call void (ptr, ...) @f6(ptr @g14, ptr @g15, ptr @g16)
+  %v23 = load i32, ptr @g4, align 4, !tbaa !0
   %v24 = icmp eq i32 %v23, 11
   br i1 %v24, label %b8, label %b7
 
 b7:                                               ; preds = %b6
-  store i32 1, i32* @g11, align 4, !tbaa !0
+  store i32 1, ptr @g11, align 4, !tbaa !0
   br label %b8
 
 b8:                                               ; preds = %b7, %b6
   %v25 = call i32 @f7()
   call void @f5()
-  call void (i8*, ...) @f6(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @g14, i32 0, i32 0), i8* getelementptr inbounds ([51 x i8], [51 x i8]* @g15, i32 0, i32 0), i8* getelementptr inbounds ([24 x i8], [24 x i8]* @g16, i32 0, i32 0))
-  %v26 = load i32, i32* @g6, align 4, !tbaa !0
+  call void (ptr, ...) @f6(ptr @g14, ptr @g15, ptr @g16)
+  %v26 = load i32, ptr @g6, align 4, !tbaa !0
   %v27 = icmp eq i32 %v26, 11
   br i1 %v27, label %b10, label %b9
 
 b9:                                               ; preds = %b8
-  store i32 1, i32* @g11, align 4, !tbaa !0
+  store i32 1, ptr @g11, align 4, !tbaa !0
   br label %b10
 
 b10:                                              ; preds = %b9, %b8
@@ -164,7 +156,7 @@ declare void @f4() #0
 declare void @f5() #0
 
 ; Function Attrs: nounwind readnone
-declare void @f6(i8*, ...) #0
+declare void @f6(ptr, ...) #0
 
 ; Function Attrs: nounwind readnone
 declare i32 @f7() #0
@@ -173,40 +165,37 @@ declare i32 @f7() #0
 declare i32 @f8(i32) #0
 
 ; Function Attrs: nounwind readnone
-define linkonce_odr i32 @f9(%s.0* nocapture readnone %a0) unnamed_addr #0 align 2 {
+define linkonce_odr i32 @f9(ptr nocapture readnone %a0) unnamed_addr #0 align 2 {
 b0:
   ret i32 11
 }
 
 define internal void @f10() {
 b0:
-  store i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @g17, i32 0, i32 2) to i32 (...)**), i32 (...)*** getelementptr inbounds (%s.0, %s.0* @g2, i32 0, i32 0), align 4, !tbaa !5
-  %v0 = load { i32, i32 }, { i32, i32 }* @g3, align 4, !tbaa !4
+  store ptr getelementptr inbounds ([3 x ptr], ptr @g17, i32 0, i32 2), ptr @g2, align 4, !tbaa !5
+  %v0 = load { i32, i32 }, ptr @g3, align 4, !tbaa !4
   %v1 = extractvalue { i32, i32 } %v0, 1
-  %v2 = getelementptr inbounds i8, i8* bitcast (%s.0* @g2 to i8*), i32 %v1
-  %v3 = bitcast i8* %v2 to %s.0*
+  %v2 = getelementptr inbounds i8, ptr @g2, i32 %v1
   %v4 = extractvalue { i32, i32 } %v0, 0
   %v5 = and i32 %v4, 1
   %v6 = icmp eq i32 %v5, 0
   br i1 %v6, label %b2, label %b1
 
 b1:                                               ; preds = %b0
-  %v7 = bitcast i8* %v2 to i8**
-  %v8 = load i8*, i8** %v7, align 4, !tbaa !5
+  %v8 = load ptr, ptr %v2, align 4, !tbaa !5
   %v9 = add i32 %v4, -1
-  %v10 = getelementptr i8, i8* %v8, i32 %v9
-  %v11 = bitcast i8* %v10 to i32 (%s.0*)**
-  %v12 = load i32 (%s.0*)*, i32 (%s.0*)** %v11, align 4
+  %v10 = getelementptr i8, ptr %v8, i32 %v9
+  %v12 = load ptr, ptr %v10, align 4
   br label %b3
 
 b2:                                               ; preds = %b0
-  %v13 = inttoptr i32 %v4 to i32 (%s.0*)*
+  %v13 = inttoptr i32 %v4 to ptr
   br label %b3
 
 b3:                                               ; preds = %b2, %b1
-  %v14 = phi i32 (%s.0*)* [ %v12, %b1 ], [ %v13, %b2 ]
-  %v15 = tail call i32 %v14(%s.0* %v3)
-  store i32 %v15, i32* @g4, align 4, !tbaa !0
+  %v14 = phi ptr [ %v12, %b1 ], [ %v13, %b2 ]
+  %v15 = tail call i32 %v14(ptr %v2)
+  store i32 %v15, ptr @g4, align 4, !tbaa !0
   tail call fastcc void @f2()
   ret void
 }

diff --git a/llvm/test/CodeGen/Hexagon/PR33749.ll b/llvm/test/CodeGen/Hexagon/PR33749.ll
index 7f8533054e88c..4441fe2351038 100644
--- a/llvm/test/CodeGen/Hexagon/PR33749.ll
+++ b/llvm/test/CodeGen/Hexagon/PR33749.ll
@@ -6,12 +6,12 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define void @foo(i32* nocapture %a0) local_unnamed_addr #0 {
+define void @foo(ptr nocapture %a0) local_unnamed_addr #0 {
 b1:
-  %v2 = getelementptr inbounds i32, i32* %a0, i32 26
-  %v3 = load i32, i32* %v2, align 4
+  %v2 = getelementptr inbounds i32, ptr %a0, i32 26
+  %v3 = load i32, ptr %v2, align 4
   %v4 = add nsw i32 %v3, 1
-  %v5 = load i32, i32* %a0, align 4
+  %v5 = load i32, ptr %a0, align 4
   br label %b6
 
 b6:                                               ; preds = %b28, %b1
@@ -40,7 +40,7 @@ b6:                                               ; preds = %b28, %b1
 
 b28:                                              ; preds = %b6
   %v29 = add nsw i32 %v3, %v7
-  store i32 %v29, i32* %a0, align 4
+  store i32 %v29, ptr %a0, align 4
   br label %b6
 
 b30:                                              ; preds = %b6

diff --git a/llvm/test/CodeGen/Hexagon/S3_2op.ll b/llvm/test/CodeGen/Hexagon/S3_2op.ll
index 5e101c0bc3693..b4cf8c9b735bb 100644
--- a/llvm/test/CodeGen/Hexagon/S3_2op.ll
+++ b/llvm/test/CodeGen/Hexagon/S3_2op.ll
@@ -5,8 +5,8 @@
 define double @f0(double %a0) #0 {
 b0:
   %v0 = alloca double, align 8
-  store double %a0, double* %v0, align 8
-  %v1 = load double, double* %v0, align 8
+  store double %a0, ptr %v0, align 8
+  %v1 = load double, ptr %v0, align 8
   %v2 = fptosi double %v1 to i64
   %v3 = call i64 @llvm.hexagon.A2.absp(i64 %v2)
   %v4 = sitofp i64 %v3 to double
@@ -20,8 +20,8 @@ declare i64 @llvm.hexagon.A2.absp(i64) #1
 define double @f1(double %a0) #0 {
 b0:
   %v0 = alloca double, align 8
-  store double %a0, double* %v0, align 8
-  %v1 = load double, double* %v0, align 8
+  store double %a0, ptr %v0, align 8
+  %v1 = load double, ptr %v0, align 8
   %v2 = fptosi double %v1 to i64
   %v3 = call i64 @llvm.hexagon.A2.negp(i64 %v2)
   %v4 = sitofp i64 %v3 to double
@@ -35,8 +35,8 @@ declare i64 @llvm.hexagon.A2.negp(i64) #1
 define double @f2(double %a0) #0 {
 b0:
   %v0 = alloca double, align 8
-  store double %a0, double* %v0, align 8
-  %v1 = load double, double* %v0, align 8
+  store double %a0, ptr %v0, align 8
+  %v1 = load double, ptr %v0, align 8
   %v2 = fptosi double %v1 to i64
   %v3 = call i64 @llvm.hexagon.A2.notp(i64 %v2)
   %v4 = sitofp i64 %v3 to double
@@ -50,8 +50,8 @@ declare i64 @llvm.hexagon.A2.notp(i64) #1
 define double @f3(double %a0) #0 {
 b0:
   %v0 = alloca double, align 8
-  store double %a0, double* %v0, align 8
-  %v1 = load double, double* %v0, align 8
+  store double %a0, ptr %v0, align 8
+  %v1 = load double, ptr %v0, align 8
   %v2 = fptosi double %v1 to i64
   %v3 = call i64 @llvm.hexagon.S2.interleave(i64 %v2)
   %v4 = sitofp i64 %v3 to double
@@ -65,8 +65,8 @@ declare i64 @llvm.hexagon.S2.interleave(i64) #1
 define double @f4(double %a0) #0 {
 b0:
   %v0 = alloca double, align 8
-  store double %a0, double* %v0, align 8
-  %v1 = load double, double* %v0, align 8
+  store double %a0, ptr %v0, align 8
+  %v1 = load double, ptr %v0, align 8
   %v2 = fptosi double %v1 to i64
   %v3 = call i64 @llvm.hexagon.S2.deinterleave(i64 %v2)
   %v4 = sitofp i64 %v3 to double
@@ -80,8 +80,8 @@ declare i64 @llvm.hexagon.S2.deinterleave(i64) #1
 define double @f5(double %a0) #0 {
 b0:
   %v0 = alloca double, align 8
-  store double %a0, double* %v0, align 8
-  %v1 = load double, double* %v0, align 8
+  store double %a0, ptr %v0, align 8
+  %v1 = load double, ptr %v0, align 8
   %v2 = fptosi double %v1 to i64
   %v3 = call i64 @llvm.hexagon.A2.vconj(i64 %v2)
   %v4 = sitofp i64 %v3 to double
@@ -95,8 +95,8 @@ declare i64 @llvm.hexagon.A2.vconj(i64) #1
 define double @f6(double %a0) #0 {
 b0:
   %v0 = alloca double, align 8
-  store double %a0, double* %v0, align 8
-  %v1 = load double, double* %v0, align 8
+  store double %a0, ptr %v0, align 8
+  %v1 = load double, ptr %v0, align 8
   %v2 = fptosi double %v1 to i64
   %v3 = call i64 @llvm.hexagon.S2.vsathb.nopack(i64 %v2)
   %v4 = sitofp i64 %v3 to double
@@ -110,8 +110,8 @@ declare i64 @llvm.hexagon.S2.vsathb.nopack(i64) #1
 define double @f7(double %a0) #0 {
 b0:
   %v0 = alloca double, align 8
-  store double %a0, double* %v0, align 8
-  %v1 = load double, double* %v0, align 8
+  store double %a0, ptr %v0, align 8
+  %v1 = load double, ptr %v0, align 8
   %v2 = fptosi double %v1 to i64
   %v3 = call i64 @llvm.hexagon.S2.vsathub.nopack(i64 %v2)
   %v4 = sitofp i64 %v3 to double
@@ -125,8 +125,8 @@ declare i64 @llvm.hexagon.S2.vsathub.nopack(i64) #1
 define double @f8(double %a0) #0 {
 b0:
   %v0 = alloca double, align 8
-  store double %a0, double* %v0, align 8
-  %v1 = load double, double* %v0, align 8
+  store double %a0, ptr %v0, align 8
+  %v1 = load double, ptr %v0, align 8
   %v2 = fptosi double %v1 to i64
   %v3 = call i64 @llvm.hexagon.S2.vsatwh.nopack(i64 %v2)
   %v4 = sitofp i64 %v3 to double
@@ -140,8 +140,8 @@ declare i64 @llvm.hexagon.S2.vsatwh.nopack(i64) #1
 define double @f9(double %a0) #0 {
 b0:
   %v0 = alloca double, align 8
-  store double %a0, double* %v0, align 8
-  %v1 = load double, double* %v0, align 8
+  store double %a0, ptr %v0, align 8
+  %v1 = load double, ptr %v0, align 8
   %v2 = fptosi double %v1 to i64
   %v3 = call i64 @llvm.hexagon.S2.vsatwuh.nopack(i64 %v2)
   %v4 = sitofp i64 %v3 to double
@@ -155,8 +155,8 @@ declare i64 @llvm.hexagon.S2.vsatwuh.nopack(i64) #1
 define double @f10(double %a0) #0 {
 b0:
   %v0 = alloca double, align 8
-  store double %a0, double* %v0, align 8
-  %v1 = load double, double* %v0, align 8
+  store double %a0, ptr %v0, align 8
+  %v1 = load double, ptr %v0, align 8
   %v2 = fptosi double %v1 to i64
   %v3 = call i64 @llvm.hexagon.S2.asr.i.p(i64 %v2, i32 1)
   %v4 = sitofp i64 %v3 to double
@@ -170,8 +170,8 @@ declare i64 @llvm.hexagon.S2.asr.i.p(i64, i32) #1
 define double @f11(double %a0) #0 {
 b0:
   %v0 = alloca double, align 8
-  store double %a0, double* %v0, align 8
-  %v1 = load double, double* %v0, align 8
+  store double %a0, ptr %v0, align 8
+  %v1 = load double, ptr %v0, align 8
   %v2 = fptosi double %v1 to i64
   %v3 = call i64 @llvm.hexagon.S2.lsr.i.p(i64 %v2, i32 1)
   %v4 = sitofp i64 %v3 to double
@@ -185,8 +185,8 @@ declare i64 @llvm.hexagon.S2.lsr.i.p(i64, i32) #1
 define double @f12(double %a0) #0 {
 b0:
   %v0 = alloca double, align 8
-  store double %a0, double* %v0, align 8
-  %v1 = load double, double* %v0, align 8
+  store double %a0, ptr %v0, align 8
+  %v1 = load double, ptr %v0, align 8
   %v2 = fptosi double %v1 to i64
   %v3 = call i64 @llvm.hexagon.S2.asl.i.p(i64 %v2, i32 1)
   %v4 = sitofp i64 %v3 to double
@@ -200,8 +200,8 @@ declare i64 @llvm.hexagon.S2.asl.i.p(i64, i32) #1
 define double @f13(double %a0) #0 {
 b0:
   %v0 = alloca double, align 8
-  store double %a0, double* %v0, align 8
-  %v1 = load double, double* %v0, align 8
+  store double %a0, ptr %v0, align 8
+  %v1 = load double, ptr %v0, align 8
   %v2 = fptosi double %v1 to i64
   %v3 = call i64 @llvm.hexagon.A2.vabsh(i64 %v2)
   %v4 = sitofp i64 %v3 to double
@@ -215,8 +215,8 @@ declare i64 @llvm.hexagon.A2.vabsh(i64) #1
 define double @f14(double %a0) #0 {
 b0:
   %v0 = alloca double, align 8
-  store double %a0, double* %v0, align 8
-  %v1 = load double, double* %v0, align 8
+  store double %a0, ptr %v0, align 8
+  %v1 = load double, ptr %v0, align 8
   %v2 = fptosi double %v1 to i64
   %v3 = call i64 @llvm.hexagon.A2.vabshsat(i64 %v2)
   %v4 = sitofp i64 %v3 to double
@@ -230,8 +230,8 @@ declare i64 @llvm.hexagon.A2.vabshsat(i64) #1
 define double @f15(double %a0) #0 {
 b0:
   %v0 = alloca double, align 8
-  store double %a0, double* %v0, align 8
-  %v1 = load double, double* %v0, align 8
+  store double %a0, ptr %v0, align 8
+  %v1 = load double, ptr %v0, align 8
   %v2 = fptosi double %v1 to i64
   %v3 = call i64 @llvm.hexagon.S2.asr.i.vh(i64 %v2, i32 1)
   %v4 = sitofp i64 %v3 to double
@@ -245,8 +245,8 @@ declare i64 @llvm.hexagon.S2.asr.i.vh(i64, i32) #1
 define double @f16(double %a0) #0 {
 b0:
   %v0 = alloca double, align 8
-  store double %a0, double* %v0, align 8
-  %v1 = load double, double* %v0, align 8
+  store double %a0, ptr %v0, align 8
+  %v1 = load double, ptr %v0, align 8
   %v2 = fptosi double %v1 to i64
   %v3 = call i64 @llvm.hexagon.S2.lsr.i.vh(i64 %v2, i32 1)
   %v4 = sitofp i64 %v3 to double
@@ -260,8 +260,8 @@ declare i64 @llvm.hexagon.S2.lsr.i.vh(i64, i32) #1
 define double @f17(double %a0) #0 {
 b0:
   %v0 = alloca double, align 8
-  store double %a0, double* %v0, align 8
-  %v1 = load double, double* %v0, align 8
+  store double %a0, ptr %v0, align 8
+  %v1 = load double, ptr %v0, align 8
   %v2 = fptosi double %v1 to i64
   %v3 = call i64 @llvm.hexagon.S2.asl.i.vh(i64 %v2, i32 1)
   %v4 = sitofp i64 %v3 to double
@@ -275,8 +275,8 @@ declare i64 @llvm.hexagon.S2.asl.i.vh(i64, i32) #1
 define double @f18(double %a0) #0 {
 b0:
   %v0 = alloca double, align 8
-  store double %a0, double* %v0, align 8
-  %v1 = load double, double* %v0, align 8
+  store double %a0, ptr %v0, align 8
+  %v1 = load double, ptr %v0, align 8
   %v2 = fptosi double %v1 to i64
   %v3 = call i64 @llvm.hexagon.A2.vabsw(i64 %v2)
   %v4 = sitofp i64 %v3 to double
@@ -290,8 +290,8 @@ declare i64 @llvm.hexagon.A2.vabsw(i64) #1
 define double @f19(double %a0) #0 {
 b0:
   %v0 = alloca double, align 8
-  store double %a0, double* %v0, align 8
-  %v1 = load double, double* %v0, align 8
+  store double %a0, ptr %v0, align 8
+  %v1 = load double, ptr %v0, align 8
   %v2 = fptosi double %v1 to i64
   %v3 = call i64 @llvm.hexagon.A2.vabswsat(i64 %v2)
   %v4 = sitofp i64 %v3 to double
@@ -305,8 +305,8 @@ declare i64 @llvm.hexagon.A2.vabswsat(i64) #1
 define double @f20(double %a0) #0 {
 b0:
   %v0 = alloca double, align 8
-  store double %a0, double* %v0, align 8
-  %v1 = load double, double* %v0, align 8
+  store double %a0, ptr %v0, align 8
+  %v1 = load double, ptr %v0, align 8
   %v2 = fptosi double %v1 to i64
   %v3 = call i64 @llvm.hexagon.S2.asr.i.vw(i64 %v2, i32 1)
   %v4 = sitofp i64 %v3 to double
@@ -320,8 +320,8 @@ declare i64 @llvm.hexagon.S2.asr.i.vw(i64, i32) #1
 define double @f21(double %a0) #0 {
 b0:
   %v0 = alloca double, align 8
-  store double %a0, double* %v0, align 8
-  %v1 = load double, double* %v0, align 8
+  store double %a0, ptr %v0, align 8
+  %v1 = load double, ptr %v0, align 8
   %v2 = fptosi double %v1 to i64
   %v3 = call i64 @llvm.hexagon.S2.lsr.i.vw(i64 %v2, i32 1)
   %v4 = sitofp i64 %v3 to double
@@ -335,8 +335,8 @@ declare i64 @llvm.hexagon.S2.lsr.i.vw(i64, i32) #1
 define double @f22(double %a0) #0 {
 b0:
   %v0 = alloca double, align 8
-  store double %a0, double* %v0, align 8
-  %v1 = load double, double* %v0, align 8
+  store double %a0, ptr %v0, align 8
+  %v1 = load double, ptr %v0, align 8
   %v2 = fptosi double %v1 to i64
   %v3 = call i64 @llvm.hexagon.S2.asl.i.vw(i64 %v2, i32 1)
   %v4 = sitofp i64 %v3 to double
@@ -350,8 +350,8 @@ declare i64 @llvm.hexagon.S2.asl.i.vw(i64, i32) #1
 define double @f23(double %a0) #0 {
 b0:
   %v0 = alloca double, align 8
-  store double %a0, double* %v0, align 8
-  %v1 = load double, double* %v0, align 8
+  store double %a0, ptr %v0, align 8
+  %v1 = load double, ptr %v0, align 8
   %v2 = fptosi double %v1 to i64
   %v3 = call i64 @llvm.hexagon.S2.brevp(i64 %v2)
   %v4 = sitofp i64 %v3 to double

diff --git a/llvm/test/CodeGen/Hexagon/SUnit-boundary-prob.ll b/llvm/test/CodeGen/Hexagon/SUnit-boundary-prob.ll
index 32c56053ce2c1..10db03550bbfa 100644
--- a/llvm/test/CodeGen/Hexagon/SUnit-boundary-prob.ll
+++ b/llvm/test/CodeGen/Hexagon/SUnit-boundary-prob.ll
@@ -23,8 +23,8 @@ declare <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32>, <16 x i32>, i32) #0
 
 define void @f0() #1 {
 b0:
-  %v0 = load i16*, i16** undef, align 4
-  %v1 = load i32*, i32** undef, align 4
+  %v0 = load ptr, ptr undef, align 4
+  %v1 = load ptr, ptr undef, align 4
   br label %b1
 
 b1:                                               ; preds = %b3, %b0
@@ -36,22 +36,19 @@ b1:                                               ; preds = %b3, %b0
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
-  %v7 = phi i32* [ %v1, %b1 ], [ %v127, %b2 ]
-  %v8 = phi i16* [ %v0, %b1 ], [ %v128, %b2 ]
+  %v7 = phi ptr [ %v1, %b1 ], [ %v127, %b2 ]
+  %v8 = phi ptr [ %v0, %b1 ], [ %v128, %b2 ]
   %v9 = phi i32 [ 0, %b1 ], [ %v125, %b2 ]
   %v10 = mul nuw nsw i32 %v9, 32
-  %v11 = bitcast i32* %v7 to <16 x i32>*
-  %v12 = load <16 x i32>, <16 x i32>* %v11, align 64, !tbaa !1
+  %v12 = load <16 x i32>, ptr %v7, align 64, !tbaa !1
   %v13 = add nuw nsw i32 %v10, 16
-  %v14 = getelementptr inbounds i32, i32* %v1, i32 %v13
-  %v15 = bitcast i32* %v14 to <16 x i32>*
-  %v16 = load <16 x i32>, <16 x i32>* %v15, align 64, !tbaa !1
+  %v14 = getelementptr inbounds i32, ptr %v1, i32 %v13
+  %v16 = load <16 x i32>, ptr %v14, align 64, !tbaa !1
   %v17 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v16, <16 x i32> %v12)
   %v18 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v17) #2
   %v19 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v17) #2
   %v20 = tail call <32 x i32> @llvm.hexagon.V6.vdealvdd(<16 x i32> %v19, <16 x i32> %v18, i32 -4) #2
-  %v21 = bitcast i16* %v8 to <16 x i32>*
-  %v22 = load <16 x i32>, <16 x i32>* %v21, align 64, !tbaa !4
+  %v22 = load <16 x i32>, ptr %v8, align 64, !tbaa !4
   %v23 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v20) #2
   %v24 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v20) #2
   %v25 = tail call <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32> %v24, <16 x i32> %v23) #2
@@ -69,24 +66,20 @@ b2:                                               ; preds = %b2, %b1
   %v37 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> %v36, <16 x i32> %v35, i32 -4) #2
   %v38 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v37)
   %v39 = add nuw nsw i32 %v10, %v3
-  %v40 = getelementptr inbounds i32, i32* undef, i32 %v39
-  %v41 = bitcast i32* %v40 to <16 x i32>*
-  store <16 x i32> %v38, <16 x i32>* %v41, align 64, !tbaa !6
+  %v40 = getelementptr inbounds i32, ptr undef, i32 %v39
+  store <16 x i32> %v38, ptr %v40, align 64, !tbaa !6
   %v42 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v37)
-  store <16 x i32> %v42, <16 x i32>* undef, align 64, !tbaa !6
-  %v43 = getelementptr i32, i32* %v7, i32 32
-  %v44 = getelementptr i16, i16* %v8, i32 32
-  %v45 = bitcast i32* %v43 to <16 x i32>*
-  %v46 = load <16 x i32>, <16 x i32>* %v45, align 64, !tbaa !1
+  store <16 x i32> %v42, ptr undef, align 64, !tbaa !6
+  %v43 = getelementptr i32, ptr %v7, i32 32
+  %v44 = getelementptr i16, ptr %v8, i32 32
+  %v46 = load <16 x i32>, ptr %v43, align 64, !tbaa !1
   %v47 = add nuw nsw i32 %v10, 48
-  %v48 = getelementptr inbounds i32, i32* %v1, i32 %v47
-  %v49 = bitcast i32* %v48 to <16 x i32>*
-  %v50 = load <16 x i32>, <16 x i32>* %v49, align 64, !tbaa !1
+  %v48 = getelementptr inbounds i32, ptr %v1, i32 %v47
+  %v50 = load <16 x i32>, ptr %v48, align 64, !tbaa !1
   %v51 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v50, <16 x i32> %v46)
   %v52 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v51) #2
   %v53 = tail call <32 x i32> @llvm.hexagon.V6.vdealvdd(<16 x i32> undef, <16 x i32> %v52, i32 -4) #2
-  %v54 = bitcast i16* %v44 to <16 x i32>*
-  %v55 = load <16 x i32>, <16 x i32>* %v54, align 64, !tbaa !4
+  %v55 = load <16 x i32>, ptr %v44, align 64, !tbaa !4
   %v56 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v53) #2
   %v57 = tail call <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32> undef, <16 x i32> %v56) #2
   %v58 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> undef, <16 x i32> %v56) #2
@@ -103,28 +96,23 @@ b2:                                               ; preds = %b2, %b1
   %v69 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> %v68, <16 x i32> %v67, i32 -4) #2
   %v70 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v69)
   %v71 = add nuw nsw i32 %v4, %v10
-  %v72 = getelementptr inbounds i32, i32* undef, i32 %v71
-  %v73 = bitcast i32* %v72 to <16 x i32>*
-  store <16 x i32> %v70, <16 x i32>* %v73, align 64, !tbaa !6
+  %v72 = getelementptr inbounds i32, ptr undef, i32 %v71
+  store <16 x i32> %v70, ptr %v72, align 64, !tbaa !6
   %v74 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v69)
   %v75 = add nuw nsw i32 %v71, 16
-  %v76 = getelementptr inbounds i32, i32* undef, i32 %v75
-  %v77 = bitcast i32* %v76 to <16 x i32>*
-  store <16 x i32> %v74, <16 x i32>* %v77, align 64, !tbaa !6
-  %v78 = getelementptr i32, i32* %v7, i32 64
-  %v79 = getelementptr i16, i16* %v8, i32 64
-  %v80 = bitcast i32* %v78 to <16 x i32>*
-  %v81 = load <16 x i32>, <16 x i32>* %v80, align 64, !tbaa !1
+  %v76 = getelementptr inbounds i32, ptr undef, i32 %v75
+  store <16 x i32> %v74, ptr %v76, align 64, !tbaa !6
+  %v78 = getelementptr i32, ptr %v7, i32 64
+  %v79 = getelementptr i16, ptr %v8, i32 64
+  %v81 = load <16 x i32>, ptr %v78, align 64, !tbaa !1
   %v82 = add nuw nsw i32 %v10, 80
-  %v83 = getelementptr inbounds i32, i32* %v1, i32 %v82
-  %v84 = bitcast i32* %v83 to <16 x i32>*
-  %v85 = load <16 x i32>, <16 x i32>* %v84, align 64, !tbaa !1
+  %v83 = getelementptr inbounds i32, ptr %v1, i32 %v82
+  %v85 = load <16 x i32>, ptr %v83, align 64, !tbaa !1
   %v86 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v85, <16 x i32> %v81)
   %v87 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v86) #2
   %v88 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v86) #2
   %v89 = tail call <32 x i32> @llvm.hexagon.V6.vdealvdd(<16 x i32> %v88, <16 x i32> %v87, i32 -4) #2
-  %v90 = bitcast i16* %v79 to <16 x i32>*
-  %v91 = load <16 x i32>, <16 x i32>* %v90, align 64, !tbaa !4
+  %v91 = load <16 x i32>, ptr %v79, align 64, !tbaa !4
   %v92 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v89) #2
   %v93 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v89) #2
   %v94 = tail call <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32> %v93, <16 x i32> %v92) #2
@@ -142,30 +130,26 @@ b2:                                               ; preds = %b2, %b1
   %v106 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> %v105, <16 x i32> %v104, i32 -4) #2
   %v107 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v106)
   %v108 = add nuw nsw i32 %v5, %v10
-  %v109 = getelementptr inbounds i32, i32* undef, i32 %v108
-  %v110 = bitcast i32* %v109 to <16 x i32>*
-  store <16 x i32> %v107, <16 x i32>* %v110, align 64, !tbaa !6
+  %v109 = getelementptr inbounds i32, ptr undef, i32 %v108
+  store <16 x i32> %v107, ptr %v109, align 64, !tbaa !6
   %v111 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v106)
   %v112 = add nuw nsw i32 %v108, 16
-  %v113 = getelementptr inbounds i32, i32* undef, i32 %v112
-  %v114 = bitcast i32* %v113 to <16 x i32>*
-  store <16 x i32> %v111, <16 x i32>* %v114, align 64, !tbaa !6
+  %v113 = getelementptr inbounds i32, ptr undef, i32 %v112
+  store <16 x i32> %v111, ptr %v113, align 64, !tbaa !6
   %v115 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> undef) #2
   %v116 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> undef, <16 x i32> %v115, i32 -4) #2
   %v117 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v116)
   %v118 = add nuw nsw i32 %v6, %v10
-  %v119 = getelementptr inbounds i32, i32* undef, i32 %v118
-  %v120 = bitcast i32* %v119 to <16 x i32>*
-  store <16 x i32> %v117, <16 x i32>* %v120, align 64, !tbaa !6
+  %v119 = getelementptr inbounds i32, ptr undef, i32 %v118
+  store <16 x i32> %v117, ptr %v119, align 64, !tbaa !6
   %v121 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v116)
   %v122 = add nuw nsw i32 %v118, 16
-  %v123 = getelementptr inbounds i32, i32* undef, i32 %v122
-  %v124 = bitcast i32* %v123 to <16 x i32>*
-  store <16 x i32> %v121, <16 x i32>* %v124, align 64, !tbaa !6
+  %v123 = getelementptr inbounds i32, ptr undef, i32 %v122
+  store <16 x i32> %v121, ptr %v123, align 64, !tbaa !6
   %v125 = add nuw nsw i32 %v9, 4
   %v126 = icmp eq i32 %v125, 24
-  %v127 = getelementptr i32, i32* %v7, i32 128
-  %v128 = getelementptr i16, i16* %v8, i32 128
+  %v127 = getelementptr i32, ptr %v7, i32 128
+  %v128 = getelementptr i16, ptr %v8, i32 128
   br i1 %v126, label %b3, label %b2
 
 b3:                                               ; preds = %b2

diff --git a/llvm/test/CodeGen/Hexagon/V60-VDblNew.ll b/llvm/test/CodeGen/Hexagon/V60-VDblNew.ll
index 36634375995fe..76b05fcdde054 100644
--- a/llvm/test/CodeGen/Hexagon/V60-VDblNew.ll
+++ b/llvm/test/CodeGen/Hexagon/V60-VDblNew.ll
@@ -10,10 +10,10 @@ target triple = "hexagon"
 ; Function Attrs: nounwind
 define i32 @f0() #0 {
 b0:
-  %v0 = load <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @g0, i32 0, i32 0), align 64, !tbaa !0
-  %v1 = load <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @g0, i32 0, i32 1), align 64, !tbaa !0
+  %v0 = load <16 x i32>, ptr @g0, align 64, !tbaa !0
+  %v1 = load <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @g0, i32 0, i32 1), align 64, !tbaa !0
   %v2 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v0, <16 x i32> %v1)
-  store <32 x i32> %v2, <32 x i32>* @g1, align 128, !tbaa !0
+  store <32 x i32> %v2, ptr @g1, align 128, !tbaa !0
   ret i32 0
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/abi-padding-2.ll b/llvm/test/CodeGen/Hexagon/abi-padding-2.ll
index 52006f66444f8..9e5ec81742add 100644
--- a/llvm/test/CodeGen/Hexagon/abi-padding-2.ll
+++ b/llvm/test/CodeGen/Hexagon/abi-padding-2.ll
@@ -26,19 +26,18 @@
 define dso_local void @bar() local_unnamed_addr #0 {
 entry:
   %s = alloca %struct.S, align 8
-  %0 = getelementptr inbounds %struct.S, %struct.S* %s, i32 0, i32 0
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %0) #3
-  store i8 97, i8* %0, align 8
-  tail call void @foo(i32 42, %struct.S* nonnull byval(%struct.S) align 8 %s) #3
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %0) #3
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %s) #3
+  store i8 97, ptr %s, align 8
+  tail call void @foo(i32 42, ptr nonnull byval(%struct.S) align 8 %s) #3
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %s) #3
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
 
-declare dso_local void @foo(i32, %struct.S* byval(%struct.S) align 8) local_unnamed_addr #2
+declare dso_local void @foo(i32, ptr byval(%struct.S) align 8) local_unnamed_addr #2
 
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
 

diff  --git a/llvm/test/CodeGen/Hexagon/abi-padding.ll b/llvm/test/CodeGen/Hexagon/abi-padding.ll
index 55ffba6fec51e..32a44b5f43873 100644
--- a/llvm/test/CodeGen/Hexagon/abi-padding.ll
+++ b/llvm/test/CodeGen/Hexagon/abi-padding.ll
@@ -27,19 +27,17 @@
 define dso_local void @bar() local_unnamed_addr #0 {
 entry:
   %s = alloca %struct.S, align 4
-  %0 = bitcast %struct.S* %s to i8*
-  call void @llvm.lifetime.start.p0i8(i64 12, i8* nonnull %0) #3
-  %arrayidx = getelementptr inbounds %struct.S, %struct.S* %s, i32 0, i32 0, i32 0
-  store i32 9, i32* %arrayidx, align 4
-  tail call void @foo(i32 42, %struct.S* nonnull byval(%struct.S) align 4 %s, %struct.S* nonnull byval(%struct.S) align 4 %s) #3
-  call void @llvm.lifetime.end.p0i8(i64 12, i8* nonnull %0) #3
+  call void @llvm.lifetime.start.p0(i64 12, ptr nonnull %s) #3
+  store i32 9, ptr %s, align 4
+  tail call void @foo(i32 42, ptr nonnull byval(%struct.S) align 4 %s, ptr nonnull byval(%struct.S) align 4 %s) #3
+  call void @llvm.lifetime.end.p0(i64 12, ptr nonnull %s) #3
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
 
-declare dso_local void @foo(i32, %struct.S* byval(%struct.S) align 4, %struct.S* byval(%struct.S) align 4) local_unnamed_addr #2
+declare dso_local void @foo(i32, ptr byval(%struct.S) align 4, ptr byval(%struct.S) align 4) local_unnamed_addr #2
 
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1

diff  --git a/llvm/test/CodeGen/Hexagon/absaddr-store.ll b/llvm/test/CodeGen/Hexagon/absaddr-store.ll
index 88af3de58385d..4574c4e5c7945 100644
--- a/llvm/test/CodeGen/Hexagon/absaddr-store.ll
+++ b/llvm/test/CodeGen/Hexagon/absaddr-store.ll
@@ -12,38 +12,38 @@
 define zeroext i8 @absStoreByte() nounwind {
 ; CHECK: memb(##b1) = r{{[0-9]+}}
 entry:
-  %0 = load i8, i8* @b0, align 1
+  %0 = load i8, ptr @b0, align 1
   %conv = zext i8 %0 to i32
   %mul = mul nsw i32 100, %conv
   %conv1 = trunc i32 %mul to i8
-  store i8 %conv1, i8* @b1, align 1
+  store i8 %conv1, ptr @b1, align 1
   ret i8 %conv1
 }
 
 define signext i16 @absStoreHalf() nounwind {
 ; CHECK: memh(##c1) = r{{[0-9]+}}
 entry:
-  %0 = load i16, i16* @c0, align 2
+  %0 = load i16, ptr @c0, align 2
   %conv = sext i16 %0 to i32
   %mul = mul nsw i32 100, %conv
   %conv1 = trunc i32 %mul to i16
-  store i16 %conv1, i16* @c1, align 2
+  store i16 %conv1, ptr @c1, align 2
   ret i16 %conv1
 }
 
 define i32 @absStoreWord() nounwind {
 ; CHECK: memw(##a1) = r{{[0-9]+}}
 entry:
-  %0 = load i32, i32* @a0, align 4
+  %0 = load i32, ptr @a0, align 4
   %mul = mul nsw i32 100, %0
-  store i32 %mul, i32* @a1, align 4
+  store i32 %mul, ptr @a1, align 4
   ret i32 %mul
 }
 
 define void @absStoreDouble() nounwind {
 ; CHECK: memd(##d) = r{{[0-9]+}}:{{[0-9]+}}
 entry:
-  store i64 100, i64* @d, align 8
+  store i64 100, ptr @d, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/absimm.ll b/llvm/test/CodeGen/Hexagon/absimm.ll
index 232b7221d0008..767324b1ab605 100644
--- a/llvm/test/CodeGen/Hexagon/absimm.ll
+++ b/llvm/test/CodeGen/Hexagon/absimm.ll
@@ -5,14 +5,14 @@
 define i32 @f1(i32 %i) nounwind {
 ; CHECK: memw(##786432) = r{{[0-9]+}}
 entry:
-  store volatile i32 %i, i32* inttoptr (i32 786432 to i32*), align 262144
+  store volatile i32 %i, ptr inttoptr (i32 786432 to ptr), align 262144
   ret i32 %i
 }
 
-define i32* @f2(i32* nocapture %i) nounwind {
+define ptr @f2(ptr nocapture %i) nounwind {
 entry:
 ; CHECK: r{{[0-9]+}} = memw(##786432)
-  %0 = load volatile i32, i32* inttoptr (i32 786432 to i32*), align 262144
-  %1 = inttoptr i32 %0 to i32*
-  ret i32* %1
+  %0 = load volatile i32, ptr inttoptr (i32 786432 to ptr), align 262144
+  %1 = inttoptr i32 %0 to ptr
+  ret ptr %1
   }

diff  --git a/llvm/test/CodeGen/Hexagon/add_mpi_RRR.ll b/llvm/test/CodeGen/Hexagon/add_mpi_RRR.ll
index ffefe3b901d19..92643f8d135d3 100644
--- a/llvm/test/CodeGen/Hexagon/add_mpi_RRR.ll
+++ b/llvm/test/CodeGen/Hexagon/add_mpi_RRR.ll
@@ -9,15 +9,15 @@ target triple = "hexagon"
 @g1 = private unnamed_addr constant [45 x i8] c"%x :  Q6_R_add_mpyi_RRR(-1,INT_MIN,INT_MIN)\0A\00", align 1
 
 ; Function Attrs: nounwind
-declare i32 @f0(i8* nocapture readonly, ...) #0
+declare i32 @f0(ptr nocapture readonly, ...) #0
 
 ; Function Attrs: nounwind
 define i32 @f1() #0 {
 b0:
   %v0 = tail call i32 @llvm.hexagon.M4.mpyrr.addr(i32 -2147483648, i32 -2147483648, i32 -2147483648)
-  %v1 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([50 x i8], [50 x i8]* @g0, i32 0, i32 0), i32 %v0) #2
+  %v1 = tail call i32 (ptr, ...) @f0(ptr @g0, i32 %v0) #2
   %v2 = tail call i32 @llvm.hexagon.M4.mpyrr.addr(i32 -1, i32 -2147483648, i32 -2147483648)
-  %v3 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([45 x i8], [45 x i8]* @g1, i32 0, i32 0), i32 %v2) #2
+  %v3 = tail call i32 (ptr, ...) @f0(ptr @g1, i32 %v2) #2
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/addaddi.ll b/llvm/test/CodeGen/Hexagon/addaddi.ll
index 6510858f1bd85..a0b18f2f05807 100644
--- a/llvm/test/CodeGen/Hexagon/addaddi.ll
+++ b/llvm/test/CodeGen/Hexagon/addaddi.ll
@@ -2,11 +2,11 @@
 ; Check for S4_addaddi:
 ; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}},add(r{{[0-9]+}},#2))
 
-define i32 @fred(i32 %a0, i32 %a1, i32* nocapture %a2) #0 {
+define i32 @fred(i32 %a0, i32 %a1, ptr nocapture %a2) #0 {
 b3:
   %v4 = add nsw i32 %a0, 2
   %v5 = add nsw i32 %v4, %a1
-  store i32 %v5, i32* %a2, align 4
+  store i32 %v5, ptr %a2, align 4
   ret i32 undef
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/addasl-address.ll b/llvm/test/CodeGen/Hexagon/addasl-address.ll
index f9fc6aa7315c1..10e923c9e84fc 100644
--- a/llvm/test/CodeGen/Hexagon/addasl-address.ll
+++ b/llvm/test/CodeGen/Hexagon/addasl-address.ll
@@ -3,28 +3,27 @@
 
 %s.0 = type { i16, i8 }
 
- at g0 = internal global [20 x i8*] zeroinitializer, align 8
+ at g0 = internal global [20 x ptr] zeroinitializer, align 8
 
 ; Function Attrs: nounwind
-define void @f0(%s.0* %a0) #0 {
+define void @f0(ptr %a0) #0 {
 b0:
-  %v0 = icmp eq %s.0* %a0, null
+  %v0 = icmp eq ptr %a0, null
   br i1 %v0, label %b2, label %b1
 
 b1:                                               ; preds = %b0
-  %v1 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 1
-  %v2 = load i8, i8* %v1, align 1, !tbaa !0
+  %v1 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 1
+  %v2 = load i8, ptr %v1, align 1, !tbaa !0
   %v3 = zext i8 %v2 to i32
-  %v4 = getelementptr inbounds [20 x i8*], [20 x i8*]* @g0, i32 0, i32 %v3
-  %v5 = bitcast i8** %v4 to i8*
-  tail call void @f1(i8* %v5) #0
+  %v4 = getelementptr inbounds [20 x ptr], ptr @g0, i32 0, i32 %v3
+  tail call void @f1(ptr %v4) #0
   br label %b2
 
 b2:                                               ; preds = %b1, %b0
   ret void
 }
 
-declare void @f1(i8*)
+declare void @f1(ptr)
 
 attributes #0 = { nounwind }
 

diff  --git a/llvm/test/CodeGen/Hexagon/addr-calc-opt.ll b/llvm/test/CodeGen/Hexagon/addr-calc-opt.ll
index 07385c84ec871..3a0019ebd4ea6 100644
--- a/llvm/test/CodeGen/Hexagon/addr-calc-opt.ll
+++ b/llvm/test/CodeGen/Hexagon/addr-calc-opt.ll
@@ -23,14 +23,14 @@
 %14 = type { %15, %16 }
 %15 = type { i8, i8 }
 %16 = type { i8, i8 }
-%17 = type { i8, i8, %1*, i16, i16, i16, i64, i32, i32, %18, i8, %21, i8, [2 x i16], i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i16, i16, i16, i8, i8, i8, i16, i16, [2 x i16], i16, [2 x i32], [2 x i16], [2 x i16], i8, i8, [6 x %23], i8, i8, i8, %24, %25, %26, %28 }
+%17 = type { i8, i8, ptr, i16, i16, i16, i64, i32, i32, %18, i8, %21, i8, [2 x i16], i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i16, i16, i16, i8, i8, i8, i16, i16, [2 x i16], i16, [2 x i32], [2 x i16], [2 x i16], i8, i8, [6 x %23], i8, i8, i8, %24, %25, %26, %28 }
 %18 = type { %19, [10 x %20] }
 %19 = type { i32 }
 %20 = type { [2 x i8], [2 x i8], i8, i8, i8, i8 }
 %21 = type { i8, i8, i8, [8 x %22] }
 %22 = type { i8, i8, i8, i32 }
 %23 = type { i32, i16, i16, [2 x i16], [2 x i16], [2 x i16], i32 }
-%24 = type { [2 x i32], [2 x i64*], [2 x i64*], [2 x i64*], [2 x i32], [2 x i32], i32 }
+%24 = type { [2 x i32], [2 x ptr], [2 x ptr], [2 x ptr], [2 x i32], [2 x i32], i32 }
 %25 = type { [2 x i32], [2 x i32], [2 x i32] }
 %26 = type { i8, i8, i8, i16, i16, %27, i32, i32, i32, i16 }
 %27 = type { i64 }
@@ -47,8 +47,8 @@
 define zeroext i8 @myFun(i8 zeroext, i8 zeroext) {
   %3 = zext i8 %1 to i32
   %4 = zext i8 %0 to i32
-  %5 = getelementptr inbounds %0, %0* @the_global, i32 0, i32 0, i32 %4, i32 60, i32 0, i32 9, i32 1, i32 %3, i32 0, i32 0
-  %6 = load i8, i8* %5, align 4
+  %5 = getelementptr inbounds %0, ptr @the_global, i32 0, i32 0, i32 %4, i32 60, i32 0, i32 9, i32 1, i32 %3, i32 0, i32 0
+  %6 = load i8, ptr %5, align 4
   ret i8 %6
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/addr-mode-opt.ll b/llvm/test/CodeGen/Hexagon/addr-mode-opt.ll
index 54d94316d3dff..e47bda84e4e55 100644
--- a/llvm/test/CodeGen/Hexagon/addr-mode-opt.ll
+++ b/llvm/test/CodeGen/Hexagon/addr-mode-opt.ll
@@ -8,41 +8,41 @@
 
 @g0 = external global i32, align 4
 
-define i32 @f0(i8* nocapture readonly %a0, i8* nocapture readonly %a1) {
+define i32 @f0(ptr nocapture readonly %a0, ptr nocapture readonly %a1) {
 b0:
-  %v0 = getelementptr inbounds i8, i8* %a0, i32 2
-  %v1 = getelementptr inbounds i8, i8* %a1, i32 3
+  %v0 = getelementptr inbounds i8, ptr %a0, i32 2
+  %v1 = getelementptr inbounds i8, ptr %a1, i32 3
   br label %b2
 
 b1:                                               ; preds = %b3
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v7
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v7
   %v3 = add nuw nsw i32 %v7, 1
-  %v4 = getelementptr inbounds i8, i8* %a1, i32 %v3
+  %v4 = getelementptr inbounds i8, ptr %a1, i32 %v3
   %v5 = icmp eq i32 %v7, 3
   br i1 %v5, label %b4, label %b2
 
 b2:                                               ; preds = %b1, %b0
-  %v6 = phi i8* [ %v1, %b0 ], [ %v4, %b1 ]
+  %v6 = phi ptr [ %v1, %b0 ], [ %v4, %b1 ]
   %v7 = phi i32 [ 3, %b0 ], [ %v3, %b1 ]
-  %v8 = phi i8* [ %v0, %b0 ], [ %v2, %b1 ]
+  %v8 = phi ptr [ %v0, %b0 ], [ %v2, %b1 ]
   br label %b3
 
 b3:                                               ; preds = %b3, %b2
-  %v9 = load i8, i8* %v8, align 1
+  %v9 = load i8, ptr %v8, align 1
   %v10 = zext i8 %v9 to i32
-  %v11 = load i8, i8* %v6, align 1
+  %v11 = load i8, ptr %v6, align 1
   %v12 = zext i8 %v11 to i32
-  %v13 = tail call i32 bitcast (i32 (...)* @f1 to i32 (i32, i32)*)(i32 %v10, i32 %v12)
+  %v13 = tail call i32 @f1(i32 %v10, i32 %v12)
   %v14 = icmp eq i32 %v13, 0
   br i1 %v14, label %b1, label %b3
 
 b4:                                               ; preds = %b1
-  %v15 = tail call i32 @f2(i8* %a0, i8* %a1)
+  %v15 = tail call i32 @f2(ptr %a0, ptr %a1)
   %v16 = icmp sgt i32 %v15, 0
   br i1 %v16, label %b5, label %b6
 
 b5:                                               ; preds = %b4
-  store i32 10, i32* @g0, align 4
+  store i32 10, ptr @g0, align 4
   br label %b6
 
 b6:                                               ; preds = %b5, %b4
@@ -52,4 +52,4 @@ b6:                                               ; preds = %b5, %b4
 
 declare i32 @f1(...)
 
-declare i32 @f2(i8* nocapture, i8* nocapture)
+declare i32 @f2(ptr nocapture, ptr nocapture)

diff  --git a/llvm/test/CodeGen/Hexagon/addrmode-align.ll b/llvm/test/CodeGen/Hexagon/addrmode-align.ll
index f39019a0b40ef..c6d0978d3190c 100644
--- a/llvm/test/CodeGen/Hexagon/addrmode-align.ll
+++ b/llvm/test/CodeGen/Hexagon/addrmode-align.ll
@@ -12,20 +12,18 @@
 define i32 @f0() local_unnamed_addr {
 b0:
   %v0 = alloca [10 x %s.0], align 8
-  %v1 = bitcast [10 x %s.0]* %v0 to i8*
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
   %v2 = phi i32 [ 0, %b0 ], [ %v6, %b1 ]
-  %v3 = getelementptr inbounds [10 x %s.0], [10 x %s.0]* %v0, i32 0, i32 %v2, i32 0
-  store i32 0, i32* %v3, align 8
-  %v4 = getelementptr inbounds [10 x %s.0], [10 x %s.0]* %v0, i32 0, i32 %v2, i32 1
-  store i8 0, i8* %v4, align 4
-  %v5 = getelementptr inbounds [10 x %s.0], [10 x %s.0]* %v0, i32 0, i32 %v2, i32 2
+  %v3 = getelementptr inbounds [10 x %s.0], ptr %v0, i32 0, i32 %v2, i32 0
+  store i32 0, ptr %v3, align 8
+  %v4 = getelementptr inbounds [10 x %s.0], ptr %v0, i32 0, i32 %v2, i32 1
+  store i8 0, ptr %v4, align 4
+  %v5 = getelementptr inbounds [10 x %s.0], ptr %v0, i32 0, i32 %v2, i32 2
   %v6 = add nuw nsw i32 %v2, 1
   %v7 = icmp eq i32 %v6, 10
-  %v8 = bitcast double* %v5 to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 8 %v8, i8 0, i64 16, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 8 %v5, i8 0, i64 16, i1 false)
   br i1 %v7, label %b2, label %b1
 
 b2:                                               ; preds = %b1
@@ -38,14 +36,13 @@ b3:                                               ; preds = %b3, %b2
   br i1 %v11, label %b4, label %b3
 
 b4:                                               ; preds = %b3
-  %v12 = getelementptr inbounds [10 x %s.0], [10 x %s.0]* %v0, i32 0, i32 0, i32 0
-  %v13 = load i32, i32* %v12, align 8
+  %v13 = load i32, ptr %v0, align 8
   %v14 = sub nsw i32 1122, %v13
   %v15 = icmp eq i32 %v14, 1121
   br i1 %v15, label %b6, label %b5
 
 b5:                                               ; preds = %b4
-  store i32 1, i32* @g0, align 4
+  store i32 1, ptr @g0, align 4
   br label %b6
 
 b6:                                               ; preds = %b5, %b4
@@ -56,6 +53,6 @@ b6:                                               ; preds = %b5, %b4
 declare void @f1() local_unnamed_addr
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #0
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1) #0
 
 attributes #0 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/addrmode-indoff.ll b/llvm/test/CodeGen/Hexagon/addrmode-indoff.ll
index 274add33898b4..dfe88a2ba3f9c 100644
--- a/llvm/test/CodeGen/Hexagon/addrmode-indoff.ll
+++ b/llvm/test/CodeGen/Hexagon/addrmode-indoff.ll
@@ -8,8 +8,8 @@
 ; CHECK: memub(r{{[0-9]+}}+##ga)
 define zeroext i8 @test0(i32 %i) nounwind readonly {
 entry:
-  %t = getelementptr inbounds [1024 x i8], [1024 x i8]* @ga, i32 0, i32 %i
-  %0 = load i8, i8* %t, align 1
+  %t = getelementptr inbounds [1024 x i8], ptr @ga, i32 0, i32 %i
+  %0 = load i8, ptr %t, align 1
   ret i8 %0
 }
 
@@ -17,8 +17,8 @@ entry:
 ; CHECK: memb(r{{[0-9]+}}+##ga)
 define signext i8 @test1(i32 %i) nounwind readonly {
 entry:
-  %t = getelementptr inbounds [1024 x i8], [1024 x i8]* @ga, i32 0, i32 %i
-  %0 = load i8, i8* %t, align 1
+  %t = getelementptr inbounds [1024 x i8], ptr @ga, i32 0, i32 %i
+  %0 = load i8, ptr %t, align 1
   ret i8 %0
 }
 
@@ -27,8 +27,8 @@ entry:
 define zeroext i8 @test2(i32 %i) nounwind readonly {
 entry:
   %j = shl nsw i32 %i, 1
-  %t = getelementptr inbounds [1024 x i8], [1024 x i8]* @ga, i32 0, i32 %j
-  %0 = load i8, i8* %t, align 1
+  %t = getelementptr inbounds [1024 x i8], ptr @ga, i32 0, i32 %j
+  %0 = load i8, ptr %t, align 1
   ret i8 %0
 }
 
@@ -37,8 +37,8 @@ entry:
 define signext i8 @test3(i32 %i) nounwind readonly {
 entry:
   %j = shl nsw i32 %i, 1
-  %t = getelementptr inbounds [1024 x i8], [1024 x i8]* @ga, i32 0, i32 %j
-  %0 = load i8, i8* %t, align 1
+  %t = getelementptr inbounds [1024 x i8], ptr @ga, i32 0, i32 %j
+  %0 = load i8, ptr %t, align 1
   ret i8 %0
 }
 
@@ -47,8 +47,8 @@ entry:
 define zeroext i8 @test4(i32 %i) nounwind readonly {
 entry:
   %j = shl nsw i32 %i, 2
-  %t = getelementptr inbounds [1024 x i8], [1024 x i8]* @ga, i32 0, i32 %j
-  %0 = load i8, i8* %t, align 1
+  %t = getelementptr inbounds [1024 x i8], ptr @ga, i32 0, i32 %j
+  %0 = load i8, ptr %t, align 1
   ret i8 %0
 }
 
@@ -57,8 +57,8 @@ entry:
 define signext i8 @test5(i32 %i) nounwind readonly {
 entry:
   %j = shl nsw i32 %i, 2
-  %t = getelementptr inbounds [1024 x i8], [1024 x i8]* @ga, i32 0, i32 %j
-  %0 = load i8, i8* %t, align 1
+  %t = getelementptr inbounds [1024 x i8], ptr @ga, i32 0, i32 %j
+  %0 = load i8, ptr %t, align 1
   ret i8 %0
 }
 
@@ -66,8 +66,8 @@ entry:
 ; CHECK: memb(r{{[0-9]+}}+##ga)
 define void @test10(i32 %i, i8 zeroext %v) nounwind {
 entry:
-  %t = getelementptr inbounds [1024 x i8], [1024 x i8]* @ga, i32 0, i32 %i
-  store i8 %v, i8* %t, align 1
+  %t = getelementptr inbounds [1024 x i8], ptr @ga, i32 0, i32 %i
+  store i8 %v, ptr %t, align 1
   ret void
 }
 
@@ -76,8 +76,8 @@ entry:
 define void @test11(i32 %i, i8 signext %v) nounwind {
 entry:
   %j = shl nsw i32 %i, 1
-  %t = getelementptr inbounds [1024 x i8], [1024 x i8]* @ga, i32 0, i32 %j
-  store i8 %v, i8* %t, align 1
+  %t = getelementptr inbounds [1024 x i8], ptr @ga, i32 0, i32 %j
+  store i8 %v, ptr %t, align 1
   ret void
 }
 
@@ -86,7 +86,7 @@ entry:
 define void @test12(i32 %i, i8 zeroext %v) nounwind {
 entry:
   %j = shl nsw i32 %i, 2
-  %t = getelementptr inbounds [1024 x i8], [1024 x i8]* @ga, i32 0, i32 %j
-  store i8 %v, i8* %t, align 1
+  %t = getelementptr inbounds [1024 x i8], ptr @ga, i32 0, i32 %j
+  store i8 %v, ptr %t, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/addrmode-keepdeadphis.ll b/llvm/test/CodeGen/Hexagon/addrmode-keepdeadphis.ll
index 6b2755a96117c..5a9dd03ac6acc 100644
--- a/llvm/test/CodeGen/Hexagon/addrmode-keepdeadphis.ll
+++ b/llvm/test/CodeGen/Hexagon/addrmode-keepdeadphis.ll
@@ -22,29 +22,29 @@ target triple = "hexagon"
 define void @f0.1() local_unnamed_addr #0 align 2 {
 b0:
   %v0 = alloca %s.0, align 8
-  %v1 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 1
-  store i8 4, i8* %v1, align 1
-  %v2 = call signext i8 @f1.2(%s.3* undef) #0
-  %v3 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 2, i32 0, i32 0
-  %v4 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 2, i32 0, i32 3, i32 0
-  store i8 -1, i8* %v4, align 8
+  %v1 = getelementptr inbounds %s.0, ptr %v0, i32 0, i32 1
+  store i8 4, ptr %v1, align 1
+  %v2 = call signext i8 @f1.2(ptr undef) #0
+  %v3 = getelementptr inbounds %s.0, ptr %v0, i32 0, i32 2, i32 0, i32 0
+  %v4 = getelementptr inbounds %s.0, ptr %v0, i32 0, i32 2, i32 0, i32 3, i32 0
+  store i8 -1, ptr %v4, align 8
   br i1 undef, label %b1, label %b2
 
 b1:                                               ; preds = %b0
-  %v5 = call dereferenceable(12) %s.3* @f2.3(%s.3* nonnull undef, %s.3* nonnull dereferenceable(80) undef) #0
-  %v6 = call signext i8 @f1.2(%s.3* undef) #0
-  %v7 = call dereferenceable(12) %s.3* @f3(%s.3* nonnull %v5, i16 signext undef) #0
+  %v5 = call dereferenceable(12) ptr @f2.3(ptr nonnull undef, ptr nonnull dereferenceable(80) undef) #0
+  %v6 = call signext i8 @f1.2(ptr undef) #0
+  %v7 = call dereferenceable(12) ptr @f3(ptr nonnull %v5, i16 signext undef) #0
   br label %b2
 
 b2:                                               ; preds = %b1, %b0
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 undef, i8* align 8 %v3, i32 48, i1 false)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 8 undef, ptr align 8 %v3, i32 48, i1 false)
   ret void
 }
 
-declare signext i8 @f1.2(%s.3*) #0
-declare dereferenceable(12) %s.3* @f2.3(%s.3*, %s.3* dereferenceable(80)) #0
-declare dereferenceable(12) %s.3* @f3(%s.3*, i16 signext) #0
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #1
+declare signext i8 @f1.2(ptr) #0
+declare dereferenceable(12) ptr @f2.3(ptr, ptr dereferenceable(80)) #0
+declare dereferenceable(12) ptr @f3(ptr, i16 signext) #0
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1) #1
 
 attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="-long-calls" }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/addrmode-offset.ll b/llvm/test/CodeGen/Hexagon/addrmode-offset.ll
index 01294d09907b1..2afe8224ae8af 100644
--- a/llvm/test/CodeGen/Hexagon/addrmode-offset.ll
+++ b/llvm/test/CodeGen/Hexagon/addrmode-offset.ll
@@ -2,53 +2,49 @@
 
 ; CHECK-NOT: [[REG0:(r[0-9]+)]] = memw([[REG0:(r[0-9]+)]]<<#2+##state-4)
 
-%s.0 = type { i16, [10 x %s.1*] }
+%s.0 = type { i16, [10 x ptr] }
 %s.1 = type { %s.2, i16, i16 }
 %s.2 = type { i8, [15 x %s.3], [18 x %s.4], %s.5, i16 }
-%s.3 = type { %s.5, %s.4*, i8*, i16, i8, i8, [3 x %s.4*], [3 x %s.4*], [3 x %s.4*] }
-%s.4 = type { %s.5, %s.5*, i8, i16, i8 }
-%s.5 = type { %s.5*, %s.5* }
+%s.3 = type { %s.5, ptr, ptr, i16, i8, i8, [3 x ptr], [3 x ptr], [3 x ptr] }
+%s.4 = type { %s.5, ptr, i8, i16, i8 }
+%s.5 = type { ptr, ptr }
 %s.6 = type { i8, i8 }
 
 @g0 = common global %s.0 zeroinitializer, align 4
 
 ; Function Attrs: nounwind optsize
-define void @f0(%s.6* nocapture readonly %a0) local_unnamed_addr #0 {
+define void @f0(ptr nocapture readonly %a0) local_unnamed_addr #0 {
 b0:
-  %v0 = bitcast %s.6* %a0 to %s.6*
-  %v1 = getelementptr %s.6, %s.6* %v0, i32 0, i32 1
-  %v2 = load i8, i8* %v1, align 1
+  %v1 = getelementptr %s.6, ptr %a0, i32 0, i32 1
+  %v2 = load i8, ptr %v1, align 1
   %v3 = zext i8 %v2 to i32
   %v4 = add nsw i32 %v3, -1
-  %v5 = getelementptr %s.0, %s.0* @g0, i32 0, i32 1
-  %v6 = getelementptr [10 x %s.1*], [10 x %s.1*]* %v5, i32 0, i32 %v4
-  %v7 = load %s.1*, %s.1** %v6, align 4
-  %v8 = icmp eq %s.1* %v7, null
+  %v5 = getelementptr %s.0, ptr @g0, i32 0, i32 1
+  %v6 = getelementptr [10 x ptr], ptr %v5, i32 0, i32 %v4
+  %v7 = load ptr, ptr %v6, align 4
+  %v8 = icmp eq ptr %v7, null
   br i1 %v8, label %b4, label %b1
 
 b1:                                               ; preds = %b0
-  %v9 = bitcast %s.1* %v7 to %s.1*
-  %v10 = bitcast %s.1* %v9 to i8*
-  %v11 = load i8, i8* %v10, align 4
+  %v11 = load i8, ptr %v7, align 4
   %v12 = icmp eq i8 %v11, %v2
   br i1 %v12, label %b2, label %b4
 
 b2:                                               ; preds = %b1
-  %v13 = bitcast %s.6* %a0 to %s.6*
-  tail call void @f1(%s.1* nonnull %v7) #2
-  %v14 = getelementptr %s.6, %s.6* %v13, i32 0, i32 1
-  %v15 = load i8, i8* %v14, align 1
+  tail call void @f1(ptr nonnull %v7) #2
+  %v14 = getelementptr %s.6, ptr %a0, i32 0, i32 1
+  %v15 = load i8, ptr %v14, align 1
   %v16 = zext i8 %v15 to i32
   %v17 = add nsw i32 %v16, -1
-  %v18 = getelementptr [10 x %s.1*], [10 x %s.1*]* %v5, i32 0, i32 %v17
-  %v19 = load %s.1*, %s.1** %v18, align 4
-  %v20 = icmp eq %s.1* %v19, null
+  %v18 = getelementptr [10 x ptr], ptr %v5, i32 0, i32 %v17
+  %v19 = load ptr, ptr %v18, align 4
+  %v20 = icmp eq ptr %v19, null
   br i1 %v20, label %b4, label %b3
 
 b3:                                               ; preds = %b2
-  %v21 = getelementptr %s.1, %s.1* %v19, i32 0, i32 0, i32 3
-  tail call void @f2(%s.5* %v21) #2
-  store %s.1* null, %s.1** %v18, align 4
+  %v21 = getelementptr %s.1, ptr %v19, i32 0, i32 0, i32 3
+  tail call void @f2(ptr %v21) #2
+  store ptr null, ptr %v18, align 4
   br label %b4
 
 b4:                                               ; preds = %b3, %b2, %b1, %b0
@@ -56,10 +52,10 @@ b4:                                               ; preds = %b3, %b2, %b1, %b0
 }
 
 ; Function Attrs: optsize
-declare void @f1(%s.1*) #1
+declare void @f1(ptr) #1
 
 ; Function Attrs: optsize
-declare void @f2(%s.5*) #1
+declare void @f2(ptr) #1
 
 attributes #0 = { nounwind optsize "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length64b" }
 attributes #1 = { optsize "target-cpu"="hexagonv60" "target-features"="+hvx" }

diff  --git a/llvm/test/CodeGen/Hexagon/addsubcarry.ll b/llvm/test/CodeGen/Hexagon/addsubcarry.ll
index b5e981c52f584..85eb71bfc533b 100644
--- a/llvm/test/CodeGen/Hexagon/addsubcarry.ll
+++ b/llvm/test/CodeGen/Hexagon/addsubcarry.ll
@@ -8,7 +8,7 @@
 ; CHECK: add({{.*}},{{.*}},p[[P0]]):carry
 define void @addc(i128 %a0, i128 %a1) #0 {
   %v0 = add i128 %a0, %a1
-  store i128 %v0, i128* @g, align 8
+  store i128 %v0, ptr @g, align 8
   ret void
 }
 
@@ -18,7 +18,7 @@ define void @addc(i128 %a0, i128 %a1) #0 {
 ; CHECK: sub({{.*}},{{.*}},p[[P0]]):carry
 define void @subc(i128 %a0, i128 %a1) #0 {
   %v0 = sub i128 %a0, %a1
-  store i128 %v0, i128* @g, align 8
+  store i128 %v0, ptr @g, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/adjust-latency-stackST.ll b/llvm/test/CodeGen/Hexagon/adjust-latency-stackST.ll
index 1751359dd8e16..3bcbd012523a2 100644
--- a/llvm/test/CodeGen/Hexagon/adjust-latency-stackST.ll
+++ b/llvm/test/CodeGen/Hexagon/adjust-latency-stackST.ll
@@ -11,67 +11,59 @@
 ; CHECK: }
 ; CHECK: = memw(gp+#G)
 
-%struct.0 = type { %struct.0*, i32, %struct.2 }
+%struct.0 = type { ptr, i32, %struct.2 }
 %struct.1 = type { i32, i32, [31 x i8] }
 %struct.2 = type { %struct.1 }
 
- at G = common global %struct.0* null, align 4
+ at G = common global ptr null, align 4
 
-define i32 @test(%struct.0* nocapture %a0) #0 {
+define i32 @test(ptr nocapture %a0) #0 {
 b1:
-  %v2 = alloca %struct.0*, align 4
-  %v3 = bitcast %struct.0** %v2 to i8*
-  %v4 = getelementptr inbounds %struct.0, %struct.0* %a0, i32 0, i32 0
-  %v5 = load %struct.0*, %struct.0** %v4, align 4
-  store %struct.0* %v5, %struct.0** %v2, align 4
-  %v6 = bitcast %struct.0* %v5 to i8*
-  %v7 = load i8*, i8** bitcast (%struct.0** @G to i8**), align 4
-  tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %v6, i8* align 4 %v7, i32 48, i1 false)
-  %v8 = getelementptr inbounds %struct.0, %struct.0* %a0, i32 0, i32 2, i32 0, i32 1
-  store i32 5, i32* %v8, align 4
-  %v9 = getelementptr inbounds %struct.0, %struct.0* %v5, i32 0, i32 2, i32 0, i32 1
-  store i32 5, i32* %v9, align 4
-  %v10 = bitcast %struct.0* %a0 to i32*
-  %v11 = load i32, i32* %v10, align 4
-  %v12 = bitcast %struct.0* %v5 to i32*
-  store i32 %v11, i32* %v12, align 4
-  %v13 = call i32 bitcast (i32 (...)* @f0 to i32 (%struct.0**)*)(%struct.0** nonnull %v2)
-  %v14 = load %struct.0*, %struct.0** %v2, align 4
-  %v15 = getelementptr inbounds %struct.0, %struct.0* %v14, i32 0, i32 1
-  %v16 = load i32, i32* %v15, align 4
+  %v2 = alloca ptr, align 4
+  %v5 = load ptr, ptr %a0, align 4
+  store ptr %v5, ptr %v2, align 4
+  %v7 = load ptr, ptr @G, align 4
+  tail call void @llvm.memcpy.p0.p0.i32(ptr align 4 %v5, ptr align 4 %v7, i32 48, i1 false)
+  %v8 = getelementptr inbounds %struct.0, ptr %a0, i32 0, i32 2, i32 0, i32 1
+  store i32 5, ptr %v8, align 4
+  %v9 = getelementptr inbounds %struct.0, ptr %v5, i32 0, i32 2, i32 0, i32 1
+  store i32 5, ptr %v9, align 4
+  %v11 = load i32, ptr %a0, align 4
+  store i32 %v11, ptr %v5, align 4
+  %v13 = call i32 @f0(ptr nonnull %v2)
+  %v14 = load ptr, ptr %v2, align 4
+  %v15 = getelementptr inbounds %struct.0, ptr %v14, i32 0, i32 1
+  %v16 = load i32, ptr %v15, align 4
   %v17 = icmp eq i32 %v16, 0
   br i1 %v17, label %b18, label %b32
 
 b18:                                              ; preds = %b1
-  %v19 = bitcast %struct.0** %v2 to i32**
-  %v20 = getelementptr inbounds %struct.0, %struct.0* %v14, i32 0, i32 2, i32 0, i32 1
-  store i32 6, i32* %v20, align 4
-  %v21 = getelementptr inbounds %struct.0, %struct.0* %a0, i32 0, i32 2, i32 0, i32 0
-  %v22 = load i32, i32* %v21, align 4
-  %v23 = getelementptr inbounds %struct.0, %struct.0* %v14, i32 0, i32 2, i32 0, i32 0
-  %v24 = call i32 bitcast (i32 (...)* @f1 to i32 (i32, i32*)*)(i32 %v22, i32* %v23)
-  %v25 = load i32*, i32** bitcast (%struct.0** @G to i32**), align 4
-  %v26 = load i32, i32* %v25, align 4
-  %v27 = load i32*, i32** %v19, align 4
-  store i32 %v26, i32* %v27, align 4
-  %v28 = load %struct.0*, %struct.0** %v2, align 4
-  %v29 = getelementptr inbounds %struct.0, %struct.0* %v28, i32 0, i32 2, i32 0, i32 1
-  %v30 = load i32, i32* %v29, align 4
-  %v31 = call i32 bitcast (i32 (...)* @f2 to i32 (i32, i32, i32*)*)(i32 %v30, i32 10, i32* %v29)
+  %v20 = getelementptr inbounds %struct.0, ptr %v14, i32 0, i32 2, i32 0, i32 1
+  store i32 6, ptr %v20, align 4
+  %v21 = getelementptr inbounds %struct.0, ptr %a0, i32 0, i32 2, i32 0, i32 0
+  %v22 = load i32, ptr %v21, align 4
+  %v23 = getelementptr inbounds %struct.0, ptr %v14, i32 0, i32 2, i32 0, i32 0
+  %v24 = call i32 @f1(i32 %v22, ptr %v23)
+  %v25 = load ptr, ptr @G, align 4
+  %v26 = load i32, ptr %v25, align 4
+  %v27 = load ptr, ptr %v2, align 4
+  store i32 %v26, ptr %v27, align 4
+  %v28 = load ptr, ptr %v2, align 4
+  %v29 = getelementptr inbounds %struct.0, ptr %v28, i32 0, i32 2, i32 0, i32 1
+  %v30 = load i32, ptr %v29, align 4
+  %v31 = call i32 @f2(i32 %v30, i32 10, ptr %v29)
   br label %b36
 
 b32:                                              ; preds = %b1
-  %v33 = bitcast %struct.0* %a0 to i8**
-  %v34 = load i8*, i8** %v33, align 4
-  %v35 = bitcast %struct.0* %a0 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %v35, i8* align 4 %v34, i32 48, i1 false)
+  %v34 = load ptr, ptr %a0, align 4
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %a0, ptr align 4 %v34, i32 48, i1 false)
   br label %b36
 
 b36:                                              ; preds = %b32, %b18
   ret i32 undef
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #1
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1) #1
 
 declare i32 @f0(...) #0
 declare i32 @f1(...) #0

diff  --git a/llvm/test/CodeGen/Hexagon/aggr-antidep-tied.ll b/llvm/test/CodeGen/Hexagon/aggr-antidep-tied.ll
index 170518fa0fe76..ac8e47d565ca0 100644
--- a/llvm/test/CodeGen/Hexagon/aggr-antidep-tied.ll
+++ b/llvm/test/CodeGen/Hexagon/aggr-antidep-tied.ll
@@ -17,10 +17,10 @@ declare i64 @f0() #0
 ; Function Attrs: nounwind
 define void @f1() #1 {
 b0:
-  %v0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @g0, i32 0, i32 0), align 8
+  %v0 = load i64, ptr @g0, align 8
   %v1 = trunc i64 %v0 to i32
-  %v2 = load i64, i64* getelementptr inbounds ([6 x i64], [6 x i64]* @g1, i32 0, i32 0), align 8
-  %v3 = load i64, i64* getelementptr inbounds ([6 x i64], [6 x i64]* @g1, i32 0, i32 3), align 8
+  %v2 = load i64, ptr @g1, align 8
+  %v3 = load i64, ptr getelementptr inbounds ([6 x i64], ptr @g1, i32 0, i32 3), align 8
   %v4 = lshr i64 %v2, 32
   %v5 = trunc i64 %v4 to i32
   %v6 = add i32 %v5, 0
@@ -40,15 +40,15 @@ b0:
   %v20 = add i32 %v16, %v1
   %v21 = and i32 %v20, 65535
   %v22 = zext i32 %v21 to i64
-  tail call void (i8*, ...) @f2(i8* getelementptr inbounds ([45 x i8], [45 x i8]* @g2, i32 0, i32 0), i32 %v13) #2
-  tail call void (i8*, ...) @f2(i8* getelementptr inbounds ([26 x i8], [26 x i8]* @g3, i32 0, i32 0), i64 %v14) #2
-  tail call void (i8*, ...) @f2(i8* getelementptr inbounds ([29 x i8], [29 x i8]* @g4, i32 0, i32 0), i64 %v19) #2
-  tail call void (i8*, ...) @f2(i8* getelementptr inbounds ([29 x i8], [29 x i8]* @g5, i32 0, i32 0), i64 %v22) #2
+  tail call void (ptr, ...) @f2(ptr @g2, i32 %v13) #2
+  tail call void (ptr, ...) @f2(ptr @g3, i64 %v14) #2
+  tail call void (ptr, ...) @f2(ptr @g4, i64 %v19) #2
+  tail call void (ptr, ...) @f2(ptr @g5, i64 %v22) #2
   ret void
 }
 
 ; Function Attrs: nounwind
-declare void @f2(i8* nocapture readonly, ...) #1
+declare void @f2(ptr nocapture readonly, ...) #1
 
 attributes #0 = { norecurse nounwind readonly "target-cpu"="hexagonv55" }
 attributes #1 = { nounwind "target-cpu"="hexagonv55" }

diff  --git a/llvm/test/CodeGen/Hexagon/aggr-copy-order.ll b/llvm/test/CodeGen/Hexagon/aggr-copy-order.ll
index 5f28d95e243f6..c539a8adf2502 100644
--- a/llvm/test/CodeGen/Hexagon/aggr-copy-order.ll
+++ b/llvm/test/CodeGen/Hexagon/aggr-copy-order.ll
@@ -8,25 +8,23 @@ target triple = "hexagon"
 %s.0 = type { i32, i32, i32 }
 
 ; Function Attrs: nounwind
-define void @f0(%s.0* %a0, %s.0* %a1) #0 {
+define void @f0(ptr %a0, ptr %a1) #0 {
 b0:
 ; CHECK: = memw({{.*}}+#0)
 ; CHECK: = memw({{.*}}+#4)
 ; CHECK: = memw({{.*}}+#8)
-  %v0 = alloca %s.0*, align 4
-  %v1 = alloca %s.0*, align 4
-  store %s.0* %a0, %s.0** %v0, align 4
-  store %s.0* %a1, %s.0** %v1, align 4
-  %v2 = load %s.0*, %s.0** %v0, align 4
-  %v3 = load %s.0*, %s.0** %v1, align 4
-  %v4 = bitcast %s.0* %v2 to i8*
-  %v5 = bitcast %s.0* %v3 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %v4, i8* align 4 %v5, i32 12, i1 false)
+  %v0 = alloca ptr, align 4
+  %v1 = alloca ptr, align 4
+  store ptr %a0, ptr %v0, align 4
+  store ptr %a1, ptr %v1, align 4
+  %v2 = load ptr, ptr %v0, align 4
+  %v3 = load ptr, ptr %v1, align 4
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %v2, ptr align 4 %v3, i32 12, i1 false)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #1
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1) #1
 
 attributes #0 = { nounwind }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/aggressive_licm.ll b/llvm/test/CodeGen/Hexagon/aggressive_licm.ll
index c99fc0169e98a..3936e232fec26 100644
--- a/llvm/test/CodeGen/Hexagon/aggressive_licm.ll
+++ b/llvm/test/CodeGen/Hexagon/aggressive_licm.ll
@@ -12,17 +12,17 @@ target triple = "hexagon"
 define i32 @f0() #0 {
 b0:
   %v0 = alloca i16, align 2
-  call void @f1(i16* getelementptr inbounds ([4 x i16], [4 x i16]* @g0, i32 0, i32 0), i16* %v0) #0
-  %v1 = load i16, i16* %v0, align 2, !tbaa !0
+  call void @f1(ptr @g0, ptr %v0) #0
+  %v1 = load i16, ptr %v0, align 2, !tbaa !0
   %v2 = icmp slt i16 %v1, -15
   br i1 %v2, label %b1, label %b4
 
 b1:                                               ; preds = %b0
-  %v3 = load i32, i32* bitcast ([4 x i16]* @g0 to i32*), align 8
+  %v3 = load i32, ptr @g0, align 8
   %v4 = trunc i32 %v3 to i16
   %v5 = lshr i32 %v3, 16
   %v6 = trunc i32 %v5 to i16
-  %v7 = load i32, i32* bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @g0, i32 0, i32 2) to i32*), align 4
+  %v7 = load i32, ptr getelementptr inbounds ([4 x i16], ptr @g0, i32 0, i32 2), align 4
   %v8 = trunc i32 %v7 to i16
   %v9 = lshr i32 %v7, 16
   %v10 = trunc i32 %v9 to i16
@@ -43,22 +43,22 @@ b2:                                               ; preds = %b2, %b1
   br i1 %v21, label %b2, label %b3
 
 b3:                                               ; preds = %b2
-  store i16 %v14, i16* getelementptr inbounds ([4 x i16], [4 x i16]* @g0, i32 0, i32 0), align 8, !tbaa !0
-  store i16 %v13, i16* getelementptr inbounds ([4 x i16], [4 x i16]* @g0, i32 0, i32 1), align 2, !tbaa !0
-  store i16 %v12, i16* getelementptr inbounds ([4 x i16], [4 x i16]* @g0, i32 0, i32 2), align 4, !tbaa !0
-  store i16 0, i16* getelementptr inbounds ([4 x i16], [4 x i16]* @g0, i32 0, i32 3), align 2, !tbaa !0
-  store i16 %v20, i16* %v0, align 2, !tbaa !0
+  store i16 %v14, ptr @g0, align 8, !tbaa !0
+  store i16 %v13, ptr getelementptr inbounds ([4 x i16], ptr @g0, i32 0, i32 1), align 2, !tbaa !0
+  store i16 %v12, ptr getelementptr inbounds ([4 x i16], ptr @g0, i32 0, i32 2), align 4, !tbaa !0
+  store i16 0, ptr getelementptr inbounds ([4 x i16], ptr @g0, i32 0, i32 3), align 2, !tbaa !0
+  store i16 %v20, ptr %v0, align 2, !tbaa !0
   br label %b4
 
 b4:                                               ; preds = %b3, %b0
   %v22 = phi i16 [ %v19, %b3 ], [ 0, %b0 ]
-  call void @f2(i16* getelementptr inbounds ([4 x i16], [4 x i16]* @g0, i32 0, i32 0), i16 signext %v22) #0
+  call void @f2(ptr @g0, i16 signext %v22) #0
   ret i32 0
 }
 
-declare void @f1(i16*, i16*) #0
+declare void @f1(ptr, ptr) #0
 
-declare void @f2(i16*, i16 signext) #0
+declare void @f2(ptr, i16 signext) #0
 
 attributes #0 = { nounwind }
 

diff  --git a/llvm/test/CodeGen/Hexagon/align_test.ll b/llvm/test/CodeGen/Hexagon/align_test.ll
index 9934c480b8557..9000665435139 100644
--- a/llvm/test/CodeGen/Hexagon/align_test.ll
+++ b/llvm/test/CodeGen/Hexagon/align_test.ll
@@ -8,7 +8,7 @@ target triple = "hexagon"
 %s.1 = type { [16 x i32] }
 
 ; Function Attrs: nounwind
-define i32 @f0(i32 %a0, %s.0* nocapture %a1) #0 {
+define i32 @f0(i32 %a0, ptr nocapture %a1) #0 {
 b0:
   %v0 = icmp sgt i32 %a0, 0
   br i1 %v0, label %b1, label %b10
@@ -25,8 +25,8 @@ b3:                                               ; preds = %b2
 b4:                                               ; preds = %b4, %b1
   %v1 = phi i32 [ %v6, %b4 ], [ 0, %b1 ]
   %v2 = phi i32 [ %v5, %b4 ], [ 0, %b1 ]
-  %v3 = getelementptr inbounds %s.0, %s.0* %a1, i32 0, i32 1, i32 0, i32 %v1
-  %v4 = load i32, i32* %v3, align 1, !tbaa !0
+  %v3 = getelementptr inbounds %s.0, ptr %a1, i32 0, i32 1, i32 0, i32 %v1
+  %v4 = load i32, ptr %v3, align 1, !tbaa !0
   %v5 = add nsw i32 %v4, %v2
   %v6 = add nsw i32 %v1, 1
   %v7 = icmp eq i32 %v6, %a0
@@ -41,8 +41,8 @@ b6:                                               ; preds = %b5
 b7:                                               ; preds = %b7, %b3
   %v8 = phi i32 [ %v13, %b7 ], [ 0, %b3 ]
   %v9 = phi i32 [ %v12, %b7 ], [ %v5, %b3 ]
-  %v10 = getelementptr inbounds %s.0, %s.0* %a1, i32 0, i32 1, i32 0, i32 %v8
-  %v11 = load i32, i32* %v10, align 1, !tbaa !0
+  %v10 = getelementptr inbounds %s.0, ptr %a1, i32 0, i32 1, i32 0, i32 %v8
+  %v11 = load i32, ptr %v10, align 1, !tbaa !0
   %v12 = add nsw i32 %v11, %v9
   %v13 = add nsw i32 %v8, 1
   %v14 = icmp eq i32 %v13, %a0
@@ -50,8 +50,8 @@ b7:                                               ; preds = %b7, %b3
 
 b8:                                               ; preds = %b8, %b6
   %v15 = phi i32 [ %v17, %b8 ], [ 0, %b6 ]
-  %v16 = getelementptr inbounds %s.0, %s.0* %a1, i32 0, i32 1, i32 0, i32 %v15
-  store i32 %a0, i32* %v16, align 1, !tbaa !0
+  %v16 = getelementptr inbounds %s.0, ptr %a1, i32 0, i32 1, i32 0, i32 %v15
+  store i32 %a0, ptr %v16, align 1, !tbaa !0
   %v17 = add nsw i32 %v15, 1
   %v18 = icmp eq i32 %v17, %a0
   br i1 %v18, label %b9, label %b8

diff  --git a/llvm/test/CodeGen/Hexagon/always-ext.ll b/llvm/test/CodeGen/Hexagon/always-ext.ll
index 4b2c915333c44..159ec0cfcb79c 100644
--- a/llvm/test/CodeGen/Hexagon/always-ext.ll
+++ b/llvm/test/CodeGen/Hexagon/always-ext.ll
@@ -10,8 +10,8 @@
 ; CHECK: memw(r{{[0-9+]}}<<#2+##4)
 ; CHECK: }
 
-%struct.CuTest.1.28.31.37.40.43.52.55.67.85.111 = type { i8*, void (%struct.CuTest.1.28.31.37.40.43.52.55.67.85.111*)*, i32, i32, i8*, [23 x i32]* }
-%struct.CuSuite.2.29.32.38.41.44.53.56.68.86.112 = type { i32, [1024 x %struct.CuTest.1.28.31.37.40.43.52.55.67.85.111*], i32 }
+%struct.CuTest.1.28.31.37.40.43.52.55.67.85.111 = type { ptr, ptr, i32, i32, ptr, ptr }
+%struct.CuSuite.2.29.32.38.41.44.53.56.68.86.112 = type { i32, [1024 x ptr], i32 }
 
 @__func__.CuSuiteAdd = external unnamed_addr constant [11 x i8], align 8
 @.str24 = external unnamed_addr constant [140 x i8], align 8
@@ -23,8 +23,8 @@ entry:
   br i1 undef, label %for.body.us, label %for.end
 
 for.body.us:                                      ; preds = %entry
-  %0 = load %struct.CuTest.1.28.31.37.40.43.52.55.67.85.111*, %struct.CuTest.1.28.31.37.40.43.52.55.67.85.111** null, align 4
-  %1 = load i32, i32* undef, align 4
+  %0 = load ptr, ptr null, align 4
+  %1 = load i32, ptr undef, align 4
   %cmp.i.us = icmp slt i32 %1, 1024
   br i1 %cmp.i.us, label %CuSuiteAdd.exit.us, label %cond.false6.i.us
 
@@ -33,8 +33,8 @@ cond.false6.i.us:                                 ; preds = %for.body.us
   unreachable
 
 CuSuiteAdd.exit.us:                               ; preds = %for.body.us
-  %arrayidx.i.us = getelementptr inbounds %struct.CuSuite.2.29.32.38.41.44.53.56.68.86.112, %struct.CuSuite.2.29.32.38.41.44.53.56.68.86.112* null, i32 0, i32 1, i32 %1
-  store %struct.CuTest.1.28.31.37.40.43.52.55.67.85.111* %0, %struct.CuTest.1.28.31.37.40.43.52.55.67.85.111** %arrayidx.i.us, align 4
+  %arrayidx.i.us = getelementptr inbounds %struct.CuSuite.2.29.32.38.41.44.53.56.68.86.112, ptr null, i32 0, i32 1, i32 %1
+  store ptr %0, ptr %arrayidx.i.us, align 4
   call void @llvm.trap()
   unreachable
 

diff  --git a/llvm/test/CodeGen/Hexagon/asr-rnd.ll b/llvm/test/CodeGen/Hexagon/asr-rnd.ll
index 19ad5a4ef7971..bc77e2a7a3ad4 100644
--- a/llvm/test/CodeGen/Hexagon/asr-rnd.ll
+++ b/llvm/test/CodeGen/Hexagon/asr-rnd.ll
@@ -9,8 +9,8 @@ define i32 @f0(i32 %a0) #0 {
 b0:
 ; CHECK: asr{{.*}}:rnd
   %v0 = alloca i32, align 4
-  store i32 %a0, i32* %v0, align 4
-  %v1 = load i32, i32* %v0, align 4
+  store i32 %a0, ptr %v0, align 4
+  %v1 = load i32, ptr %v0, align 4
   %v2 = ashr i32 %v1, 10
   %v3 = add nsw i32 %v2, 1
   %v4 = ashr i32 %v3, 1
@@ -22,8 +22,8 @@ define i64 @f1(i64 %a0) #0 {
 b0:
 ; CHECK: asr{{.*}}:rnd
   %v0 = alloca i64, align 8
-  store i64 %a0, i64* %v0, align 8
-  %v1 = load i64, i64* %v0, align 8
+  store i64 %a0, ptr %v0, align 8
+  %v1 = load i64, ptr %v0, align 8
   %v2 = ashr i64 %v1, 17
   %v3 = add nsw i64 %v2, 1
   %v4 = ashr i64 %v3, 1

diff  --git a/llvm/test/CodeGen/Hexagon/asr-rnd64.ll b/llvm/test/CodeGen/Hexagon/asr-rnd64.ll
index 45234ff999780..4928483e5be68 100644
--- a/llvm/test/CodeGen/Hexagon/asr-rnd64.ll
+++ b/llvm/test/CodeGen/Hexagon/asr-rnd64.ll
@@ -9,8 +9,8 @@ define i32 @f0(i32 %a0) {
 b0:
 ; CHECK: asr{{.*}}:rnd
   %v0 = alloca i32, align 4
-  store i32 %a0, i32* %v0, align 4
-  %v1 = load i32, i32* %v0, align 4
+  store i32 %a0, ptr %v0, align 4
+  %v1 = load i32, ptr %v0, align 4
   %v2 = ashr i32 %v1, 10
   %v3 = add nsw i32 %v2, 1
   %v4 = ashr i32 %v3, 1
@@ -21,8 +21,8 @@ define i64 @f1(i64 %a0) {
 b0:
 ; CHECK: asr{{.*}}:rnd
   %v0 = alloca i64, align 8
-  store i64 %a0, i64* %v0, align 8
-  %v1 = load i64, i64* %v0, align 8
+  store i64 %a0, ptr %v0, align 8
+  %v1 = load i64, ptr %v0, align 8
   %v2 = ashr i64 %v1, 17
   %v3 = add nsw i64 %v2, 1
   %v4 = ashr i64 %v3, 1

diff  --git a/llvm/test/CodeGen/Hexagon/assert-postinc-ptr-not-value.ll b/llvm/test/CodeGen/Hexagon/assert-postinc-ptr-not-value.ll
index 6648984c0eefc..b81237cdb212e 100644
--- a/llvm/test/CodeGen/Hexagon/assert-postinc-ptr-not-value.ll
+++ b/llvm/test/CodeGen/Hexagon/assert-postinc-ptr-not-value.ll
@@ -5,33 +5,33 @@ target triple = "hexagon"
 
 %s.0 = type { i32 }
 
- at g0 = internal unnamed_addr global %s.0* null, section ".data.............", align 4
+ at g0 = internal unnamed_addr global ptr null, section ".data.............", align 4
 @g1 = internal global i32 0, section ".data.............", align 4
 
 ; Function Attrs: nounwind
-define %s.0* @f0(i32* %a0) #0 {
+define ptr @f0(ptr %a0) #0 {
 b0:
-  %v0 = getelementptr inbounds i32, i32* %a0, i32 -1
-  %v1 = load i32, i32* %v0, align 4
+  %v0 = getelementptr inbounds i32, ptr %a0, i32 -1
+  %v1 = load i32, ptr %v0, align 4
   %v2 = and i32 %v1, -3
-  store i32 %v2, i32* %v0, align 4
-  %v3 = getelementptr inbounds i32, i32* %a0, i32 -2
-  %v4 = load i32, i32* %v3, align 4
+  store i32 %v2, ptr %v0, align 4
+  %v3 = getelementptr inbounds i32, ptr %a0, i32 -2
+  %v4 = load i32, ptr %v3, align 4
   %v5 = lshr i32 %v4, 2
   %v6 = xor i32 %v5, -1
-  %v7 = getelementptr inbounds i32, i32* %a0, i32 %v6
+  %v7 = getelementptr inbounds i32, ptr %a0, i32 %v6
   %v8 = lshr i32 %v1, 2
   %v9 = add i32 %v8, -1
-  %v10 = getelementptr inbounds i32, i32* %a0, i32 %v9
-  %v11 = load i32, i32* %v10, align 4
+  %v10 = getelementptr inbounds i32, ptr %a0, i32 %v9
+  %v11 = load i32, ptr %v10, align 4
   %v12 = lshr i32 %v11, 2
   %v13 = icmp eq i32 %v12, 0
   br i1 %v13, label %b3, label %b1
 
 b1:                                               ; preds = %b0
   %v14 = add i32 %v12, %v9
-  %v15 = getelementptr inbounds i32, i32* %a0, i32 %v14
-  %v16 = load i32, i32* %v15, align 4
+  %v15 = getelementptr inbounds i32, ptr %a0, i32 %v14
+  %v16 = load i32, ptr %v15, align 4
   %v17 = and i32 %v16, 1
   %v18 = icmp eq i32 %v17, 0
   br i1 %v18, label %b3, label %b2
@@ -41,7 +41,7 @@ b2:                                               ; preds = %b1
   %v20 = shl i32 %v19, 2
   %v21 = and i32 %v1, 1
   %v22 = or i32 %v20, %v21
-  store i32 %v22, i32* %v0, align 4
+  store i32 %v22, ptr %v0, align 4
   br label %b3
 
 b3:                                               ; preds = %b2, %b1, %b0
@@ -51,66 +51,63 @@ b3:                                               ; preds = %b2, %b1, %b0
   br i1 %v25, label %b5, label %b4
 
 b4:                                               ; preds = %b3
-  %v26 = load i32, i32* %v7, align 4
+  %v26 = load i32, ptr %v7, align 4
   %v27 = and i32 %v26, -4
   %v28 = add i32 %v27, %v23
   %v29 = and i32 %v28, -4
   %v30 = and i32 %v26, 3
   %v31 = or i32 %v29, %v30
-  store i32 %v31, i32* %v7, align 4
+  store i32 %v31, ptr %v7, align 4
   br label %b5
 
 b5:                                               ; preds = %b4, %b3
   %v32 = phi i32 [ %v31, %b4 ], [ %v23, %b3 ]
-  %v33 = phi i32* [ %v7, %b4 ], [ %v0, %b3 ]
-  %v34 = bitcast i32* %v33 to %s.0*
+  %v33 = phi ptr [ %v7, %b4 ], [ %v0, %b3 ]
   %v35 = lshr i32 %v32, 2
   %v36 = add i32 %v35, -1
-  %v37 = getelementptr inbounds %s.0, %s.0* %v34, i32 %v36, i32 0
-  %v38 = load i32, i32* %v37, align 4
+  %v37 = getelementptr inbounds %s.0, ptr %v33, i32 %v36, i32 0
+  %v38 = load i32, ptr %v37, align 4
   %v39 = shl nuw i32 %v35, 2
   %v40 = and i32 %v38, 3
   %v41 = or i32 %v40, %v39
-  store i32 %v41, i32* %v37, align 4
-  %v42 = load i32, i32* %v33, align 4
+  store i32 %v41, ptr %v37, align 4
+  %v42 = load i32, ptr %v33, align 4
   %v43 = lshr i32 %v42, 2
-  %v44 = getelementptr inbounds %s.0, %s.0* %v34, i32 %v43, i32 0
-  %v45 = load i32, i32* %v44, align 4
+  %v44 = getelementptr inbounds %s.0, ptr %v33, i32 %v43, i32 0
+  %v45 = load i32, ptr %v44, align 4
   %v46 = or i32 %v45, 1
-  store i32 %v46, i32* %v44, align 4
-  ret %s.0* %v34
+  store i32 %v46, ptr %v44, align 4
+  ret ptr %v33
 }
 
 ; Function Attrs: nounwind
 define i64 @f1(i32 %a0) #0 {
 b0:
-  %v0 = load %s.0*, %s.0** @g0, align 4, !tbaa !0
-  %v1 = getelementptr inbounds %s.0, %s.0* %v0, i32 7
-  tail call void @f2(i32* @g1) #0
+  %v0 = load ptr, ptr @g0, align 4, !tbaa !0
+  %v1 = getelementptr inbounds %s.0, ptr %v0, i32 7
+  tail call void @f2(ptr @g1) #0
   br label %b1
 
 b1:                                               ; preds = %b5, %b0
-  %v2 = phi %s.0* [ %v1, %b0 ], [ %v20, %b5 ]
-  %v3 = getelementptr inbounds %s.0, %s.0* %v2, i32 0, i32 0
-  %v4 = load i32, i32* %v3, align 4
+  %v2 = phi ptr [ %v1, %b0 ], [ %v20, %b5 ]
+  %v4 = load i32, ptr %v2, align 4
   %v5 = and i32 %v4, 2
   %v6 = icmp eq i32 %v5, 0
   br i1 %v6, label %b3, label %b2
 
 b2:                                               ; preds = %b1
   tail call fastcc void @f8()
-  %v7 = getelementptr inbounds %s.0, %s.0* %v2, i32 1, i32 0
-  %v8 = tail call %s.0* @f0(i32* %v7)
+  %v7 = getelementptr inbounds %s.0, ptr %v2, i32 1, i32 0
+  %v8 = tail call ptr @f0(ptr %v7)
   tail call fastcc void @f7()
   br label %b3
 
 b3:                                               ; preds = %b2, %b1
-  %v9 = phi %s.0* [ %v8, %b2 ], [ %v2, %b1 ]
-  %v10 = getelementptr inbounds %s.0, %s.0* %v9, i32 0, i32 0
-  %v11 = load i32, i32* %v10, align 4
+  %v9 = phi ptr [ %v8, %b2 ], [ %v2, %b1 ]
+  %v11 = load i32, ptr %v9, align 4
   %v12 = lshr i32 %v11, 2
-  %v13 = getelementptr inbounds %s.0, %s.0* %v9, i32 %v12, i32 0
-  %v14 = load i32, i32* %v13, align 4
+  %v13 = getelementptr inbounds %s.0, ptr %v9, i32 %v12, i32 0
+  %v14 = load i32, ptr %v13, align 4
   %v15 = and i32 %v14, 1
   %v16 = icmp eq i32 %v15, 0
   br i1 %v16, label %b5, label %b4
@@ -122,7 +119,7 @@ b4:                                               ; preds = %b3
   br i1 %v19, label %b5, label %b7
 
 b5:                                               ; preds = %b4, %b3
-  %v20 = getelementptr inbounds %s.0, %s.0* %v9, i32 %v12
+  %v20 = getelementptr inbounds %s.0, ptr %v9, i32 %v12
   %v21 = icmp ult i32 %v14, 4
   br i1 %v21, label %b6, label %b1
 
@@ -141,37 +138,37 @@ b7:                                               ; preds = %b4
   br i1 %v28, label %b8, label %b9
 
 b8:                                               ; preds = %b7
-  %v29 = getelementptr inbounds %s.0, %s.0* %v9, i32 %v26, i32 0
+  %v29 = getelementptr inbounds %s.0, ptr %v9, i32 %v26, i32 0
   %v30 = shl i32 %v27, 2
-  store i32 %v30, i32* %v29, align 4
-  %v31 = load i32, i32* %v10, align 4
+  store i32 %v30, ptr %v29, align 4
+  %v31 = load i32, ptr %v9, align 4
   %v32 = lshr i32 %v31, 2
   %v33 = add i32 %v32, -1
-  %v34 = getelementptr inbounds %s.0, %s.0* %v9, i32 %v33, i32 0
-  %v35 = load i32, i32* %v34, align 4
+  %v34 = getelementptr inbounds %s.0, ptr %v9, i32 %v33, i32 0
+  %v35 = load i32, ptr %v34, align 4
   %v36 = and i32 %v35, 3
   %v37 = or i32 %v36, %v30
-  store i32 %v37, i32* %v34, align 4
-  %v38 = load i32, i32* %v10, align 4
+  store i32 %v37, ptr %v34, align 4
+  %v38 = load i32, ptr %v9, align 4
   %v39 = mul i32 %v25, 32
   %v40 = and i32 %v38, 3
   %v41 = or i32 %v40, %v39
-  store i32 %v41, i32* %v10, align 4
+  store i32 %v41, ptr %v9, align 4
   br label %b10
 
 b9:                                               ; preds = %b7
   %v42 = and i32 %v14, -2
-  store i32 %v42, i32* %v13, align 4
+  store i32 %v42, ptr %v13, align 4
   br label %b10
 
 b10:                                              ; preds = %b9, %b8
   tail call fastcc void @f3()
-  %v43 = getelementptr inbounds %s.0, %s.0* %v9, i32 1
-  %v44 = load i32, i32* %v10, align 4
+  %v43 = getelementptr inbounds %s.0, ptr %v9, i32 1
+  %v44 = load i32, ptr %v9, align 4
   %v45 = lshr i32 %v44, 2
   %v46 = mul i32 %v45, 4
   %v47 = add i32 %v46, -4
-  %v48 = ptrtoint %s.0* %v43 to i32
+  %v48 = ptrtoint ptr %v43 to i32
   %v49 = zext i32 %v47 to i64
   %v50 = shl nuw i64 %v49, 32
   %v51 = zext i32 %v48 to i64
@@ -184,48 +181,48 @@ b11:                                              ; preds = %b10, %b6
   ret i64 %v54
 }
 
-declare void @f2(i32*) #0
+declare void @f2(ptr) #0
 
 ; Function Attrs: inlinehint nounwind
 define internal fastcc void @f3() #1 {
 b0:
-  store i32 0, i32* @g1, align 4, !tbaa !4
+  store i32 0, ptr @g1, align 4, !tbaa !4
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @f4(i32* nocapture %a0) #0 {
+define void @f4(ptr nocapture %a0) #0 {
 b0:
-  %v0 = getelementptr inbounds i32, i32* %a0, i32 -1
-  tail call void @f2(i32* @g1) #0
-  %v1 = load i32, i32* %v0, align 4
+  %v0 = getelementptr inbounds i32, ptr %a0, i32 -1
+  tail call void @f2(ptr @g1) #0
+  %v1 = load i32, ptr %v0, align 4
   %v2 = or i32 %v1, 2
-  store i32 %v2, i32* %v0, align 4
+  store i32 %v2, ptr %v0, align 4
   tail call fastcc void @f3()
   ret void
 }
 
 ; Function Attrs: nounwind
-define %s.0* @f5(i32* %a0) #0 {
+define ptr @f5(ptr %a0) #0 {
 b0:
-  tail call void @f2(i32* @g1) #0
-  %v0 = tail call %s.0* @f0(i32* %a0)
+  tail call void @f2(ptr @g1) #0
+  %v0 = tail call ptr @f0(ptr %a0)
   tail call fastcc void @f3()
-  ret %s.0* %v0
+  ret ptr %v0
 }
 
 ; Function Attrs: nounwind
-define void @f6(%s.0* %a0, i32 %a1) #0 {
+define void @f6(ptr %a0, i32 %a1) #0 {
 b0:
-  %v0 = getelementptr inbounds %s.0, %s.0* %a0, i32 7, i32 0
+  %v0 = getelementptr inbounds %s.0, ptr %a0, i32 7, i32 0
   %v1 = mul i32 %a1, 4
   %v2 = add i32 %v1, -32
-  store i32 %v2, i32* %v0, align 4
+  store i32 %v2, ptr %v0, align 4
   %v3 = add i32 %a1, -1
-  %v4 = getelementptr inbounds %s.0, %s.0* %a0, i32 %v3, i32 0
-  store i32 1, i32* %v4, align 4
-  store i32 0, i32* @g1, align 4, !tbaa !4
-  store %s.0* %a0, %s.0** @g0, align 4
+  %v4 = getelementptr inbounds %s.0, ptr %a0, i32 %v3, i32 0
+  store i32 1, ptr %v4, align 4
+  store i32 0, ptr @g1, align 4, !tbaa !4
+  store ptr %a0, ptr @g0, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/atomic-opaque-basic.ll b/llvm/test/CodeGen/Hexagon/atomic-opaque-basic.ll
index 2b694b8fdf770..44f4cb1e61aae 100644
--- a/llvm/test/CodeGen/Hexagon/atomic-opaque-basic.ll
+++ b/llvm/test/CodeGen/Hexagon/atomic-opaque-basic.ll
@@ -54,8 +54,7 @@ define void @f0() #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
   %v0 = alloca %s.0
-  %v1 = getelementptr %s.0, %s.0* %v0, i32 0, i32 0
-  atomicrmw add i8* %v1, i8 2 monotonic
+  atomicrmw add ptr %v0, i8 2 monotonic
   ret void
 }
 
@@ -116,7 +115,7 @@ define void @f1() #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
 entry:
-  %v0 = cmpxchg volatile i8* @g0, i8 0, i8 1 seq_cst seq_cst
+  %v0 = cmpxchg volatile ptr @g0, i8 0, i8 1 seq_cst seq_cst
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/atomic-rmw-add.ll b/llvm/test/CodeGen/Hexagon/atomic-rmw-add.ll
index aedbd6101c4f9..f1ffc5f4f68db 100644
--- a/llvm/test/CodeGen/Hexagon/atomic-rmw-add.ll
+++ b/llvm/test/CodeGen/Hexagon/atomic-rmw-add.ll
@@ -13,8 +13,7 @@
 
 define void @b() #0 {
   %d = alloca %struct.a
-  %c = getelementptr %struct.a, %struct.a* %d, i32 0, i32 0
-  atomicrmw add i8* %c, i8 2 monotonic
+  atomicrmw add ptr %d, i8 2 monotonic
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/atomic-store-byte.ll b/llvm/test/CodeGen/Hexagon/atomic-store-byte.ll
index e3febe0264ad4..3e40f535169a8 100644
--- a/llvm/test/CodeGen/Hexagon/atomic-store-byte.ll
+++ b/llvm/test/CodeGen/Hexagon/atomic-store-byte.ll
@@ -12,7 +12,7 @@
 ; Function Attrs: nofree norecurse nounwind
 define dso_local void @foo() local_unnamed_addr #0 {
 entry:
-  %0 = cmpxchg volatile i8* @foo.a00, i8 0, i8 1 seq_cst seq_cst
+  %0 = cmpxchg volatile ptr @foo.a00, i8 0, i8 1 seq_cst seq_cst
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/build-vector-float-type.ll b/llvm/test/CodeGen/Hexagon/autohvx/build-vector-float-type.ll
index 2eba9e2db446c..e261e99f1a632 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/build-vector-float-type.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/build-vector-float-type.ll
@@ -8,167 +8,166 @@ target triple = "hexagon"
 ; Function Attrs: nounwind
 ; CHECK-LABEL: f0:
 ; CHECK: vinsert
-define <32 x float> @f0(i32* %a0, float* %a1) #0 {
+define <32 x float> @f0(ptr %a0, ptr %a1) #0 {
 b0:
-  %v0 = getelementptr i32, i32* %a0, i32 0
-  %v1 = load i32, i32* %v0, align 4
-  %v2 = getelementptr float, float* %a1, i32 %v1
-  %v3 = load float, float* %v2, align 4
+  %v1 = load i32, ptr %a0, align 4
+  %v2 = getelementptr float, ptr %a1, i32 %v1
+  %v3 = load float, ptr %v2, align 4
   %v4 = insertelement <32 x float> undef, float %v3, i32 0
-  %v5 = getelementptr i32, i32* %a0, i32 1
-  %v6 = load i32, i32* %v5, align 4
-  %v7 = getelementptr float, float* %a1, i32 %v6
-  %v8 = load float, float* %v7, align 4
+  %v5 = getelementptr i32, ptr %a0, i32 1
+  %v6 = load i32, ptr %v5, align 4
+  %v7 = getelementptr float, ptr %a1, i32 %v6
+  %v8 = load float, ptr %v7, align 4
   %v9 = insertelement <32 x float> %v4, float %v8, i32 1
-  %v10 = getelementptr i32, i32* %a0, i32 2
-  %v11 = load i32, i32* %v10, align 4
-  %v12 = getelementptr float, float* %a1, i32 %v11
-  %v13 = load float, float* %v12, align 4
+  %v10 = getelementptr i32, ptr %a0, i32 2
+  %v11 = load i32, ptr %v10, align 4
+  %v12 = getelementptr float, ptr %a1, i32 %v11
+  %v13 = load float, ptr %v12, align 4
   %v14 = insertelement <32 x float> %v9, float %v13, i32 2
-  %v15 = getelementptr i32, i32* %a0, i32 3
-  %v16 = load i32, i32* %v15, align 4
-  %v17 = getelementptr float, float* %a1, i32 %v16
-  %v18 = load float, float* %v17, align 4
+  %v15 = getelementptr i32, ptr %a0, i32 3
+  %v16 = load i32, ptr %v15, align 4
+  %v17 = getelementptr float, ptr %a1, i32 %v16
+  %v18 = load float, ptr %v17, align 4
   %v19 = insertelement <32 x float> %v14, float %v18, i32 3
-  %v20 = getelementptr i32, i32* %a0, i32 4
-  %v21 = load i32, i32* %v20, align 4
-  %v22 = getelementptr float, float* %a1, i32 %v21
-  %v23 = load float, float* %v22, align 4
+  %v20 = getelementptr i32, ptr %a0, i32 4
+  %v21 = load i32, ptr %v20, align 4
+  %v22 = getelementptr float, ptr %a1, i32 %v21
+  %v23 = load float, ptr %v22, align 4
   %v24 = insertelement <32 x float> %v19, float %v23, i32 4
-  %v25 = getelementptr i32, i32* %a0, i32 5
-  %v26 = load i32, i32* %v25, align 4
-  %v27 = getelementptr float, float* %a1, i32 %v26
-  %v28 = load float, float* %v27, align 4
+  %v25 = getelementptr i32, ptr %a0, i32 5
+  %v26 = load i32, ptr %v25, align 4
+  %v27 = getelementptr float, ptr %a1, i32 %v26
+  %v28 = load float, ptr %v27, align 4
   %v29 = insertelement <32 x float> %v24, float %v28, i32 5
-  %v30 = getelementptr i32, i32* %a0, i32 6
-  %v31 = load i32, i32* %v30, align 4
-  %v32 = getelementptr float, float* %a1, i32 %v31
-  %v33 = load float, float* %v32, align 4
+  %v30 = getelementptr i32, ptr %a0, i32 6
+  %v31 = load i32, ptr %v30, align 4
+  %v32 = getelementptr float, ptr %a1, i32 %v31
+  %v33 = load float, ptr %v32, align 4
   %v34 = insertelement <32 x float> %v29, float %v33, i32 6
-  %v35 = getelementptr i32, i32* %a0, i32 7
-  %v36 = load i32, i32* %v35, align 4
-  %v37 = getelementptr float, float* %a1, i32 %v36
-  %v38 = load float, float* %v37, align 4
+  %v35 = getelementptr i32, ptr %a0, i32 7
+  %v36 = load i32, ptr %v35, align 4
+  %v37 = getelementptr float, ptr %a1, i32 %v36
+  %v38 = load float, ptr %v37, align 4
   %v39 = insertelement <32 x float> %v34, float %v38, i32 7
-  %v40 = getelementptr i32, i32* %a0, i32 8
-  %v41 = load i32, i32* %v40, align 4
-  %v42 = getelementptr float, float* %a1, i32 %v41
-  %v43 = load float, float* %v42, align 4
+  %v40 = getelementptr i32, ptr %a0, i32 8
+  %v41 = load i32, ptr %v40, align 4
+  %v42 = getelementptr float, ptr %a1, i32 %v41
+  %v43 = load float, ptr %v42, align 4
   %v44 = insertelement <32 x float> %v39, float %v43, i32 8
-  %v45 = getelementptr i32, i32* %a0, i32 9
-  %v46 = load i32, i32* %v45, align 4
-  %v47 = getelementptr float, float* %a1, i32 %v46
-  %v48 = load float, float* %v47, align 4
+  %v45 = getelementptr i32, ptr %a0, i32 9
+  %v46 = load i32, ptr %v45, align 4
+  %v47 = getelementptr float, ptr %a1, i32 %v46
+  %v48 = load float, ptr %v47, align 4
   %v49 = insertelement <32 x float> %v44, float %v48, i32 9
-  %v50 = getelementptr i32, i32* %a0, i32 10
-  %v51 = load i32, i32* %v50, align 4
-  %v52 = getelementptr float, float* %a1, i32 %v51
-  %v53 = load float, float* %v52, align 4
+  %v50 = getelementptr i32, ptr %a0, i32 10
+  %v51 = load i32, ptr %v50, align 4
+  %v52 = getelementptr float, ptr %a1, i32 %v51
+  %v53 = load float, ptr %v52, align 4
   %v54 = insertelement <32 x float> %v49, float %v53, i32 10
-  %v55 = getelementptr i32, i32* %a0, i32 11
-  %v56 = load i32, i32* %v55, align 4
-  %v57 = getelementptr float, float* %a1, i32 %v56
-  %v58 = load float, float* %v57, align 4
+  %v55 = getelementptr i32, ptr %a0, i32 11
+  %v56 = load i32, ptr %v55, align 4
+  %v57 = getelementptr float, ptr %a1, i32 %v56
+  %v58 = load float, ptr %v57, align 4
   %v59 = insertelement <32 x float> %v54, float %v58, i32 11
-  %v60 = getelementptr i32, i32* %a0, i32 12
-  %v61 = load i32, i32* %v60, align 4
-  %v62 = getelementptr float, float* %a1, i32 %v61
-  %v63 = load float, float* %v62, align 4
+  %v60 = getelementptr i32, ptr %a0, i32 12
+  %v61 = load i32, ptr %v60, align 4
+  %v62 = getelementptr float, ptr %a1, i32 %v61
+  %v63 = load float, ptr %v62, align 4
   %v64 = insertelement <32 x float> %v59, float %v63, i32 12
-  %v65 = getelementptr i32, i32* %a0, i32 13
-  %v66 = load i32, i32* %v65, align 4
-  %v67 = getelementptr float, float* %a1, i32 %v66
-  %v68 = load float, float* %v67, align 4
+  %v65 = getelementptr i32, ptr %a0, i32 13
+  %v66 = load i32, ptr %v65, align 4
+  %v67 = getelementptr float, ptr %a1, i32 %v66
+  %v68 = load float, ptr %v67, align 4
   %v69 = insertelement <32 x float> %v64, float %v68, i32 13
-  %v70 = getelementptr i32, i32* %a0, i32 14
-  %v71 = load i32, i32* %v70, align 4
-  %v72 = getelementptr float, float* %a1, i32 %v71
-  %v73 = load float, float* %v72, align 4
+  %v70 = getelementptr i32, ptr %a0, i32 14
+  %v71 = load i32, ptr %v70, align 4
+  %v72 = getelementptr float, ptr %a1, i32 %v71
+  %v73 = load float, ptr %v72, align 4
   %v74 = insertelement <32 x float> %v69, float %v73, i32 14
-  %v75 = getelementptr i32, i32* %a0, i32 15
-  %v76 = load i32, i32* %v75, align 4
-  %v77 = getelementptr float, float* %a1, i32 %v76
-  %v78 = load float, float* %v77, align 4
+  %v75 = getelementptr i32, ptr %a0, i32 15
+  %v76 = load i32, ptr %v75, align 4
+  %v77 = getelementptr float, ptr %a1, i32 %v76
+  %v78 = load float, ptr %v77, align 4
   %v79 = insertelement <32 x float> %v74, float %v78, i32 15
-  %v80 = getelementptr i32, i32* %a0, i32 16
-  %v81 = load i32, i32* %v80, align 4
-  %v82 = getelementptr float, float* %a1, i32 %v81
-  %v83 = load float, float* %v82, align 4
+  %v80 = getelementptr i32, ptr %a0, i32 16
+  %v81 = load i32, ptr %v80, align 4
+  %v82 = getelementptr float, ptr %a1, i32 %v81
+  %v83 = load float, ptr %v82, align 4
   %v84 = insertelement <32 x float> %v79, float %v83, i32 16
-  %v85 = getelementptr i32, i32* %a0, i32 17
-  %v86 = load i32, i32* %v85, align 4
-  %v87 = getelementptr float, float* %a1, i32 %v86
-  %v88 = load float, float* %v87, align 4
+  %v85 = getelementptr i32, ptr %a0, i32 17
+  %v86 = load i32, ptr %v85, align 4
+  %v87 = getelementptr float, ptr %a1, i32 %v86
+  %v88 = load float, ptr %v87, align 4
   %v89 = insertelement <32 x float> %v84, float %v88, i32 17
-  %v90 = getelementptr i32, i32* %a0, i32 18
-  %v91 = load i32, i32* %v90, align 4
-  %v92 = getelementptr float, float* %a1, i32 %v91
-  %v93 = load float, float* %v92, align 4
+  %v90 = getelementptr i32, ptr %a0, i32 18
+  %v91 = load i32, ptr %v90, align 4
+  %v92 = getelementptr float, ptr %a1, i32 %v91
+  %v93 = load float, ptr %v92, align 4
   %v94 = insertelement <32 x float> %v89, float %v93, i32 18
-  %v95 = getelementptr i32, i32* %a0, i32 19
-  %v96 = load i32, i32* %v95, align 4
-  %v97 = getelementptr float, float* %a1, i32 %v96
-  %v98 = load float, float* %v97, align 4
+  %v95 = getelementptr i32, ptr %a0, i32 19
+  %v96 = load i32, ptr %v95, align 4
+  %v97 = getelementptr float, ptr %a1, i32 %v96
+  %v98 = load float, ptr %v97, align 4
   %v99 = insertelement <32 x float> %v94, float %v98, i32 19
-  %v100 = getelementptr i32, i32* %a0, i32 20
-  %v101 = load i32, i32* %v100, align 4
-  %v102 = getelementptr float, float* %a1, i32 %v101
-  %v103 = load float, float* %v102, align 4
+  %v100 = getelementptr i32, ptr %a0, i32 20
+  %v101 = load i32, ptr %v100, align 4
+  %v102 = getelementptr float, ptr %a1, i32 %v101
+  %v103 = load float, ptr %v102, align 4
   %v104 = insertelement <32 x float> %v99, float %v103, i32 20
-  %v105 = getelementptr i32, i32* %a0, i32 21
-  %v106 = load i32, i32* %v105, align 4
-  %v107 = getelementptr float, float* %a1, i32 %v106
-  %v108 = load float, float* %v107, align 4
+  %v105 = getelementptr i32, ptr %a0, i32 21
+  %v106 = load i32, ptr %v105, align 4
+  %v107 = getelementptr float, ptr %a1, i32 %v106
+  %v108 = load float, ptr %v107, align 4
   %v109 = insertelement <32 x float> %v104, float %v108, i32 21
-  %v110 = getelementptr i32, i32* %a0, i32 22
-  %v111 = load i32, i32* %v110, align 4
-  %v112 = getelementptr float, float* %a1, i32 %v111
-  %v113 = load float, float* %v112, align 4
+  %v110 = getelementptr i32, ptr %a0, i32 22
+  %v111 = load i32, ptr %v110, align 4
+  %v112 = getelementptr float, ptr %a1, i32 %v111
+  %v113 = load float, ptr %v112, align 4
   %v114 = insertelement <32 x float> %v109, float %v113, i32 22
-  %v115 = getelementptr i32, i32* %a0, i32 23
-  %v116 = load i32, i32* %v115, align 4
-  %v117 = getelementptr float, float* %a1, i32 %v116
-  %v118 = load float, float* %v117, align 4
+  %v115 = getelementptr i32, ptr %a0, i32 23
+  %v116 = load i32, ptr %v115, align 4
+  %v117 = getelementptr float, ptr %a1, i32 %v116
+  %v118 = load float, ptr %v117, align 4
   %v119 = insertelement <32 x float> %v114, float %v118, i32 23
-  %v120 = getelementptr i32, i32* %a0, i32 24
-  %v121 = load i32, i32* %v120, align 4
-  %v122 = getelementptr float, float* %a1, i32 %v121
-  %v123 = load float, float* %v122, align 4
+  %v120 = getelementptr i32, ptr %a0, i32 24
+  %v121 = load i32, ptr %v120, align 4
+  %v122 = getelementptr float, ptr %a1, i32 %v121
+  %v123 = load float, ptr %v122, align 4
   %v124 = insertelement <32 x float> %v119, float %v123, i32 24
-  %v125 = getelementptr i32, i32* %a0, i32 25
-  %v126 = load i32, i32* %v125, align 4
-  %v127 = getelementptr float, float* %a1, i32 %v126
-  %v128 = load float, float* %v127, align 4
+  %v125 = getelementptr i32, ptr %a0, i32 25
+  %v126 = load i32, ptr %v125, align 4
+  %v127 = getelementptr float, ptr %a1, i32 %v126
+  %v128 = load float, ptr %v127, align 4
   %v129 = insertelement <32 x float> %v124, float %v128, i32 25
-  %v130 = getelementptr i32, i32* %a0, i32 26
-  %v131 = load i32, i32* %v130, align 4
-  %v132 = getelementptr float, float* %a1, i32 %v131
-  %v133 = load float, float* %v132, align 4
+  %v130 = getelementptr i32, ptr %a0, i32 26
+  %v131 = load i32, ptr %v130, align 4
+  %v132 = getelementptr float, ptr %a1, i32 %v131
+  %v133 = load float, ptr %v132, align 4
   %v134 = insertelement <32 x float> %v129, float %v133, i32 26
-  %v135 = getelementptr i32, i32* %a0, i32 27
-  %v136 = load i32, i32* %v135, align 4
-  %v137 = getelementptr float, float* %a1, i32 %v136
-  %v138 = load float, float* %v137, align 4
+  %v135 = getelementptr i32, ptr %a0, i32 27
+  %v136 = load i32, ptr %v135, align 4
+  %v137 = getelementptr float, ptr %a1, i32 %v136
+  %v138 = load float, ptr %v137, align 4
   %v139 = insertelement <32 x float> %v134, float %v138, i32 27
-  %v140 = getelementptr i32, i32* %a0, i32 28
-  %v141 = load i32, i32* %v140, align 4
-  %v142 = getelementptr float, float* %a1, i32 %v141
-  %v143 = load float, float* %v142, align 4
+  %v140 = getelementptr i32, ptr %a0, i32 28
+  %v141 = load i32, ptr %v140, align 4
+  %v142 = getelementptr float, ptr %a1, i32 %v141
+  %v143 = load float, ptr %v142, align 4
   %v144 = insertelement <32 x float> %v139, float %v143, i32 28
-  %v145 = getelementptr i32, i32* %a0, i32 29
-  %v146 = load i32, i32* %v145, align 4
-  %v147 = getelementptr float, float* %a1, i32 %v146
-  %v148 = load float, float* %v147, align 4
+  %v145 = getelementptr i32, ptr %a0, i32 29
+  %v146 = load i32, ptr %v145, align 4
+  %v147 = getelementptr float, ptr %a1, i32 %v146
+  %v148 = load float, ptr %v147, align 4
   %v149 = insertelement <32 x float> %v144, float %v148, i32 29
-  %v150 = getelementptr i32, i32* %a0, i32 30
-  %v151 = load i32, i32* %v150, align 4
-  %v152 = getelementptr float, float* %a1, i32 %v151
-  %v153 = load float, float* %v152, align 4
+  %v150 = getelementptr i32, ptr %a0, i32 30
+  %v151 = load i32, ptr %v150, align 4
+  %v152 = getelementptr float, ptr %a1, i32 %v151
+  %v153 = load float, ptr %v152, align 4
   %v154 = insertelement <32 x float> %v149, float %v153, i32 30
-  %v155 = getelementptr i32, i32* %a0, i32 31
-  %v156 = load i32, i32* %v155, align 4
-  %v157 = getelementptr float, float* %a1, i32 %v156
-  %v158 = load float, float* %v157, align 4
+  %v155 = getelementptr i32, ptr %a0, i32 31
+  %v156 = load i32, ptr %v155, align 4
+  %v157 = getelementptr float, ptr %a1, i32 %v156
+  %v158 = load float, ptr %v157, align 4
   %v159 = insertelement <32 x float> %v154, float %v158, i32 31
   ret <32 x float> %v159
 }
@@ -176,327 +175,326 @@ b0:
 ; Function Attrs: nounwind
 ; CHECK-LABEL: f1:
 ; CHECK: vinsert
-define <64 x half> @f1(i32* %a0, half* %a1) #0 {
+define <64 x half> @f1(ptr %a0, ptr %a1) #0 {
 b0:
-  %v0 = getelementptr i32, i32* %a0, i32 0
-  %v1 = load i32, i32* %v0, align 4
-  %v2 = getelementptr half, half* %a1, i32 %v1
-  %v3 = load half, half* %v2, align 4
+  %v1 = load i32, ptr %a0, align 4
+  %v2 = getelementptr half, ptr %a1, i32 %v1
+  %v3 = load half, ptr %v2, align 4
   %v4 = insertelement <64 x half> undef, half %v3, i32 0
-  %v5 = getelementptr i32, i32* %a0, i32 1
-  %v6 = load i32, i32* %v5, align 4
-  %v7 = getelementptr half, half* %a1, i32 %v6
-  %v8 = load half, half* %v7, align 4
+  %v5 = getelementptr i32, ptr %a0, i32 1
+  %v6 = load i32, ptr %v5, align 4
+  %v7 = getelementptr half, ptr %a1, i32 %v6
+  %v8 = load half, ptr %v7, align 4
   %v9 = insertelement <64 x half> %v4, half %v8, i32 1
-  %v10 = getelementptr i32, i32* %a0, i32 2
-  %v11 = load i32, i32* %v10, align 4
-  %v12 = getelementptr half, half* %a1, i32 %v11
-  %v13 = load half, half* %v12, align 4
+  %v10 = getelementptr i32, ptr %a0, i32 2
+  %v11 = load i32, ptr %v10, align 4
+  %v12 = getelementptr half, ptr %a1, i32 %v11
+  %v13 = load half, ptr %v12, align 4
   %v14 = insertelement <64 x half> %v9, half %v13, i32 2
-  %v15 = getelementptr i32, i32* %a0, i32 3
-  %v16 = load i32, i32* %v15, align 4
-  %v17 = getelementptr half, half* %a1, i32 %v16
-  %v18 = load half, half* %v17, align 4
+  %v15 = getelementptr i32, ptr %a0, i32 3
+  %v16 = load i32, ptr %v15, align 4
+  %v17 = getelementptr half, ptr %a1, i32 %v16
+  %v18 = load half, ptr %v17, align 4
   %v19 = insertelement <64 x half> %v14, half %v18, i32 3
-  %v20 = getelementptr i32, i32* %a0, i32 4
-  %v21 = load i32, i32* %v20, align 4
-  %v22 = getelementptr half, half* %a1, i32 %v21
-  %v23 = load half, half* %v22, align 4
+  %v20 = getelementptr i32, ptr %a0, i32 4
+  %v21 = load i32, ptr %v20, align 4
+  %v22 = getelementptr half, ptr %a1, i32 %v21
+  %v23 = load half, ptr %v22, align 4
   %v24 = insertelement <64 x half> %v19, half %v23, i32 4
-  %v25 = getelementptr i32, i32* %a0, i32 5
-  %v26 = load i32, i32* %v25, align 4
-  %v27 = getelementptr half, half* %a1, i32 %v26
-  %v28 = load half, half* %v27, align 4
+  %v25 = getelementptr i32, ptr %a0, i32 5
+  %v26 = load i32, ptr %v25, align 4
+  %v27 = getelementptr half, ptr %a1, i32 %v26
+  %v28 = load half, ptr %v27, align 4
   %v29 = insertelement <64 x half> %v24, half %v28, i32 5
-  %v30 = getelementptr i32, i32* %a0, i32 6
-  %v31 = load i32, i32* %v30, align 4
-  %v32 = getelementptr half, half* %a1, i32 %v31
-  %v33 = load half, half* %v32, align 4
+  %v30 = getelementptr i32, ptr %a0, i32 6
+  %v31 = load i32, ptr %v30, align 4
+  %v32 = getelementptr half, ptr %a1, i32 %v31
+  %v33 = load half, ptr %v32, align 4
   %v34 = insertelement <64 x half> %v29, half %v33, i32 6
-  %v35 = getelementptr i32, i32* %a0, i32 7
-  %v36 = load i32, i32* %v35, align 4
-  %v37 = getelementptr half, half* %a1, i32 %v36
-  %v38 = load half, half* %v37, align 4
+  %v35 = getelementptr i32, ptr %a0, i32 7
+  %v36 = load i32, ptr %v35, align 4
+  %v37 = getelementptr half, ptr %a1, i32 %v36
+  %v38 = load half, ptr %v37, align 4
   %v39 = insertelement <64 x half> %v34, half %v38, i32 7
-  %v40 = getelementptr i32, i32* %a0, i32 8
-  %v41 = load i32, i32* %v40, align 4
-  %v42 = getelementptr half, half* %a1, i32 %v41
-  %v43 = load half, half* %v42, align 4
+  %v40 = getelementptr i32, ptr %a0, i32 8
+  %v41 = load i32, ptr %v40, align 4
+  %v42 = getelementptr half, ptr %a1, i32 %v41
+  %v43 = load half, ptr %v42, align 4
   %v44 = insertelement <64 x half> %v39, half %v43, i32 8
-  %v45 = getelementptr i32, i32* %a0, i32 9
-  %v46 = load i32, i32* %v45, align 4
-  %v47 = getelementptr half, half* %a1, i32 %v46
-  %v48 = load half, half* %v47, align 4
+  %v45 = getelementptr i32, ptr %a0, i32 9
+  %v46 = load i32, ptr %v45, align 4
+  %v47 = getelementptr half, ptr %a1, i32 %v46
+  %v48 = load half, ptr %v47, align 4
   %v49 = insertelement <64 x half> %v44, half %v48, i32 9
-  %v50 = getelementptr i32, i32* %a0, i32 10
-  %v51 = load i32, i32* %v50, align 4
-  %v52 = getelementptr half, half* %a1, i32 %v51
-  %v53 = load half, half* %v52, align 4
+  %v50 = getelementptr i32, ptr %a0, i32 10
+  %v51 = load i32, ptr %v50, align 4
+  %v52 = getelementptr half, ptr %a1, i32 %v51
+  %v53 = load half, ptr %v52, align 4
   %v54 = insertelement <64 x half> %v49, half %v53, i32 10
-  %v55 = getelementptr i32, i32* %a0, i32 11
-  %v56 = load i32, i32* %v55, align 4
-  %v57 = getelementptr half, half* %a1, i32 %v56
-  %v58 = load half, half* %v57, align 4
+  %v55 = getelementptr i32, ptr %a0, i32 11
+  %v56 = load i32, ptr %v55, align 4
+  %v57 = getelementptr half, ptr %a1, i32 %v56
+  %v58 = load half, ptr %v57, align 4
   %v59 = insertelement <64 x half> %v54, half %v58, i32 11
-  %v60 = getelementptr i32, i32* %a0, i32 12
-  %v61 = load i32, i32* %v60, align 4
-  %v62 = getelementptr half, half* %a1, i32 %v61
-  %v63 = load half, half* %v62, align 4
+  %v60 = getelementptr i32, ptr %a0, i32 12
+  %v61 = load i32, ptr %v60, align 4
+  %v62 = getelementptr half, ptr %a1, i32 %v61
+  %v63 = load half, ptr %v62, align 4
   %v64 = insertelement <64 x half> %v59, half %v63, i32 12
-  %v65 = getelementptr i32, i32* %a0, i32 13
-  %v66 = load i32, i32* %v65, align 4
-  %v67 = getelementptr half, half* %a1, i32 %v66
-  %v68 = load half, half* %v67, align 4
+  %v65 = getelementptr i32, ptr %a0, i32 13
+  %v66 = load i32, ptr %v65, align 4
+  %v67 = getelementptr half, ptr %a1, i32 %v66
+  %v68 = load half, ptr %v67, align 4
   %v69 = insertelement <64 x half> %v64, half %v68, i32 13
-  %v70 = getelementptr i32, i32* %a0, i32 14
-  %v71 = load i32, i32* %v70, align 4
-  %v72 = getelementptr half, half* %a1, i32 %v71
-  %v73 = load half, half* %v72, align 4
+  %v70 = getelementptr i32, ptr %a0, i32 14
+  %v71 = load i32, ptr %v70, align 4
+  %v72 = getelementptr half, ptr %a1, i32 %v71
+  %v73 = load half, ptr %v72, align 4
   %v74 = insertelement <64 x half> %v69, half %v73, i32 14
-  %v75 = getelementptr i32, i32* %a0, i32 15
-  %v76 = load i32, i32* %v75, align 4
-  %v77 = getelementptr half, half* %a1, i32 %v76
-  %v78 = load half, half* %v77, align 4
+  %v75 = getelementptr i32, ptr %a0, i32 15
+  %v76 = load i32, ptr %v75, align 4
+  %v77 = getelementptr half, ptr %a1, i32 %v76
+  %v78 = load half, ptr %v77, align 4
   %v79 = insertelement <64 x half> %v74, half %v78, i32 15
-  %v80 = getelementptr i32, i32* %a0, i32 16
-  %v81 = load i32, i32* %v80, align 4
-  %v82 = getelementptr half, half* %a1, i32 %v81
-  %v83 = load half, half* %v82, align 4
+  %v80 = getelementptr i32, ptr %a0, i32 16
+  %v81 = load i32, ptr %v80, align 4
+  %v82 = getelementptr half, ptr %a1, i32 %v81
+  %v83 = load half, ptr %v82, align 4
   %v84 = insertelement <64 x half> %v79, half %v83, i32 16
-  %v85 = getelementptr i32, i32* %a0, i32 17
-  %v86 = load i32, i32* %v85, align 4
-  %v87 = getelementptr half, half* %a1, i32 %v86
-  %v88 = load half, half* %v87, align 4
+  %v85 = getelementptr i32, ptr %a0, i32 17
+  %v86 = load i32, ptr %v85, align 4
+  %v87 = getelementptr half, ptr %a1, i32 %v86
+  %v88 = load half, ptr %v87, align 4
   %v89 = insertelement <64 x half> %v84, half %v88, i32 17
-  %v90 = getelementptr i32, i32* %a0, i32 18
-  %v91 = load i32, i32* %v90, align 4
-  %v92 = getelementptr half, half* %a1, i32 %v91
-  %v93 = load half, half* %v92, align 4
+  %v90 = getelementptr i32, ptr %a0, i32 18
+  %v91 = load i32, ptr %v90, align 4
+  %v92 = getelementptr half, ptr %a1, i32 %v91
+  %v93 = load half, ptr %v92, align 4
   %v94 = insertelement <64 x half> %v89, half %v93, i32 18
-  %v95 = getelementptr i32, i32* %a0, i32 19
-  %v96 = load i32, i32* %v95, align 4
-  %v97 = getelementptr half, half* %a1, i32 %v96
-  %v98 = load half, half* %v97, align 4
+  %v95 = getelementptr i32, ptr %a0, i32 19
+  %v96 = load i32, ptr %v95, align 4
+  %v97 = getelementptr half, ptr %a1, i32 %v96
+  %v98 = load half, ptr %v97, align 4
   %v99 = insertelement <64 x half> %v94, half %v98, i32 19
-  %v100 = getelementptr i32, i32* %a0, i32 20
-  %v101 = load i32, i32* %v100, align 4
-  %v102 = getelementptr half, half* %a1, i32 %v101
-  %v103 = load half, half* %v102, align 4
+  %v100 = getelementptr i32, ptr %a0, i32 20
+  %v101 = load i32, ptr %v100, align 4
+  %v102 = getelementptr half, ptr %a1, i32 %v101
+  %v103 = load half, ptr %v102, align 4
   %v104 = insertelement <64 x half> %v99, half %v103, i32 20
-  %v105 = getelementptr i32, i32* %a0, i32 21
-  %v106 = load i32, i32* %v105, align 4
-  %v107 = getelementptr half, half* %a1, i32 %v106
-  %v108 = load half, half* %v107, align 4
+  %v105 = getelementptr i32, ptr %a0, i32 21
+  %v106 = load i32, ptr %v105, align 4
+  %v107 = getelementptr half, ptr %a1, i32 %v106
+  %v108 = load half, ptr %v107, align 4
   %v109 = insertelement <64 x half> %v104, half %v108, i32 21
-  %v110 = getelementptr i32, i32* %a0, i32 22
-  %v111 = load i32, i32* %v110, align 4
-  %v112 = getelementptr half, half* %a1, i32 %v111
-  %v113 = load half, half* %v112, align 4
+  %v110 = getelementptr i32, ptr %a0, i32 22
+  %v111 = load i32, ptr %v110, align 4
+  %v112 = getelementptr half, ptr %a1, i32 %v111
+  %v113 = load half, ptr %v112, align 4
   %v114 = insertelement <64 x half> %v109, half %v113, i32 22
-  %v115 = getelementptr i32, i32* %a0, i32 23
-  %v116 = load i32, i32* %v115, align 4
-  %v117 = getelementptr half, half* %a1, i32 %v116
-  %v118 = load half, half* %v117, align 4
+  %v115 = getelementptr i32, ptr %a0, i32 23
+  %v116 = load i32, ptr %v115, align 4
+  %v117 = getelementptr half, ptr %a1, i32 %v116
+  %v118 = load half, ptr %v117, align 4
   %v119 = insertelement <64 x half> %v114, half %v118, i32 23
-  %v120 = getelementptr i32, i32* %a0, i32 24
-  %v121 = load i32, i32* %v120, align 4
-  %v122 = getelementptr half, half* %a1, i32 %v121
-  %v123 = load half, half* %v122, align 4
+  %v120 = getelementptr i32, ptr %a0, i32 24
+  %v121 = load i32, ptr %v120, align 4
+  %v122 = getelementptr half, ptr %a1, i32 %v121
+  %v123 = load half, ptr %v122, align 4
   %v124 = insertelement <64 x half> %v119, half %v123, i32 24
-  %v125 = getelementptr i32, i32* %a0, i32 25
-  %v126 = load i32, i32* %v125, align 4
-  %v127 = getelementptr half, half* %a1, i32 %v126
-  %v128 = load half, half* %v127, align 4
+  %v125 = getelementptr i32, ptr %a0, i32 25
+  %v126 = load i32, ptr %v125, align 4
+  %v127 = getelementptr half, ptr %a1, i32 %v126
+  %v128 = load half, ptr %v127, align 4
   %v129 = insertelement <64 x half> %v124, half %v128, i32 25
-  %v130 = getelementptr i32, i32* %a0, i32 26
-  %v131 = load i32, i32* %v130, align 4
-  %v132 = getelementptr half, half* %a1, i32 %v131
-  %v133 = load half, half* %v132, align 4
+  %v130 = getelementptr i32, ptr %a0, i32 26
+  %v131 = load i32, ptr %v130, align 4
+  %v132 = getelementptr half, ptr %a1, i32 %v131
+  %v133 = load half, ptr %v132, align 4
   %v134 = insertelement <64 x half> %v129, half %v133, i32 26
-  %v135 = getelementptr i32, i32* %a0, i32 27
-  %v136 = load i32, i32* %v135, align 4
-  %v137 = getelementptr half, half* %a1, i32 %v136
-  %v138 = load half, half* %v137, align 4
+  %v135 = getelementptr i32, ptr %a0, i32 27
+  %v136 = load i32, ptr %v135, align 4
+  %v137 = getelementptr half, ptr %a1, i32 %v136
+  %v138 = load half, ptr %v137, align 4
   %v139 = insertelement <64 x half> %v134, half %v138, i32 27
-  %v140 = getelementptr i32, i32* %a0, i32 28
-  %v141 = load i32, i32* %v140, align 4
-  %v142 = getelementptr half, half* %a1, i32 %v141
-  %v143 = load half, half* %v142, align 4
+  %v140 = getelementptr i32, ptr %a0, i32 28
+  %v141 = load i32, ptr %v140, align 4
+  %v142 = getelementptr half, ptr %a1, i32 %v141
+  %v143 = load half, ptr %v142, align 4
   %v144 = insertelement <64 x half> %v139, half %v143, i32 28
-  %v145 = getelementptr i32, i32* %a0, i32 29
-  %v146 = load i32, i32* %v145, align 4
-  %v147 = getelementptr half, half* %a1, i32 %v146
-  %v148 = load half, half* %v147, align 4
+  %v145 = getelementptr i32, ptr %a0, i32 29
+  %v146 = load i32, ptr %v145, align 4
+  %v147 = getelementptr half, ptr %a1, i32 %v146
+  %v148 = load half, ptr %v147, align 4
   %v149 = insertelement <64 x half> %v144, half %v148, i32 29
-  %v150 = getelementptr i32, i32* %a0, i32 30
-  %v151 = load i32, i32* %v150, align 4
-  %v152 = getelementptr half, half* %a1, i32 %v151
-  %v153 = load half, half* %v152, align 4
+  %v150 = getelementptr i32, ptr %a0, i32 30
+  %v151 = load i32, ptr %v150, align 4
+  %v152 = getelementptr half, ptr %a1, i32 %v151
+  %v153 = load half, ptr %v152, align 4
   %v154 = insertelement <64 x half> %v149, half %v153, i32 30
-  %v155 = getelementptr i32, i32* %a0, i32 31
-  %v156 = load i32, i32* %v155, align 4
-  %v157 = getelementptr half, half* %a1, i32 %v156
-  %v158 = load half, half* %v157, align 4
+  %v155 = getelementptr i32, ptr %a0, i32 31
+  %v156 = load i32, ptr %v155, align 4
+  %v157 = getelementptr half, ptr %a1, i32 %v156
+  %v158 = load half, ptr %v157, align 4
   %v159 = insertelement <64 x half> %v154, half %v158, i32 31
-  %v160 = getelementptr i32, i32* %a0, i32 32
-  %v161 = load i32, i32* %v160, align 4
-  %v162 = getelementptr half, half* %a1, i32 %v161
-  %v163 = load half, half* %v162, align 4
+  %v160 = getelementptr i32, ptr %a0, i32 32
+  %v161 = load i32, ptr %v160, align 4
+  %v162 = getelementptr half, ptr %a1, i32 %v161
+  %v163 = load half, ptr %v162, align 4
   %v164 = insertelement <64 x half> %v159, half %v163, i32 32
-  %v165 = getelementptr i32, i32* %a0, i32 33
-  %v166 = load i32, i32* %v165, align 4
-  %v167 = getelementptr half, half* %a1, i32 %v166
-  %v168 = load half, half* %v167, align 4
+  %v165 = getelementptr i32, ptr %a0, i32 33
+  %v166 = load i32, ptr %v165, align 4
+  %v167 = getelementptr half, ptr %a1, i32 %v166
+  %v168 = load half, ptr %v167, align 4
   %v169 = insertelement <64 x half> %v164, half %v168, i32 33
-  %v170 = getelementptr i32, i32* %a0, i32 34
-  %v171 = load i32, i32* %v170, align 4
-  %v172 = getelementptr half, half* %a1, i32 %v171
-  %v173 = load half, half* %v172, align 4
+  %v170 = getelementptr i32, ptr %a0, i32 34
+  %v171 = load i32, ptr %v170, align 4
+  %v172 = getelementptr half, ptr %a1, i32 %v171
+  %v173 = load half, ptr %v172, align 4
   %v174 = insertelement <64 x half> %v169, half %v173, i32 34
-  %v175 = getelementptr i32, i32* %a0, i32 35
-  %v176 = load i32, i32* %v175, align 4
-  %v177 = getelementptr half, half* %a1, i32 %v176
-  %v178 = load half, half* %v177, align 4
+  %v175 = getelementptr i32, ptr %a0, i32 35
+  %v176 = load i32, ptr %v175, align 4
+  %v177 = getelementptr half, ptr %a1, i32 %v176
+  %v178 = load half, ptr %v177, align 4
   %v179 = insertelement <64 x half> %v174, half %v178, i32 35
-  %v180 = getelementptr i32, i32* %a0, i32 36
-  %v181 = load i32, i32* %v180, align 4
-  %v182 = getelementptr half, half* %a1, i32 %v181
-  %v183 = load half, half* %v182, align 4
+  %v180 = getelementptr i32, ptr %a0, i32 36
+  %v181 = load i32, ptr %v180, align 4
+  %v182 = getelementptr half, ptr %a1, i32 %v181
+  %v183 = load half, ptr %v182, align 4
   %v184 = insertelement <64 x half> %v179, half %v183, i32 36
-  %v185 = getelementptr i32, i32* %a0, i32 37
-  %v186 = load i32, i32* %v185, align 4
-  %v187 = getelementptr half, half* %a1, i32 %v186
-  %v188 = load half, half* %v187, align 4
+  %v185 = getelementptr i32, ptr %a0, i32 37
+  %v186 = load i32, ptr %v185, align 4
+  %v187 = getelementptr half, ptr %a1, i32 %v186
+  %v188 = load half, ptr %v187, align 4
   %v189 = insertelement <64 x half> %v184, half %v188, i32 37
-  %v190 = getelementptr i32, i32* %a0, i32 38
-  %v191 = load i32, i32* %v190, align 4
-  %v192 = getelementptr half, half* %a1, i32 %v191
-  %v193 = load half, half* %v192, align 4
+  %v190 = getelementptr i32, ptr %a0, i32 38
+  %v191 = load i32, ptr %v190, align 4
+  %v192 = getelementptr half, ptr %a1, i32 %v191
+  %v193 = load half, ptr %v192, align 4
   %v194 = insertelement <64 x half> %v189, half %v193, i32 38
-  %v195 = getelementptr i32, i32* %a0, i32 39
-  %v196 = load i32, i32* %v195, align 4
-  %v197 = getelementptr half, half* %a1, i32 %v196
-  %v198 = load half, half* %v197, align 4
+  %v195 = getelementptr i32, ptr %a0, i32 39
+  %v196 = load i32, ptr %v195, align 4
+  %v197 = getelementptr half, ptr %a1, i32 %v196
+  %v198 = load half, ptr %v197, align 4
   %v199 = insertelement <64 x half> %v194, half %v198, i32 39
-  %v200 = getelementptr i32, i32* %a0, i32 40
-  %v201 = load i32, i32* %v200, align 4
-  %v202 = getelementptr half, half* %a1, i32 %v201
-  %v203 = load half, half* %v202, align 4
+  %v200 = getelementptr i32, ptr %a0, i32 40
+  %v201 = load i32, ptr %v200, align 4
+  %v202 = getelementptr half, ptr %a1, i32 %v201
+  %v203 = load half, ptr %v202, align 4
   %v204 = insertelement <64 x half> %v199, half %v203, i32 40
-  %v205 = getelementptr i32, i32* %a0, i32 41
-  %v206 = load i32, i32* %v205, align 4
-  %v207 = getelementptr half, half* %a1, i32 %v206
-  %v208 = load half, half* %v207, align 4
+  %v205 = getelementptr i32, ptr %a0, i32 41
+  %v206 = load i32, ptr %v205, align 4
+  %v207 = getelementptr half, ptr %a1, i32 %v206
+  %v208 = load half, ptr %v207, align 4
   %v209 = insertelement <64 x half> %v204, half %v208, i32 41
-  %v210 = getelementptr i32, i32* %a0, i32 42
-  %v211 = load i32, i32* %v210, align 4
-  %v212 = getelementptr half, half* %a1, i32 %v211
-  %v213 = load half, half* %v212, align 4
+  %v210 = getelementptr i32, ptr %a0, i32 42
+  %v211 = load i32, ptr %v210, align 4
+  %v212 = getelementptr half, ptr %a1, i32 %v211
+  %v213 = load half, ptr %v212, align 4
   %v214 = insertelement <64 x half> %v209, half %v213, i32 42
-  %v215 = getelementptr i32, i32* %a0, i32 43
-  %v216 = load i32, i32* %v215, align 4
-  %v217 = getelementptr half, half* %a1, i32 %v216
-  %v218 = load half, half* %v217, align 4
+  %v215 = getelementptr i32, ptr %a0, i32 43
+  %v216 = load i32, ptr %v215, align 4
+  %v217 = getelementptr half, ptr %a1, i32 %v216
+  %v218 = load half, ptr %v217, align 4
   %v219 = insertelement <64 x half> %v214, half %v218, i32 43
-  %v220 = getelementptr i32, i32* %a0, i32 44
-  %v221 = load i32, i32* %v220, align 4
-  %v222 = getelementptr half, half* %a1, i32 %v221
-  %v223 = load half, half* %v222, align 4
+  %v220 = getelementptr i32, ptr %a0, i32 44
+  %v221 = load i32, ptr %v220, align 4
+  %v222 = getelementptr half, ptr %a1, i32 %v221
+  %v223 = load half, ptr %v222, align 4
   %v224 = insertelement <64 x half> %v219, half %v223, i32 44
-  %v225 = getelementptr i32, i32* %a0, i32 45
-  %v226 = load i32, i32* %v225, align 4
-  %v227 = getelementptr half, half* %a1, i32 %v226
-  %v228 = load half, half* %v227, align 4
+  %v225 = getelementptr i32, ptr %a0, i32 45
+  %v226 = load i32, ptr %v225, align 4
+  %v227 = getelementptr half, ptr %a1, i32 %v226
+  %v228 = load half, ptr %v227, align 4
   %v229 = insertelement <64 x half> %v224, half %v228, i32 45
-  %v230 = getelementptr i32, i32* %a0, i32 46
-  %v231 = load i32, i32* %v230, align 4
-  %v232 = getelementptr half, half* %a1, i32 %v231
-  %v233 = load half, half* %v232, align 4
+  %v230 = getelementptr i32, ptr %a0, i32 46
+  %v231 = load i32, ptr %v230, align 4
+  %v232 = getelementptr half, ptr %a1, i32 %v231
+  %v233 = load half, ptr %v232, align 4
   %v234 = insertelement <64 x half> %v229, half %v233, i32 46
-  %v235 = getelementptr i32, i32* %a0, i32 47
-  %v236 = load i32, i32* %v235, align 4
-  %v237 = getelementptr half, half* %a1, i32 %v236
-  %v238 = load half, half* %v237, align 4
+  %v235 = getelementptr i32, ptr %a0, i32 47
+  %v236 = load i32, ptr %v235, align 4
+  %v237 = getelementptr half, ptr %a1, i32 %v236
+  %v238 = load half, ptr %v237, align 4
   %v239 = insertelement <64 x half> %v234, half %v238, i32 47
-  %v240 = getelementptr i32, i32* %a0, i32 48
-  %v241 = load i32, i32* %v240, align 4
-  %v242 = getelementptr half, half* %a1, i32 %v241
-  %v243 = load half, half* %v242, align 4
+  %v240 = getelementptr i32, ptr %a0, i32 48
+  %v241 = load i32, ptr %v240, align 4
+  %v242 = getelementptr half, ptr %a1, i32 %v241
+  %v243 = load half, ptr %v242, align 4
   %v244 = insertelement <64 x half> %v239, half %v243, i32 48
-  %v245 = getelementptr i32, i32* %a0, i32 49
-  %v246 = load i32, i32* %v245, align 4
-  %v247 = getelementptr half, half* %a1, i32 %v246
-  %v248 = load half, half* %v247, align 4
+  %v245 = getelementptr i32, ptr %a0, i32 49
+  %v246 = load i32, ptr %v245, align 4
+  %v247 = getelementptr half, ptr %a1, i32 %v246
+  %v248 = load half, ptr %v247, align 4
   %v249 = insertelement <64 x half> %v244, half %v248, i32 49
-  %v250 = getelementptr i32, i32* %a0, i32 50
-  %v251 = load i32, i32* %v250, align 4
-  %v252 = getelementptr half, half* %a1, i32 %v251
-  %v253 = load half, half* %v252, align 4
+  %v250 = getelementptr i32, ptr %a0, i32 50
+  %v251 = load i32, ptr %v250, align 4
+  %v252 = getelementptr half, ptr %a1, i32 %v251
+  %v253 = load half, ptr %v252, align 4
   %v254 = insertelement <64 x half> %v249, half %v253, i32 50
-  %v255 = getelementptr i32, i32* %a0, i32 51
-  %v256 = load i32, i32* %v255, align 4
-  %v257 = getelementptr half, half* %a1, i32 %v256
-  %v258 = load half, half* %v257, align 4
+  %v255 = getelementptr i32, ptr %a0, i32 51
+  %v256 = load i32, ptr %v255, align 4
+  %v257 = getelementptr half, ptr %a1, i32 %v256
+  %v258 = load half, ptr %v257, align 4
   %v259 = insertelement <64 x half> %v254, half %v258, i32 51
-  %v260 = getelementptr i32, i32* %a0, i32 52
-  %v261 = load i32, i32* %v260, align 4
-  %v262 = getelementptr half, half* %a1, i32 %v261
-  %v263 = load half, half* %v262, align 4
+  %v260 = getelementptr i32, ptr %a0, i32 52
+  %v261 = load i32, ptr %v260, align 4
+  %v262 = getelementptr half, ptr %a1, i32 %v261
+  %v263 = load half, ptr %v262, align 4
   %v264 = insertelement <64 x half> %v259, half %v263, i32 52
-  %v265 = getelementptr i32, i32* %a0, i32 53
-  %v266 = load i32, i32* %v265, align 4
-  %v267 = getelementptr half, half* %a1, i32 %v266
-  %v268 = load half, half* %v267, align 4
+  %v265 = getelementptr i32, ptr %a0, i32 53
+  %v266 = load i32, ptr %v265, align 4
+  %v267 = getelementptr half, ptr %a1, i32 %v266
+  %v268 = load half, ptr %v267, align 4
   %v269 = insertelement <64 x half> %v264, half %v268, i32 53
-  %v270 = getelementptr i32, i32* %a0, i32 54
-  %v271 = load i32, i32* %v270, align 4
-  %v272 = getelementptr half, half* %a1, i32 %v271
-  %v273 = load half, half* %v272, align 4
+  %v270 = getelementptr i32, ptr %a0, i32 54
+  %v271 = load i32, ptr %v270, align 4
+  %v272 = getelementptr half, ptr %a1, i32 %v271
+  %v273 = load half, ptr %v272, align 4
   %v274 = insertelement <64 x half> %v269, half %v273, i32 54
-  %v275 = getelementptr i32, i32* %a0, i32 55
-  %v276 = load i32, i32* %v275, align 4
-  %v277 = getelementptr half, half* %a1, i32 %v276
-  %v278 = load half, half* %v277, align 4
+  %v275 = getelementptr i32, ptr %a0, i32 55
+  %v276 = load i32, ptr %v275, align 4
+  %v277 = getelementptr half, ptr %a1, i32 %v276
+  %v278 = load half, ptr %v277, align 4
   %v279 = insertelement <64 x half> %v274, half %v278, i32 55
-  %v280 = getelementptr i32, i32* %a0, i32 56
-  %v281 = load i32, i32* %v280, align 4
-  %v282 = getelementptr half, half* %a1, i32 %v281
-  %v283 = load half, half* %v282, align 4
+  %v280 = getelementptr i32, ptr %a0, i32 56
+  %v281 = load i32, ptr %v280, align 4
+  %v282 = getelementptr half, ptr %a1, i32 %v281
+  %v283 = load half, ptr %v282, align 4
   %v284 = insertelement <64 x half> %v279, half %v283, i32 56
-  %v285 = getelementptr i32, i32* %a0, i32 57
-  %v286 = load i32, i32* %v285, align 4
-  %v287 = getelementptr half, half* %a1, i32 %v286
-  %v288 = load half, half* %v287, align 4
+  %v285 = getelementptr i32, ptr %a0, i32 57
+  %v286 = load i32, ptr %v285, align 4
+  %v287 = getelementptr half, ptr %a1, i32 %v286
+  %v288 = load half, ptr %v287, align 4
   %v289 = insertelement <64 x half> %v284, half %v288, i32 57
-  %v290 = getelementptr i32, i32* %a0, i32 58
-  %v291 = load i32, i32* %v290, align 4
-  %v292 = getelementptr half, half* %a1, i32 %v291
-  %v293 = load half, half* %v292, align 4
+  %v290 = getelementptr i32, ptr %a0, i32 58
+  %v291 = load i32, ptr %v290, align 4
+  %v292 = getelementptr half, ptr %a1, i32 %v291
+  %v293 = load half, ptr %v292, align 4
   %v294 = insertelement <64 x half> %v289, half %v293, i32 58
-  %v295 = getelementptr i32, i32* %a0, i32 59
-  %v296 = load i32, i32* %v295, align 4
-  %v297 = getelementptr half, half* %a1, i32 %v296
-  %v298 = load half, half* %v297, align 4
+  %v295 = getelementptr i32, ptr %a0, i32 59
+  %v296 = load i32, ptr %v295, align 4
+  %v297 = getelementptr half, ptr %a1, i32 %v296
+  %v298 = load half, ptr %v297, align 4
   %v299 = insertelement <64 x half> %v294, half %v298, i32 59
-  %v300 = getelementptr i32, i32* %a0, i32 60
-  %v301 = load i32, i32* %v300, align 4
-  %v302 = getelementptr half, half* %a1, i32 %v301
-  %v303 = load half, half* %v302, align 4
+  %v300 = getelementptr i32, ptr %a0, i32 60
+  %v301 = load i32, ptr %v300, align 4
+  %v302 = getelementptr half, ptr %a1, i32 %v301
+  %v303 = load half, ptr %v302, align 4
   %v304 = insertelement <64 x half> %v299, half %v303, i32 60
-  %v305 = getelementptr i32, i32* %a0, i32 61
-  %v306 = load i32, i32* %v305, align 4
-  %v307 = getelementptr half, half* %a1, i32 %v306
-  %v308 = load half, half* %v307, align 4
+  %v305 = getelementptr i32, ptr %a0, i32 61
+  %v306 = load i32, ptr %v305, align 4
+  %v307 = getelementptr half, ptr %a1, i32 %v306
+  %v308 = load half, ptr %v307, align 4
   %v309 = insertelement <64 x half> %v304, half %v308, i32 61
-  %v310 = getelementptr i32, i32* %a0, i32 62
-  %v311 = load i32, i32* %v310, align 4
-  %v312 = getelementptr half, half* %a1, i32 %v311
-  %v313 = load half, half* %v312, align 4
+  %v310 = getelementptr i32, ptr %a0, i32 62
+  %v311 = load i32, ptr %v310, align 4
+  %v312 = getelementptr half, ptr %a1, i32 %v311
+  %v313 = load half, ptr %v312, align 4
   %v314 = insertelement <64 x half> %v309, half %v313, i32 62
-  %v315 = getelementptr i32, i32* %a0, i32 63
-  %v316 = load i32, i32* %v315, align 4
-  %v317 = getelementptr half, half* %a1, i32 %v316
-  %v318 = load half, half* %v317, align 4
+  %v315 = getelementptr i32, ptr %a0, i32 63
+  %v316 = load i32, ptr %v315, align 4
+  %v317 = getelementptr half, ptr %a1, i32 %v316
+  %v318 = load half, ptr %v317, align 4
   %v319 = insertelement <64 x half> %v314, half %v318, i32 63
   ret <64 x half> %v319
 }

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/build-vector-i32-type.ll b/llvm/test/CodeGen/Hexagon/autohvx/build-vector-i32-type.ll
index c534c0b748787..0e44358b34c12 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/build-vector-i32-type.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/build-vector-i32-type.ll
@@ -11,11 +11,11 @@ target triple = "hexagon"
 
 define void @fred(<16 x i16> %x, <16 x float> %y) #0 {
 b0:
-  %v1 = load <16 x float>, <16 x float>* @g0, align 8
+  %v1 = load <16 x float>, ptr @g0, align 8
   %v2 = fcmp olt <16 x float> %y, %v1
   %v3 = select <16 x i1> %v2, <16 x i16> %x, <16 x i16> zeroinitializer
   %v4 = sext <16 x i16> %v3 to <16 x i32>
-  store <16 x i32> %v4, <16 x i32>* @g1, align 64
+  store <16 x i32> %v4, ptr @g1, align 64
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/calling-conv.ll b/llvm/test/CodeGen/Hexagon/autohvx/calling-conv.ll
index 884eb6e7ac759..e4a423f0a1496 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/calling-conv.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/calling-conv.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -march=hexagon < %s | FileCheck %s
 
-define void @f0(<128 x i8> %a0, <128 x i8>* %a1) #0 {
+define void @f0(<128 x i8> %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f0:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -9,12 +9,11 @@ define void @f0(<128 x i8> %a0, <128 x i8>* %a1) #0 {
 ; CHECK-NEXT:     vmem(r0+#0) = v0
 ; CHECK-NEXT:    }
 b0:
-  %v0 = getelementptr <128 x i8>, <128 x i8>* %a1, i32 0
-  store <128 x i8> %a0, <128 x i8>* %v0, align 128
+  store <128 x i8> %a0, ptr %a1, align 128
   ret void
 }
 
-define void @f1(<128 x i8> %a0, <128 x i8> %a1, <128 x i8>* %a2) #0 {
+define void @f1(<128 x i8> %a0, <128 x i8> %a1, ptr %a2) #0 {
 ; CHECK-LABEL: f1:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -25,14 +24,13 @@ define void @f1(<128 x i8> %a0, <128 x i8> %a1, <128 x i8>* %a2) #0 {
 ; CHECK-NEXT:     vmem(r0+#1) = v1
 ; CHECK-NEXT:    }
 b0:
-  %v0 = getelementptr <128 x i8>, <128 x i8>* %a2, i32 0
-  store <128 x i8> %a0, <128 x i8>* %v0, align 128
-  %v1 = getelementptr <128 x i8>, <128 x i8>* %a2, i32 1
-  store <128 x i8> %a1, <128 x i8>* %v1, align 128
+  store <128 x i8> %a0, ptr %a2, align 128
+  %v1 = getelementptr <128 x i8>, ptr %a2, i32 1
+  store <128 x i8> %a1, ptr %v1, align 128
   ret void
 }
 
-define void @f2(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8>* %a3) #0 {
+define void @f2(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, ptr %a3) #0 {
 ; CHECK-LABEL: f2:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -46,16 +44,15 @@ define void @f2(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8>* %a3)
 ; CHECK-NEXT:     vmem(r0+#2) = v2
 ; CHECK-NEXT:    }
 b0:
-  %v0 = getelementptr <128 x i8>, <128 x i8>* %a3, i32 0
-  store <128 x i8> %a0, <128 x i8>* %v0, align 128
-  %v1 = getelementptr <128 x i8>, <128 x i8>* %a3, i32 1
-  store <128 x i8> %a1, <128 x i8>* %v1, align 128
-  %v2 = getelementptr <128 x i8>, <128 x i8>* %a3, i32 2
-  store <128 x i8> %a2, <128 x i8>* %v2, align 128
+  store <128 x i8> %a0, ptr %a3, align 128
+  %v1 = getelementptr <128 x i8>, ptr %a3, i32 1
+  store <128 x i8> %a1, ptr %v1, align 128
+  %v2 = getelementptr <128 x i8>, ptr %a3, i32 2
+  store <128 x i8> %a2, ptr %v2, align 128
   ret void
 }
 
-define void @f3(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8>* %a4) #0 {
+define void @f3(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, ptr %a4) #0 {
 ; CHECK-LABEL: f3:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -72,18 +69,17 @@ define void @f3(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3,
 ; CHECK-NEXT:     vmem(r0+#3) = v3
 ; CHECK-NEXT:    }
 b0:
-  %v0 = getelementptr <128 x i8>, <128 x i8>* %a4, i32 0
-  store <128 x i8> %a0, <128 x i8>* %v0, align 128
-  %v1 = getelementptr <128 x i8>, <128 x i8>* %a4, i32 1
-  store <128 x i8> %a1, <128 x i8>* %v1, align 128
-  %v2 = getelementptr <128 x i8>, <128 x i8>* %a4, i32 2
-  store <128 x i8> %a2, <128 x i8>* %v2, align 128
-  %v3 = getelementptr <128 x i8>, <128 x i8>* %a4, i32 3
-  store <128 x i8> %a3, <128 x i8>* %v3, align 128
+  store <128 x i8> %a0, ptr %a4, align 128
+  %v1 = getelementptr <128 x i8>, ptr %a4, i32 1
+  store <128 x i8> %a1, ptr %v1, align 128
+  %v2 = getelementptr <128 x i8>, ptr %a4, i32 2
+  store <128 x i8> %a2, ptr %v2, align 128
+  %v3 = getelementptr <128 x i8>, ptr %a4, i32 3
+  store <128 x i8> %a3, ptr %v3, align 128
   ret void
 }
 
-define void @f4(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8>* %a5) #0 {
+define void @f4(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, ptr %a5) #0 {
 ; CHECK-LABEL: f4:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -103,20 +99,19 @@ define void @f4(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3,
 ; CHECK-NEXT:     vmem(r0+#4) = v4
 ; CHECK-NEXT:    }
 b0:
-  %v0 = getelementptr <128 x i8>, <128 x i8>* %a5, i32 0
-  store <128 x i8> %a0, <128 x i8>* %v0, align 128
-  %v1 = getelementptr <128 x i8>, <128 x i8>* %a5, i32 1
-  store <128 x i8> %a1, <128 x i8>* %v1, align 128
-  %v2 = getelementptr <128 x i8>, <128 x i8>* %a5, i32 2
-  store <128 x i8> %a2, <128 x i8>* %v2, align 128
-  %v3 = getelementptr <128 x i8>, <128 x i8>* %a5, i32 3
-  store <128 x i8> %a3, <128 x i8>* %v3, align 128
-  %v4 = getelementptr <128 x i8>, <128 x i8>* %a5, i32 4
-  store <128 x i8> %a4, <128 x i8>* %v4, align 128
+  store <128 x i8> %a0, ptr %a5, align 128
+  %v1 = getelementptr <128 x i8>, ptr %a5, i32 1
+  store <128 x i8> %a1, ptr %v1, align 128
+  %v2 = getelementptr <128 x i8>, ptr %a5, i32 2
+  store <128 x i8> %a2, ptr %v2, align 128
+  %v3 = getelementptr <128 x i8>, ptr %a5, i32 3
+  store <128 x i8> %a3, ptr %v3, align 128
+  %v4 = getelementptr <128 x i8>, ptr %a5, i32 4
+  store <128 x i8> %a4, ptr %v4, align 128
   ret void
 }
 
-define void @f5(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8> %a5, <128 x i8>* %a6) #0 {
+define void @f5(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8> %a5, ptr %a6) #0 {
 ; CHECK-LABEL: f5:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -139,22 +134,21 @@ define void @f5(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3,
 ; CHECK-NEXT:     vmem(r0+#5) = v5
 ; CHECK-NEXT:    }
 b0:
-  %v0 = getelementptr <128 x i8>, <128 x i8>* %a6, i32 0
-  store <128 x i8> %a0, <128 x i8>* %v0, align 128
-  %v1 = getelementptr <128 x i8>, <128 x i8>* %a6, i32 1
-  store <128 x i8> %a1, <128 x i8>* %v1, align 128
-  %v2 = getelementptr <128 x i8>, <128 x i8>* %a6, i32 2
-  store <128 x i8> %a2, <128 x i8>* %v2, align 128
-  %v3 = getelementptr <128 x i8>, <128 x i8>* %a6, i32 3
-  store <128 x i8> %a3, <128 x i8>* %v3, align 128
-  %v4 = getelementptr <128 x i8>, <128 x i8>* %a6, i32 4
-  store <128 x i8> %a4, <128 x i8>* %v4, align 128
-  %v5 = getelementptr <128 x i8>, <128 x i8>* %a6, i32 5
-  store <128 x i8> %a5, <128 x i8>* %v5, align 128
+  store <128 x i8> %a0, ptr %a6, align 128
+  %v1 = getelementptr <128 x i8>, ptr %a6, i32 1
+  store <128 x i8> %a1, ptr %v1, align 128
+  %v2 = getelementptr <128 x i8>, ptr %a6, i32 2
+  store <128 x i8> %a2, ptr %v2, align 128
+  %v3 = getelementptr <128 x i8>, ptr %a6, i32 3
+  store <128 x i8> %a3, ptr %v3, align 128
+  %v4 = getelementptr <128 x i8>, ptr %a6, i32 4
+  store <128 x i8> %a4, ptr %v4, align 128
+  %v5 = getelementptr <128 x i8>, ptr %a6, i32 5
+  store <128 x i8> %a5, ptr %v5, align 128
   ret void
 }
 
-define void @f6(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8> %a5, <128 x i8> %a6, <128 x i8>* %a7) #0 {
+define void @f6(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8> %a5, <128 x i8> %a6, ptr %a7) #0 {
 ; CHECK-LABEL: f6:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -180,24 +174,23 @@ define void @f6(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3,
 ; CHECK-NEXT:     vmem(r0+#6) = v6
 ; CHECK-NEXT:    }
 b0:
-  %v0 = getelementptr <128 x i8>, <128 x i8>* %a7, i32 0
-  store <128 x i8> %a0, <128 x i8>* %v0, align 128
-  %v1 = getelementptr <128 x i8>, <128 x i8>* %a7, i32 1
-  store <128 x i8> %a1, <128 x i8>* %v1, align 128
-  %v2 = getelementptr <128 x i8>, <128 x i8>* %a7, i32 2
-  store <128 x i8> %a2, <128 x i8>* %v2, align 128
-  %v3 = getelementptr <128 x i8>, <128 x i8>* %a7, i32 3
-  store <128 x i8> %a3, <128 x i8>* %v3, align 128
-  %v4 = getelementptr <128 x i8>, <128 x i8>* %a7, i32 4
-  store <128 x i8> %a4, <128 x i8>* %v4, align 128
-  %v5 = getelementptr <128 x i8>, <128 x i8>* %a7, i32 5
-  store <128 x i8> %a5, <128 x i8>* %v5, align 128
-  %v6 = getelementptr <128 x i8>, <128 x i8>* %a7, i32 6
-  store <128 x i8> %a6, <128 x i8>* %v6, align 128
+  store <128 x i8> %a0, ptr %a7, align 128
+  %v1 = getelementptr <128 x i8>, ptr %a7, i32 1
+  store <128 x i8> %a1, ptr %v1, align 128
+  %v2 = getelementptr <128 x i8>, ptr %a7, i32 2
+  store <128 x i8> %a2, ptr %v2, align 128
+  %v3 = getelementptr <128 x i8>, ptr %a7, i32 3
+  store <128 x i8> %a3, ptr %v3, align 128
+  %v4 = getelementptr <128 x i8>, ptr %a7, i32 4
+  store <128 x i8> %a4, ptr %v4, align 128
+  %v5 = getelementptr <128 x i8>, ptr %a7, i32 5
+  store <128 x i8> %a5, ptr %v5, align 128
+  %v6 = getelementptr <128 x i8>, ptr %a7, i32 6
+  store <128 x i8> %a6, ptr %v6, align 128
   ret void
 }
 
-define void @f7(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8> %a5, <128 x i8> %a6, <128 x i8> %a7, <128 x i8>* %a8) #0 {
+define void @f7(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8> %a5, <128 x i8> %a6, <128 x i8> %a7, ptr %a8) #0 {
 ; CHECK-LABEL: f7:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -226,26 +219,25 @@ define void @f7(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3,
 ; CHECK-NEXT:     vmem(r0+#7) = v7
 ; CHECK-NEXT:    }
 b0:
-  %v0 = getelementptr <128 x i8>, <128 x i8>* %a8, i32 0
-  store <128 x i8> %a0, <128 x i8>* %v0, align 128
-  %v1 = getelementptr <128 x i8>, <128 x i8>* %a8, i32 1
-  store <128 x i8> %a1, <128 x i8>* %v1, align 128
-  %v2 = getelementptr <128 x i8>, <128 x i8>* %a8, i32 2
-  store <128 x i8> %a2, <128 x i8>* %v2, align 128
-  %v3 = getelementptr <128 x i8>, <128 x i8>* %a8, i32 3
-  store <128 x i8> %a3, <128 x i8>* %v3, align 128
-  %v4 = getelementptr <128 x i8>, <128 x i8>* %a8, i32 4
-  store <128 x i8> %a4, <128 x i8>* %v4, align 128
-  %v5 = getelementptr <128 x i8>, <128 x i8>* %a8, i32 5
-  store <128 x i8> %a5, <128 x i8>* %v5, align 128
-  %v6 = getelementptr <128 x i8>, <128 x i8>* %a8, i32 6
-  store <128 x i8> %a6, <128 x i8>* %v6, align 128
-  %v7 = getelementptr <128 x i8>, <128 x i8>* %a8, i32 7
-  store <128 x i8> %a7, <128 x i8>* %v7, align 128
+  store <128 x i8> %a0, ptr %a8, align 128
+  %v1 = getelementptr <128 x i8>, ptr %a8, i32 1
+  store <128 x i8> %a1, ptr %v1, align 128
+  %v2 = getelementptr <128 x i8>, ptr %a8, i32 2
+  store <128 x i8> %a2, ptr %v2, align 128
+  %v3 = getelementptr <128 x i8>, ptr %a8, i32 3
+  store <128 x i8> %a3, ptr %v3, align 128
+  %v4 = getelementptr <128 x i8>, ptr %a8, i32 4
+  store <128 x i8> %a4, ptr %v4, align 128
+  %v5 = getelementptr <128 x i8>, ptr %a8, i32 5
+  store <128 x i8> %a5, ptr %v5, align 128
+  %v6 = getelementptr <128 x i8>, ptr %a8, i32 6
+  store <128 x i8> %a6, ptr %v6, align 128
+  %v7 = getelementptr <128 x i8>, ptr %a8, i32 7
+  store <128 x i8> %a7, ptr %v7, align 128
   ret void
 }
 
-define void @f8(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8> %a5, <128 x i8> %a6, <128 x i8> %a7, <128 x i8> %a8, <128 x i8>* %a9) #0 {
+define void @f8(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8> %a5, <128 x i8> %a6, <128 x i8> %a7, <128 x i8> %a8, ptr %a9) #0 {
 ; CHECK-LABEL: f8:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -278,28 +270,27 @@ define void @f8(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3,
 ; CHECK-NEXT:     vmem(r1+#0) = v8
 ; CHECK-NEXT:    }
 b0:
-  %v0 = getelementptr <128 x i8>, <128 x i8>* %a9, i32 0
-  store <128 x i8> %a0, <128 x i8>* %v0, align 128
-  %v1 = getelementptr <128 x i8>, <128 x i8>* %a9, i32 1
-  store <128 x i8> %a1, <128 x i8>* %v1, align 128
-  %v2 = getelementptr <128 x i8>, <128 x i8>* %a9, i32 2
-  store <128 x i8> %a2, <128 x i8>* %v2, align 128
-  %v3 = getelementptr <128 x i8>, <128 x i8>* %a9, i32 3
-  store <128 x i8> %a3, <128 x i8>* %v3, align 128
-  %v4 = getelementptr <128 x i8>, <128 x i8>* %a9, i32 4
-  store <128 x i8> %a4, <128 x i8>* %v4, align 128
-  %v5 = getelementptr <128 x i8>, <128 x i8>* %a9, i32 5
-  store <128 x i8> %a5, <128 x i8>* %v5, align 128
-  %v6 = getelementptr <128 x i8>, <128 x i8>* %a9, i32 6
-  store <128 x i8> %a6, <128 x i8>* %v6, align 128
-  %v7 = getelementptr <128 x i8>, <128 x i8>* %a9, i32 7
-  store <128 x i8> %a7, <128 x i8>* %v7, align 128
-  %v8 = getelementptr <128 x i8>, <128 x i8>* %a9, i32 8
-  store <128 x i8> %a8, <128 x i8>* %v8, align 128
+  store <128 x i8> %a0, ptr %a9, align 128
+  %v1 = getelementptr <128 x i8>, ptr %a9, i32 1
+  store <128 x i8> %a1, ptr %v1, align 128
+  %v2 = getelementptr <128 x i8>, ptr %a9, i32 2
+  store <128 x i8> %a2, ptr %v2, align 128
+  %v3 = getelementptr <128 x i8>, ptr %a9, i32 3
+  store <128 x i8> %a3, ptr %v3, align 128
+  %v4 = getelementptr <128 x i8>, ptr %a9, i32 4
+  store <128 x i8> %a4, ptr %v4, align 128
+  %v5 = getelementptr <128 x i8>, ptr %a9, i32 5
+  store <128 x i8> %a5, ptr %v5, align 128
+  %v6 = getelementptr <128 x i8>, ptr %a9, i32 6
+  store <128 x i8> %a6, ptr %v6, align 128
+  %v7 = getelementptr <128 x i8>, ptr %a9, i32 7
+  store <128 x i8> %a7, ptr %v7, align 128
+  %v8 = getelementptr <128 x i8>, ptr %a9, i32 8
+  store <128 x i8> %a8, ptr %v8, align 128
   ret void
 }
 
-define void @f9(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8> %a5, <128 x i8> %a6, <128 x i8> %a7, <128 x i8> %a8, <128 x i8> %a9, <128 x i8>* %a10) #0 {
+define void @f9(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8> %a5, <128 x i8> %a6, <128 x i8> %a7, <128 x i8> %a8, <128 x i8> %a9, ptr %a10) #0 {
 ; CHECK-LABEL: f9:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -336,30 +327,29 @@ define void @f9(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3,
 ; CHECK-NEXT:     vmem(r2+#0) = v9
 ; CHECK-NEXT:    }
 b0:
-  %v0 = getelementptr <128 x i8>, <128 x i8>* %a10, i32 0
-  store <128 x i8> %a0, <128 x i8>* %v0, align 128
-  %v1 = getelementptr <128 x i8>, <128 x i8>* %a10, i32 1
-  store <128 x i8> %a1, <128 x i8>* %v1, align 128
-  %v2 = getelementptr <128 x i8>, <128 x i8>* %a10, i32 2
-  store <128 x i8> %a2, <128 x i8>* %v2, align 128
-  %v3 = getelementptr <128 x i8>, <128 x i8>* %a10, i32 3
-  store <128 x i8> %a3, <128 x i8>* %v3, align 128
-  %v4 = getelementptr <128 x i8>, <128 x i8>* %a10, i32 4
-  store <128 x i8> %a4, <128 x i8>* %v4, align 128
-  %v5 = getelementptr <128 x i8>, <128 x i8>* %a10, i32 5
-  store <128 x i8> %a5, <128 x i8>* %v5, align 128
-  %v6 = getelementptr <128 x i8>, <128 x i8>* %a10, i32 6
-  store <128 x i8> %a6, <128 x i8>* %v6, align 128
-  %v7 = getelementptr <128 x i8>, <128 x i8>* %a10, i32 7
-  store <128 x i8> %a7, <128 x i8>* %v7, align 128
-  %v8 = getelementptr <128 x i8>, <128 x i8>* %a10, i32 8
-  store <128 x i8> %a8, <128 x i8>* %v8, align 128
-  %v9 = getelementptr <128 x i8>, <128 x i8>* %a10, i32 9
-  store <128 x i8> %a9, <128 x i8>* %v9, align 128
+  store <128 x i8> %a0, ptr %a10, align 128
+  %v1 = getelementptr <128 x i8>, ptr %a10, i32 1
+  store <128 x i8> %a1, ptr %v1, align 128
+  %v2 = getelementptr <128 x i8>, ptr %a10, i32 2
+  store <128 x i8> %a2, ptr %v2, align 128
+  %v3 = getelementptr <128 x i8>, ptr %a10, i32 3
+  store <128 x i8> %a3, ptr %v3, align 128
+  %v4 = getelementptr <128 x i8>, ptr %a10, i32 4
+  store <128 x i8> %a4, ptr %v4, align 128
+  %v5 = getelementptr <128 x i8>, ptr %a10, i32 5
+  store <128 x i8> %a5, ptr %v5, align 128
+  %v6 = getelementptr <128 x i8>, ptr %a10, i32 6
+  store <128 x i8> %a6, ptr %v6, align 128
+  %v7 = getelementptr <128 x i8>, ptr %a10, i32 7
+  store <128 x i8> %a7, ptr %v7, align 128
+  %v8 = getelementptr <128 x i8>, ptr %a10, i32 8
+  store <128 x i8> %a8, ptr %v8, align 128
+  %v9 = getelementptr <128 x i8>, ptr %a10, i32 9
+  store <128 x i8> %a9, ptr %v9, align 128
   ret void
 }
 
-define void @f10(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8> %a5, <128 x i8> %a6, <128 x i8> %a7, <128 x i8> %a8, <128 x i8> %a9, <128 x i8> %a10, <128 x i8>* %a11) #0 {
+define void @f10(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8> %a5, <128 x i8> %a6, <128 x i8> %a7, <128 x i8> %a8, <128 x i8> %a9, <128 x i8> %a10, ptr %a11) #0 {
 ; CHECK-LABEL: f10:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -400,32 +390,31 @@ define void @f10(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3,
 ; CHECK-NEXT:     vmem(r3+#0) = v10
 ; CHECK-NEXT:    }
 b0:
-  %v0 = getelementptr <128 x i8>, <128 x i8>* %a11, i32 0
-  store <128 x i8> %a0, <128 x i8>* %v0, align 128
-  %v1 = getelementptr <128 x i8>, <128 x i8>* %a11, i32 1
-  store <128 x i8> %a1, <128 x i8>* %v1, align 128
-  %v2 = getelementptr <128 x i8>, <128 x i8>* %a11, i32 2
-  store <128 x i8> %a2, <128 x i8>* %v2, align 128
-  %v3 = getelementptr <128 x i8>, <128 x i8>* %a11, i32 3
-  store <128 x i8> %a3, <128 x i8>* %v3, align 128
-  %v4 = getelementptr <128 x i8>, <128 x i8>* %a11, i32 4
-  store <128 x i8> %a4, <128 x i8>* %v4, align 128
-  %v5 = getelementptr <128 x i8>, <128 x i8>* %a11, i32 5
-  store <128 x i8> %a5, <128 x i8>* %v5, align 128
-  %v6 = getelementptr <128 x i8>, <128 x i8>* %a11, i32 6
-  store <128 x i8> %a6, <128 x i8>* %v6, align 128
-  %v7 = getelementptr <128 x i8>, <128 x i8>* %a11, i32 7
-  store <128 x i8> %a7, <128 x i8>* %v7, align 128
-  %v8 = getelementptr <128 x i8>, <128 x i8>* %a11, i32 8
-  store <128 x i8> %a8, <128 x i8>* %v8, align 128
-  %v9 = getelementptr <128 x i8>, <128 x i8>* %a11, i32 9
-  store <128 x i8> %a9, <128 x i8>* %v9, align 128
-  %v10 = getelementptr <128 x i8>, <128 x i8>* %a11, i32 10
-  store <128 x i8> %a10, <128 x i8>* %v10, align 128
+  store <128 x i8> %a0, ptr %a11, align 128
+  %v1 = getelementptr <128 x i8>, ptr %a11, i32 1
+  store <128 x i8> %a1, ptr %v1, align 128
+  %v2 = getelementptr <128 x i8>, ptr %a11, i32 2
+  store <128 x i8> %a2, ptr %v2, align 128
+  %v3 = getelementptr <128 x i8>, ptr %a11, i32 3
+  store <128 x i8> %a3, ptr %v3, align 128
+  %v4 = getelementptr <128 x i8>, ptr %a11, i32 4
+  store <128 x i8> %a4, ptr %v4, align 128
+  %v5 = getelementptr <128 x i8>, ptr %a11, i32 5
+  store <128 x i8> %a5, ptr %v5, align 128
+  %v6 = getelementptr <128 x i8>, ptr %a11, i32 6
+  store <128 x i8> %a6, ptr %v6, align 128
+  %v7 = getelementptr <128 x i8>, ptr %a11, i32 7
+  store <128 x i8> %a7, ptr %v7, align 128
+  %v8 = getelementptr <128 x i8>, ptr %a11, i32 8
+  store <128 x i8> %a8, ptr %v8, align 128
+  %v9 = getelementptr <128 x i8>, ptr %a11, i32 9
+  store <128 x i8> %a9, ptr %v9, align 128
+  %v10 = getelementptr <128 x i8>, ptr %a11, i32 10
+  store <128 x i8> %a10, ptr %v10, align 128
   ret void
 }
 
-define void @f11(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8> %a5, <128 x i8> %a6, <128 x i8> %a7, <128 x i8> %a8, <128 x i8> %a9, <128 x i8> %a10, <128 x i8> %a11, <128 x i8>* %a12) #0 {
+define void @f11(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8> %a5, <128 x i8> %a6, <128 x i8> %a7, <128 x i8> %a8, <128 x i8> %a9, <128 x i8> %a10, <128 x i8> %a11, ptr %a12) #0 {
 ; CHECK-LABEL: f11:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -472,34 +461,33 @@ define void @f11(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3,
 ; CHECK-NEXT:     vmem(r4+#0) = v11
 ; CHECK-NEXT:    }
 b0:
-  %v0 = getelementptr <128 x i8>, <128 x i8>* %a12, i32 0
-  store <128 x i8> %a0, <128 x i8>* %v0, align 128
-  %v1 = getelementptr <128 x i8>, <128 x i8>* %a12, i32 1
-  store <128 x i8> %a1, <128 x i8>* %v1, align 128
-  %v2 = getelementptr <128 x i8>, <128 x i8>* %a12, i32 2
-  store <128 x i8> %a2, <128 x i8>* %v2, align 128
-  %v3 = getelementptr <128 x i8>, <128 x i8>* %a12, i32 3
-  store <128 x i8> %a3, <128 x i8>* %v3, align 128
-  %v4 = getelementptr <128 x i8>, <128 x i8>* %a12, i32 4
-  store <128 x i8> %a4, <128 x i8>* %v4, align 128
-  %v5 = getelementptr <128 x i8>, <128 x i8>* %a12, i32 5
-  store <128 x i8> %a5, <128 x i8>* %v5, align 128
-  %v6 = getelementptr <128 x i8>, <128 x i8>* %a12, i32 6
-  store <128 x i8> %a6, <128 x i8>* %v6, align 128
-  %v7 = getelementptr <128 x i8>, <128 x i8>* %a12, i32 7
-  store <128 x i8> %a7, <128 x i8>* %v7, align 128
-  %v8 = getelementptr <128 x i8>, <128 x i8>* %a12, i32 8
-  store <128 x i8> %a8, <128 x i8>* %v8, align 128
-  %v9 = getelementptr <128 x i8>, <128 x i8>* %a12, i32 9
-  store <128 x i8> %a9, <128 x i8>* %v9, align 128
-  %v10 = getelementptr <128 x i8>, <128 x i8>* %a12, i32 10
-  store <128 x i8> %a10, <128 x i8>* %v10, align 128
-  %v11 = getelementptr <128 x i8>, <128 x i8>* %a12, i32 11
-  store <128 x i8> %a11, <128 x i8>* %v11, align 128
+  store <128 x i8> %a0, ptr %a12, align 128
+  %v1 = getelementptr <128 x i8>, ptr %a12, i32 1
+  store <128 x i8> %a1, ptr %v1, align 128
+  %v2 = getelementptr <128 x i8>, ptr %a12, i32 2
+  store <128 x i8> %a2, ptr %v2, align 128
+  %v3 = getelementptr <128 x i8>, ptr %a12, i32 3
+  store <128 x i8> %a3, ptr %v3, align 128
+  %v4 = getelementptr <128 x i8>, ptr %a12, i32 4
+  store <128 x i8> %a4, ptr %v4, align 128
+  %v5 = getelementptr <128 x i8>, ptr %a12, i32 5
+  store <128 x i8> %a5, ptr %v5, align 128
+  %v6 = getelementptr <128 x i8>, ptr %a12, i32 6
+  store <128 x i8> %a6, ptr %v6, align 128
+  %v7 = getelementptr <128 x i8>, ptr %a12, i32 7
+  store <128 x i8> %a7, ptr %v7, align 128
+  %v8 = getelementptr <128 x i8>, ptr %a12, i32 8
+  store <128 x i8> %a8, ptr %v8, align 128
+  %v9 = getelementptr <128 x i8>, ptr %a12, i32 9
+  store <128 x i8> %a9, ptr %v9, align 128
+  %v10 = getelementptr <128 x i8>, ptr %a12, i32 10
+  store <128 x i8> %a10, ptr %v10, align 128
+  %v11 = getelementptr <128 x i8>, ptr %a12, i32 11
+  store <128 x i8> %a11, ptr %v11, align 128
   ret void
 }
 
-define void @f12(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8> %a5, <128 x i8> %a6, <128 x i8> %a7, <128 x i8> %a8, <128 x i8> %a9, <128 x i8> %a10, <128 x i8> %a11, <128 x i8> %a12, <128 x i8>* %a13) #0 {
+define void @f12(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8> %a5, <128 x i8> %a6, <128 x i8> %a7, <128 x i8> %a8, <128 x i8> %a9, <128 x i8> %a10, <128 x i8> %a11, <128 x i8> %a12, ptr %a13) #0 {
 ; CHECK-LABEL: f12:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -550,36 +538,35 @@ define void @f12(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3,
 ; CHECK-NEXT:     vmem(r5+#0) = v12
 ; CHECK-NEXT:    }
 b0:
-  %v0 = getelementptr <128 x i8>, <128 x i8>* %a13, i32 0
-  store <128 x i8> %a0, <128 x i8>* %v0, align 128
-  %v1 = getelementptr <128 x i8>, <128 x i8>* %a13, i32 1
-  store <128 x i8> %a1, <128 x i8>* %v1, align 128
-  %v2 = getelementptr <128 x i8>, <128 x i8>* %a13, i32 2
-  store <128 x i8> %a2, <128 x i8>* %v2, align 128
-  %v3 = getelementptr <128 x i8>, <128 x i8>* %a13, i32 3
-  store <128 x i8> %a3, <128 x i8>* %v3, align 128
-  %v4 = getelementptr <128 x i8>, <128 x i8>* %a13, i32 4
-  store <128 x i8> %a4, <128 x i8>* %v4, align 128
-  %v5 = getelementptr <128 x i8>, <128 x i8>* %a13, i32 5
-  store <128 x i8> %a5, <128 x i8>* %v5, align 128
-  %v6 = getelementptr <128 x i8>, <128 x i8>* %a13, i32 6
-  store <128 x i8> %a6, <128 x i8>* %v6, align 128
-  %v7 = getelementptr <128 x i8>, <128 x i8>* %a13, i32 7
-  store <128 x i8> %a7, <128 x i8>* %v7, align 128
-  %v8 = getelementptr <128 x i8>, <128 x i8>* %a13, i32 8
-  store <128 x i8> %a8, <128 x i8>* %v8, align 128
-  %v9 = getelementptr <128 x i8>, <128 x i8>* %a13, i32 9
-  store <128 x i8> %a9, <128 x i8>* %v9, align 128
-  %v10 = getelementptr <128 x i8>, <128 x i8>* %a13, i32 10
-  store <128 x i8> %a10, <128 x i8>* %v10, align 128
-  %v11 = getelementptr <128 x i8>, <128 x i8>* %a13, i32 11
-  store <128 x i8> %a11, <128 x i8>* %v11, align 128
-  %v12 = getelementptr <128 x i8>, <128 x i8>* %a13, i32 12
-  store <128 x i8> %a12, <128 x i8>* %v12, align 128
+  store <128 x i8> %a0, ptr %a13, align 128
+  %v1 = getelementptr <128 x i8>, ptr %a13, i32 1
+  store <128 x i8> %a1, ptr %v1, align 128
+  %v2 = getelementptr <128 x i8>, ptr %a13, i32 2
+  store <128 x i8> %a2, ptr %v2, align 128
+  %v3 = getelementptr <128 x i8>, ptr %a13, i32 3
+  store <128 x i8> %a3, ptr %v3, align 128
+  %v4 = getelementptr <128 x i8>, ptr %a13, i32 4
+  store <128 x i8> %a4, ptr %v4, align 128
+  %v5 = getelementptr <128 x i8>, ptr %a13, i32 5
+  store <128 x i8> %a5, ptr %v5, align 128
+  %v6 = getelementptr <128 x i8>, ptr %a13, i32 6
+  store <128 x i8> %a6, ptr %v6, align 128
+  %v7 = getelementptr <128 x i8>, ptr %a13, i32 7
+  store <128 x i8> %a7, ptr %v7, align 128
+  %v8 = getelementptr <128 x i8>, ptr %a13, i32 8
+  store <128 x i8> %a8, ptr %v8, align 128
+  %v9 = getelementptr <128 x i8>, ptr %a13, i32 9
+  store <128 x i8> %a9, ptr %v9, align 128
+  %v10 = getelementptr <128 x i8>, ptr %a13, i32 10
+  store <128 x i8> %a10, ptr %v10, align 128
+  %v11 = getelementptr <128 x i8>, ptr %a13, i32 11
+  store <128 x i8> %a11, ptr %v11, align 128
+  %v12 = getelementptr <128 x i8>, ptr %a13, i32 12
+  store <128 x i8> %a12, ptr %v12, align 128
   ret void
 }
 
-define void @f13(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8> %a5, <128 x i8> %a6, <128 x i8> %a7, <128 x i8> %a8, <128 x i8> %a9, <128 x i8> %a10, <128 x i8> %a11, <128 x i8> %a12, <128 x i8> %a13, <128 x i8>* %a14) #0 {
+define void @f13(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8> %a5, <128 x i8> %a6, <128 x i8> %a7, <128 x i8> %a8, <128 x i8> %a9, <128 x i8> %a10, <128 x i8> %a11, <128 x i8> %a12, <128 x i8> %a13, ptr %a14) #0 {
 ; CHECK-LABEL: f13:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -634,38 +621,37 @@ define void @f13(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3,
 ; CHECK-NEXT:     vmem(r2+#0) = v13
 ; CHECK-NEXT:    }
 b0:
-  %v0 = getelementptr <128 x i8>, <128 x i8>* %a14, i32 0
-  store <128 x i8> %a0, <128 x i8>* %v0, align 128
-  %v1 = getelementptr <128 x i8>, <128 x i8>* %a14, i32 1
-  store <128 x i8> %a1, <128 x i8>* %v1, align 128
-  %v2 = getelementptr <128 x i8>, <128 x i8>* %a14, i32 2
-  store <128 x i8> %a2, <128 x i8>* %v2, align 128
-  %v3 = getelementptr <128 x i8>, <128 x i8>* %a14, i32 3
-  store <128 x i8> %a3, <128 x i8>* %v3, align 128
-  %v4 = getelementptr <128 x i8>, <128 x i8>* %a14, i32 4
-  store <128 x i8> %a4, <128 x i8>* %v4, align 128
-  %v5 = getelementptr <128 x i8>, <128 x i8>* %a14, i32 5
-  store <128 x i8> %a5, <128 x i8>* %v5, align 128
-  %v6 = getelementptr <128 x i8>, <128 x i8>* %a14, i32 6
-  store <128 x i8> %a6, <128 x i8>* %v6, align 128
-  %v7 = getelementptr <128 x i8>, <128 x i8>* %a14, i32 7
-  store <128 x i8> %a7, <128 x i8>* %v7, align 128
-  %v8 = getelementptr <128 x i8>, <128 x i8>* %a14, i32 8
-  store <128 x i8> %a8, <128 x i8>* %v8, align 128
-  %v9 = getelementptr <128 x i8>, <128 x i8>* %a14, i32 9
-  store <128 x i8> %a9, <128 x i8>* %v9, align 128
-  %v10 = getelementptr <128 x i8>, <128 x i8>* %a14, i32 10
-  store <128 x i8> %a10, <128 x i8>* %v10, align 128
-  %v11 = getelementptr <128 x i8>, <128 x i8>* %a14, i32 11
-  store <128 x i8> %a11, <128 x i8>* %v11, align 128
-  %v12 = getelementptr <128 x i8>, <128 x i8>* %a14, i32 12
-  store <128 x i8> %a12, <128 x i8>* %v12, align 128
-  %v13 = getelementptr <128 x i8>, <128 x i8>* %a14, i32 13
-  store <128 x i8> %a13, <128 x i8>* %v13, align 128
+  store <128 x i8> %a0, ptr %a14, align 128
+  %v1 = getelementptr <128 x i8>, ptr %a14, i32 1
+  store <128 x i8> %a1, ptr %v1, align 128
+  %v2 = getelementptr <128 x i8>, ptr %a14, i32 2
+  store <128 x i8> %a2, ptr %v2, align 128
+  %v3 = getelementptr <128 x i8>, ptr %a14, i32 3
+  store <128 x i8> %a3, ptr %v3, align 128
+  %v4 = getelementptr <128 x i8>, ptr %a14, i32 4
+  store <128 x i8> %a4, ptr %v4, align 128
+  %v5 = getelementptr <128 x i8>, ptr %a14, i32 5
+  store <128 x i8> %a5, ptr %v5, align 128
+  %v6 = getelementptr <128 x i8>, ptr %a14, i32 6
+  store <128 x i8> %a6, ptr %v6, align 128
+  %v7 = getelementptr <128 x i8>, ptr %a14, i32 7
+  store <128 x i8> %a7, ptr %v7, align 128
+  %v8 = getelementptr <128 x i8>, ptr %a14, i32 8
+  store <128 x i8> %a8, ptr %v8, align 128
+  %v9 = getelementptr <128 x i8>, ptr %a14, i32 9
+  store <128 x i8> %a9, ptr %v9, align 128
+  %v10 = getelementptr <128 x i8>, ptr %a14, i32 10
+  store <128 x i8> %a10, ptr %v10, align 128
+  %v11 = getelementptr <128 x i8>, ptr %a14, i32 11
+  store <128 x i8> %a11, ptr %v11, align 128
+  %v12 = getelementptr <128 x i8>, ptr %a14, i32 12
+  store <128 x i8> %a12, ptr %v12, align 128
+  %v13 = getelementptr <128 x i8>, ptr %a14, i32 13
+  store <128 x i8> %a13, ptr %v13, align 128
   ret void
 }
 
-define void @f14(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8> %a5, <128 x i8> %a6, <128 x i8> %a7, <128 x i8> %a8, <128 x i8> %a9, <128 x i8> %a10, <128 x i8> %a11, <128 x i8> %a12, <128 x i8> %a13, <128 x i8> %a14, <128 x i8>* %a15) #0 {
+define void @f14(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8> %a5, <128 x i8> %a6, <128 x i8> %a7, <128 x i8> %a8, <128 x i8> %a9, <128 x i8> %a10, <128 x i8> %a11, <128 x i8> %a12, <128 x i8> %a13, <128 x i8> %a14, ptr %a15) #0 {
 ; CHECK-LABEL: f14:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -724,40 +710,39 @@ define void @f14(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3,
 ; CHECK-NEXT:     vmem(r2+#0) = v14
 ; CHECK-NEXT:    }
 b0:
-  %v0 = getelementptr <128 x i8>, <128 x i8>* %a15, i32 0
-  store <128 x i8> %a0, <128 x i8>* %v0, align 128
-  %v1 = getelementptr <128 x i8>, <128 x i8>* %a15, i32 1
-  store <128 x i8> %a1, <128 x i8>* %v1, align 128
-  %v2 = getelementptr <128 x i8>, <128 x i8>* %a15, i32 2
-  store <128 x i8> %a2, <128 x i8>* %v2, align 128
-  %v3 = getelementptr <128 x i8>, <128 x i8>* %a15, i32 3
-  store <128 x i8> %a3, <128 x i8>* %v3, align 128
-  %v4 = getelementptr <128 x i8>, <128 x i8>* %a15, i32 4
-  store <128 x i8> %a4, <128 x i8>* %v4, align 128
-  %v5 = getelementptr <128 x i8>, <128 x i8>* %a15, i32 5
-  store <128 x i8> %a5, <128 x i8>* %v5, align 128
-  %v6 = getelementptr <128 x i8>, <128 x i8>* %a15, i32 6
-  store <128 x i8> %a6, <128 x i8>* %v6, align 128
-  %v7 = getelementptr <128 x i8>, <128 x i8>* %a15, i32 7
-  store <128 x i8> %a7, <128 x i8>* %v7, align 128
-  %v8 = getelementptr <128 x i8>, <128 x i8>* %a15, i32 8
-  store <128 x i8> %a8, <128 x i8>* %v8, align 128
-  %v9 = getelementptr <128 x i8>, <128 x i8>* %a15, i32 9
-  store <128 x i8> %a9, <128 x i8>* %v9, align 128
-  %v10 = getelementptr <128 x i8>, <128 x i8>* %a15, i32 10
-  store <128 x i8> %a10, <128 x i8>* %v10, align 128
-  %v11 = getelementptr <128 x i8>, <128 x i8>* %a15, i32 11
-  store <128 x i8> %a11, <128 x i8>* %v11, align 128
-  %v12 = getelementptr <128 x i8>, <128 x i8>* %a15, i32 12
-  store <128 x i8> %a12, <128 x i8>* %v12, align 128
-  %v13 = getelementptr <128 x i8>, <128 x i8>* %a15, i32 13
-  store <128 x i8> %a13, <128 x i8>* %v13, align 128
-  %v14 = getelementptr <128 x i8>, <128 x i8>* %a15, i32 14
-  store <128 x i8> %a14, <128 x i8>* %v14, align 128
+  store <128 x i8> %a0, ptr %a15, align 128
+  %v1 = getelementptr <128 x i8>, ptr %a15, i32 1
+  store <128 x i8> %a1, ptr %v1, align 128
+  %v2 = getelementptr <128 x i8>, ptr %a15, i32 2
+  store <128 x i8> %a2, ptr %v2, align 128
+  %v3 = getelementptr <128 x i8>, ptr %a15, i32 3
+  store <128 x i8> %a3, ptr %v3, align 128
+  %v4 = getelementptr <128 x i8>, ptr %a15, i32 4
+  store <128 x i8> %a4, ptr %v4, align 128
+  %v5 = getelementptr <128 x i8>, ptr %a15, i32 5
+  store <128 x i8> %a5, ptr %v5, align 128
+  %v6 = getelementptr <128 x i8>, ptr %a15, i32 6
+  store <128 x i8> %a6, ptr %v6, align 128
+  %v7 = getelementptr <128 x i8>, ptr %a15, i32 7
+  store <128 x i8> %a7, ptr %v7, align 128
+  %v8 = getelementptr <128 x i8>, ptr %a15, i32 8
+  store <128 x i8> %a8, ptr %v8, align 128
+  %v9 = getelementptr <128 x i8>, ptr %a15, i32 9
+  store <128 x i8> %a9, ptr %v9, align 128
+  %v10 = getelementptr <128 x i8>, ptr %a15, i32 10
+  store <128 x i8> %a10, ptr %v10, align 128
+  %v11 = getelementptr <128 x i8>, ptr %a15, i32 11
+  store <128 x i8> %a11, ptr %v11, align 128
+  %v12 = getelementptr <128 x i8>, ptr %a15, i32 12
+  store <128 x i8> %a12, ptr %v12, align 128
+  %v13 = getelementptr <128 x i8>, ptr %a15, i32 13
+  store <128 x i8> %a13, ptr %v13, align 128
+  %v14 = getelementptr <128 x i8>, ptr %a15, i32 14
+  store <128 x i8> %a14, ptr %v14, align 128
   ret void
 }
 
-define void @f15(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8> %a5, <128 x i8> %a6, <128 x i8> %a7, <128 x i8> %a8, <128 x i8> %a9, <128 x i8> %a10, <128 x i8> %a11, <128 x i8> %a12, <128 x i8> %a13, <128 x i8> %a14, <128 x i8> %a15, <128 x i8>* %a16) #0 {
+define void @f15(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8> %a5, <128 x i8> %a6, <128 x i8> %a7, <128 x i8> %a8, <128 x i8> %a9, <128 x i8> %a10, <128 x i8> %a11, <128 x i8> %a12, <128 x i8> %a13, <128 x i8> %a14, <128 x i8> %a15, ptr %a16) #0 {
 ; CHECK-LABEL: f15:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -820,42 +805,41 @@ define void @f15(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3,
 ; CHECK-NEXT:     vmem(r2+#0) = v15
 ; CHECK-NEXT:    }
 b0:
-  %v0 = getelementptr <128 x i8>, <128 x i8>* %a16, i32 0
-  store <128 x i8> %a0, <128 x i8>* %v0, align 128
-  %v1 = getelementptr <128 x i8>, <128 x i8>* %a16, i32 1
-  store <128 x i8> %a1, <128 x i8>* %v1, align 128
-  %v2 = getelementptr <128 x i8>, <128 x i8>* %a16, i32 2
-  store <128 x i8> %a2, <128 x i8>* %v2, align 128
-  %v3 = getelementptr <128 x i8>, <128 x i8>* %a16, i32 3
-  store <128 x i8> %a3, <128 x i8>* %v3, align 128
-  %v4 = getelementptr <128 x i8>, <128 x i8>* %a16, i32 4
-  store <128 x i8> %a4, <128 x i8>* %v4, align 128
-  %v5 = getelementptr <128 x i8>, <128 x i8>* %a16, i32 5
-  store <128 x i8> %a5, <128 x i8>* %v5, align 128
-  %v6 = getelementptr <128 x i8>, <128 x i8>* %a16, i32 6
-  store <128 x i8> %a6, <128 x i8>* %v6, align 128
-  %v7 = getelementptr <128 x i8>, <128 x i8>* %a16, i32 7
-  store <128 x i8> %a7, <128 x i8>* %v7, align 128
-  %v8 = getelementptr <128 x i8>, <128 x i8>* %a16, i32 8
-  store <128 x i8> %a8, <128 x i8>* %v8, align 128
-  %v9 = getelementptr <128 x i8>, <128 x i8>* %a16, i32 9
-  store <128 x i8> %a9, <128 x i8>* %v9, align 128
-  %v10 = getelementptr <128 x i8>, <128 x i8>* %a16, i32 10
-  store <128 x i8> %a10, <128 x i8>* %v10, align 128
-  %v11 = getelementptr <128 x i8>, <128 x i8>* %a16, i32 11
-  store <128 x i8> %a11, <128 x i8>* %v11, align 128
-  %v12 = getelementptr <128 x i8>, <128 x i8>* %a16, i32 12
-  store <128 x i8> %a12, <128 x i8>* %v12, align 128
-  %v13 = getelementptr <128 x i8>, <128 x i8>* %a16, i32 13
-  store <128 x i8> %a13, <128 x i8>* %v13, align 128
-  %v14 = getelementptr <128 x i8>, <128 x i8>* %a16, i32 14
-  store <128 x i8> %a14, <128 x i8>* %v14, align 128
-  %v15 = getelementptr <128 x i8>, <128 x i8>* %a16, i32 15
-  store <128 x i8> %a15, <128 x i8>* %v15, align 128
+  store <128 x i8> %a0, ptr %a16, align 128
+  %v1 = getelementptr <128 x i8>, ptr %a16, i32 1
+  store <128 x i8> %a1, ptr %v1, align 128
+  %v2 = getelementptr <128 x i8>, ptr %a16, i32 2
+  store <128 x i8> %a2, ptr %v2, align 128
+  %v3 = getelementptr <128 x i8>, ptr %a16, i32 3
+  store <128 x i8> %a3, ptr %v3, align 128
+  %v4 = getelementptr <128 x i8>, ptr %a16, i32 4
+  store <128 x i8> %a4, ptr %v4, align 128
+  %v5 = getelementptr <128 x i8>, ptr %a16, i32 5
+  store <128 x i8> %a5, ptr %v5, align 128
+  %v6 = getelementptr <128 x i8>, ptr %a16, i32 6
+  store <128 x i8> %a6, ptr %v6, align 128
+  %v7 = getelementptr <128 x i8>, ptr %a16, i32 7
+  store <128 x i8> %a7, ptr %v7, align 128
+  %v8 = getelementptr <128 x i8>, ptr %a16, i32 8
+  store <128 x i8> %a8, ptr %v8, align 128
+  %v9 = getelementptr <128 x i8>, ptr %a16, i32 9
+  store <128 x i8> %a9, ptr %v9, align 128
+  %v10 = getelementptr <128 x i8>, ptr %a16, i32 10
+  store <128 x i8> %a10, ptr %v10, align 128
+  %v11 = getelementptr <128 x i8>, ptr %a16, i32 11
+  store <128 x i8> %a11, ptr %v11, align 128
+  %v12 = getelementptr <128 x i8>, ptr %a16, i32 12
+  store <128 x i8> %a12, ptr %v12, align 128
+  %v13 = getelementptr <128 x i8>, ptr %a16, i32 13
+  store <128 x i8> %a13, ptr %v13, align 128
+  %v14 = getelementptr <128 x i8>, ptr %a16, i32 14
+  store <128 x i8> %a14, ptr %v14, align 128
+  %v15 = getelementptr <128 x i8>, ptr %a16, i32 15
+  store <128 x i8> %a15, ptr %v15, align 128
   ret void
 }
 
-define void @f16(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8> %a5, <128 x i8> %a6, <128 x i8> %a7, <128 x i8> %a8, <128 x i8> %a9, <128 x i8> %a10, <128 x i8> %a11, <128 x i8> %a12, <128 x i8> %a13, <128 x i8> %a14, <128 x i8> %a15, <128 x i8> %a16, <128 x i8>* %a17) #0 {
+define void @f16(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3, <128 x i8> %a4, <128 x i8> %a5, <128 x i8> %a6, <128 x i8> %a7, <128 x i8> %a8, <128 x i8> %a9, <128 x i8> %a10, <128 x i8> %a11, <128 x i8> %a12, <128 x i8> %a13, <128 x i8> %a14, <128 x i8> %a15, <128 x i8> %a16, ptr %a17) #0 {
 ; CHECK-LABEL: f16:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -928,44 +912,43 @@ define void @f16(<128 x i8> %a0, <128 x i8> %a1, <128 x i8> %a2, <128 x i8> %a3,
 ; CHECK-NEXT:     r31:30 = dealloc_return(r30):raw
 ; CHECK-NEXT:    }
 b0:
-  %v0 = getelementptr <128 x i8>, <128 x i8>* %a17, i32 0
-  store <128 x i8> %a0, <128 x i8>* %v0, align 128
-  %v1 = getelementptr <128 x i8>, <128 x i8>* %a17, i32 1
-  store <128 x i8> %a1, <128 x i8>* %v1, align 128
-  %v2 = getelementptr <128 x i8>, <128 x i8>* %a17, i32 2
-  store <128 x i8> %a2, <128 x i8>* %v2, align 128
-  %v3 = getelementptr <128 x i8>, <128 x i8>* %a17, i32 3
-  store <128 x i8> %a3, <128 x i8>* %v3, align 128
-  %v4 = getelementptr <128 x i8>, <128 x i8>* %a17, i32 4
-  store <128 x i8> %a4, <128 x i8>* %v4, align 128
-  %v5 = getelementptr <128 x i8>, <128 x i8>* %a17, i32 5
-  store <128 x i8> %a5, <128 x i8>* %v5, align 128
-  %v6 = getelementptr <128 x i8>, <128 x i8>* %a17, i32 6
-  store <128 x i8> %a6, <128 x i8>* %v6, align 128
-  %v7 = getelementptr <128 x i8>, <128 x i8>* %a17, i32 7
-  store <128 x i8> %a7, <128 x i8>* %v7, align 128
-  %v8 = getelementptr <128 x i8>, <128 x i8>* %a17, i32 8
-  store <128 x i8> %a8, <128 x i8>* %v8, align 128
-  %v9 = getelementptr <128 x i8>, <128 x i8>* %a17, i32 9
-  store <128 x i8> %a9, <128 x i8>* %v9, align 128
-  %v10 = getelementptr <128 x i8>, <128 x i8>* %a17, i32 10
-  store <128 x i8> %a10, <128 x i8>* %v10, align 128
-  %v11 = getelementptr <128 x i8>, <128 x i8>* %a17, i32 11
-  store <128 x i8> %a11, <128 x i8>* %v11, align 128
-  %v12 = getelementptr <128 x i8>, <128 x i8>* %a17, i32 12
-  store <128 x i8> %a12, <128 x i8>* %v12, align 128
-  %v13 = getelementptr <128 x i8>, <128 x i8>* %a17, i32 13
-  store <128 x i8> %a13, <128 x i8>* %v13, align 128
-  %v14 = getelementptr <128 x i8>, <128 x i8>* %a17, i32 14
-  store <128 x i8> %a14, <128 x i8>* %v14, align 128
-  %v15 = getelementptr <128 x i8>, <128 x i8>* %a17, i32 15
-  store <128 x i8> %a15, <128 x i8>* %v15, align 128
-  %v16 = getelementptr <128 x i8>, <128 x i8>* %a17, i32 16
-  store <128 x i8> %a16, <128 x i8>* %v16, align 128
+  store <128 x i8> %a0, ptr %a17, align 128
+  %v1 = getelementptr <128 x i8>, ptr %a17, i32 1
+  store <128 x i8> %a1, ptr %v1, align 128
+  %v2 = getelementptr <128 x i8>, ptr %a17, i32 2
+  store <128 x i8> %a2, ptr %v2, align 128
+  %v3 = getelementptr <128 x i8>, ptr %a17, i32 3
+  store <128 x i8> %a3, ptr %v3, align 128
+  %v4 = getelementptr <128 x i8>, ptr %a17, i32 4
+  store <128 x i8> %a4, ptr %v4, align 128
+  %v5 = getelementptr <128 x i8>, ptr %a17, i32 5
+  store <128 x i8> %a5, ptr %v5, align 128
+  %v6 = getelementptr <128 x i8>, ptr %a17, i32 6
+  store <128 x i8> %a6, ptr %v6, align 128
+  %v7 = getelementptr <128 x i8>, ptr %a17, i32 7
+  store <128 x i8> %a7, ptr %v7, align 128
+  %v8 = getelementptr <128 x i8>, ptr %a17, i32 8
+  store <128 x i8> %a8, ptr %v8, align 128
+  %v9 = getelementptr <128 x i8>, ptr %a17, i32 9
+  store <128 x i8> %a9, ptr %v9, align 128
+  %v10 = getelementptr <128 x i8>, ptr %a17, i32 10
+  store <128 x i8> %a10, ptr %v10, align 128
+  %v11 = getelementptr <128 x i8>, ptr %a17, i32 11
+  store <128 x i8> %a11, ptr %v11, align 128
+  %v12 = getelementptr <128 x i8>, ptr %a17, i32 12
+  store <128 x i8> %a12, ptr %v12, align 128
+  %v13 = getelementptr <128 x i8>, ptr %a17, i32 13
+  store <128 x i8> %a13, ptr %v13, align 128
+  %v14 = getelementptr <128 x i8>, ptr %a17, i32 14
+  store <128 x i8> %a14, ptr %v14, align 128
+  %v15 = getelementptr <128 x i8>, ptr %a17, i32 15
+  store <128 x i8> %a15, ptr %v15, align 128
+  %v16 = getelementptr <128 x i8>, ptr %a17, i32 16
+  store <128 x i8> %a16, ptr %v16, align 128
   ret void
 }
 
-define void @f17(<64 x i16> %a0, <64 x i16> %a1, <64 x i16> %a2, <64 x i16> %a3, <64 x i16> %a4, <64 x i16> %a5, <64 x i16> %a6, <64 x i16> %a7, <64 x i16> %a8, <64 x i16> %a9, <64 x i16> %a10, <64 x i16> %a11, <64 x i16> %a12, <64 x i16> %a13, <64 x i16> %a14, <64 x i16> %a15, <64 x i16> %a16, <64 x i16>* %a17) #0 {
+define void @f17(<64 x i16> %a0, <64 x i16> %a1, <64 x i16> %a2, <64 x i16> %a3, <64 x i16> %a4, <64 x i16> %a5, <64 x i16> %a6, <64 x i16> %a7, <64 x i16> %a8, <64 x i16> %a9, <64 x i16> %a10, <64 x i16> %a11, <64 x i16> %a12, <64 x i16> %a13, <64 x i16> %a14, <64 x i16> %a15, <64 x i16> %a16, ptr %a17) #0 {
 ; CHECK-LABEL: f17:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -1038,44 +1021,43 @@ define void @f17(<64 x i16> %a0, <64 x i16> %a1, <64 x i16> %a2, <64 x i16> %a3,
 ; CHECK-NEXT:     r31:30 = dealloc_return(r30):raw
 ; CHECK-NEXT:    }
 b0:
-  %v0 = getelementptr <64 x i16>, <64 x i16>* %a17, i32 0
-  store <64 x i16> %a0, <64 x i16>* %v0, align 128
-  %v1 = getelementptr <64 x i16>, <64 x i16>* %a17, i32 1
-  store <64 x i16> %a1, <64 x i16>* %v1, align 128
-  %v2 = getelementptr <64 x i16>, <64 x i16>* %a17, i32 2
-  store <64 x i16> %a2, <64 x i16>* %v2, align 128
-  %v3 = getelementptr <64 x i16>, <64 x i16>* %a17, i32 3
-  store <64 x i16> %a3, <64 x i16>* %v3, align 128
-  %v4 = getelementptr <64 x i16>, <64 x i16>* %a17, i32 4
-  store <64 x i16> %a4, <64 x i16>* %v4, align 128
-  %v5 = getelementptr <64 x i16>, <64 x i16>* %a17, i32 5
-  store <64 x i16> %a5, <64 x i16>* %v5, align 128
-  %v6 = getelementptr <64 x i16>, <64 x i16>* %a17, i32 6
-  store <64 x i16> %a6, <64 x i16>* %v6, align 128
-  %v7 = getelementptr <64 x i16>, <64 x i16>* %a17, i32 7
-  store <64 x i16> %a7, <64 x i16>* %v7, align 128
-  %v8 = getelementptr <64 x i16>, <64 x i16>* %a17, i32 8
-  store <64 x i16> %a8, <64 x i16>* %v8, align 128
-  %v9 = getelementptr <64 x i16>, <64 x i16>* %a17, i32 9
-  store <64 x i16> %a9, <64 x i16>* %v9, align 128
-  %v10 = getelementptr <64 x i16>, <64 x i16>* %a17, i32 10
-  store <64 x i16> %a10, <64 x i16>* %v10, align 128
-  %v11 = getelementptr <64 x i16>, <64 x i16>* %a17, i32 11
-  store <64 x i16> %a11, <64 x i16>* %v11, align 128
-  %v12 = getelementptr <64 x i16>, <64 x i16>* %a17, i32 12
-  store <64 x i16> %a12, <64 x i16>* %v12, align 128
-  %v13 = getelementptr <64 x i16>, <64 x i16>* %a17, i32 13
-  store <64 x i16> %a13, <64 x i16>* %v13, align 128
-  %v14 = getelementptr <64 x i16>, <64 x i16>* %a17, i32 14
-  store <64 x i16> %a14, <64 x i16>* %v14, align 128
-  %v15 = getelementptr <64 x i16>, <64 x i16>* %a17, i32 15
-  store <64 x i16> %a15, <64 x i16>* %v15, align 128
-  %v16 = getelementptr <64 x i16>, <64 x i16>* %a17, i32 16
-  store <64 x i16> %a16, <64 x i16>* %v16, align 128
+  store <64 x i16> %a0, ptr %a17, align 128
+  %v1 = getelementptr <64 x i16>, ptr %a17, i32 1
+  store <64 x i16> %a1, ptr %v1, align 128
+  %v2 = getelementptr <64 x i16>, ptr %a17, i32 2
+  store <64 x i16> %a2, ptr %v2, align 128
+  %v3 = getelementptr <64 x i16>, ptr %a17, i32 3
+  store <64 x i16> %a3, ptr %v3, align 128
+  %v4 = getelementptr <64 x i16>, ptr %a17, i32 4
+  store <64 x i16> %a4, ptr %v4, align 128
+  %v5 = getelementptr <64 x i16>, ptr %a17, i32 5
+  store <64 x i16> %a5, ptr %v5, align 128
+  %v6 = getelementptr <64 x i16>, ptr %a17, i32 6
+  store <64 x i16> %a6, ptr %v6, align 128
+  %v7 = getelementptr <64 x i16>, ptr %a17, i32 7
+  store <64 x i16> %a7, ptr %v7, align 128
+  %v8 = getelementptr <64 x i16>, ptr %a17, i32 8
+  store <64 x i16> %a8, ptr %v8, align 128
+  %v9 = getelementptr <64 x i16>, ptr %a17, i32 9
+  store <64 x i16> %a9, ptr %v9, align 128
+  %v10 = getelementptr <64 x i16>, ptr %a17, i32 10
+  store <64 x i16> %a10, ptr %v10, align 128
+  %v11 = getelementptr <64 x i16>, ptr %a17, i32 11
+  store <64 x i16> %a11, ptr %v11, align 128
+  %v12 = getelementptr <64 x i16>, ptr %a17, i32 12
+  store <64 x i16> %a12, ptr %v12, align 128
+  %v13 = getelementptr <64 x i16>, ptr %a17, i32 13
+  store <64 x i16> %a13, ptr %v13, align 128
+  %v14 = getelementptr <64 x i16>, ptr %a17, i32 14
+  store <64 x i16> %a14, ptr %v14, align 128
+  %v15 = getelementptr <64 x i16>, ptr %a17, i32 15
+  store <64 x i16> %a15, ptr %v15, align 128
+  %v16 = getelementptr <64 x i16>, ptr %a17, i32 16
+  store <64 x i16> %a16, ptr %v16, align 128
   ret void
 }
 
-define void @f18(<32 x i32> %a0, <32 x i32> %a1, <32 x i32> %a2, <32 x i32> %a3, <32 x i32> %a4, <32 x i32> %a5, <32 x i32> %a6, <32 x i32> %a7, <32 x i32> %a8, <32 x i32> %a9, <32 x i32> %a10, <32 x i32> %a11, <32 x i32> %a12, <32 x i32> %a13, <32 x i32> %a14, <32 x i32> %a15, <32 x i32> %a16, <32 x i32>* %a17) #0 {
+define void @f18(<32 x i32> %a0, <32 x i32> %a1, <32 x i32> %a2, <32 x i32> %a3, <32 x i32> %a4, <32 x i32> %a5, <32 x i32> %a6, <32 x i32> %a7, <32 x i32> %a8, <32 x i32> %a9, <32 x i32> %a10, <32 x i32> %a11, <32 x i32> %a12, <32 x i32> %a13, <32 x i32> %a14, <32 x i32> %a15, <32 x i32> %a16, ptr %a17) #0 {
 ; CHECK-LABEL: f18:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -1148,44 +1130,43 @@ define void @f18(<32 x i32> %a0, <32 x i32> %a1, <32 x i32> %a2, <32 x i32> %a3,
 ; CHECK-NEXT:     r31:30 = dealloc_return(r30):raw
 ; CHECK-NEXT:    }
 b0:
-  %v0 = getelementptr <32 x i32>, <32 x i32>* %a17, i32 0
-  store <32 x i32> %a0, <32 x i32>* %v0, align 128
-  %v1 = getelementptr <32 x i32>, <32 x i32>* %a17, i32 1
-  store <32 x i32> %a1, <32 x i32>* %v1, align 128
-  %v2 = getelementptr <32 x i32>, <32 x i32>* %a17, i32 2
-  store <32 x i32> %a2, <32 x i32>* %v2, align 128
-  %v3 = getelementptr <32 x i32>, <32 x i32>* %a17, i32 3
-  store <32 x i32> %a3, <32 x i32>* %v3, align 128
-  %v4 = getelementptr <32 x i32>, <32 x i32>* %a17, i32 4
-  store <32 x i32> %a4, <32 x i32>* %v4, align 128
-  %v5 = getelementptr <32 x i32>, <32 x i32>* %a17, i32 5
-  store <32 x i32> %a5, <32 x i32>* %v5, align 128
-  %v6 = getelementptr <32 x i32>, <32 x i32>* %a17, i32 6
-  store <32 x i32> %a6, <32 x i32>* %v6, align 128
-  %v7 = getelementptr <32 x i32>, <32 x i32>* %a17, i32 7
-  store <32 x i32> %a7, <32 x i32>* %v7, align 128
-  %v8 = getelementptr <32 x i32>, <32 x i32>* %a17, i32 8
-  store <32 x i32> %a8, <32 x i32>* %v8, align 128
-  %v9 = getelementptr <32 x i32>, <32 x i32>* %a17, i32 9
-  store <32 x i32> %a9, <32 x i32>* %v9, align 128
-  %v10 = getelementptr <32 x i32>, <32 x i32>* %a17, i32 10
-  store <32 x i32> %a10, <32 x i32>* %v10, align 128
-  %v11 = getelementptr <32 x i32>, <32 x i32>* %a17, i32 11
-  store <32 x i32> %a11, <32 x i32>* %v11, align 128
-  %v12 = getelementptr <32 x i32>, <32 x i32>* %a17, i32 12
-  store <32 x i32> %a12, <32 x i32>* %v12, align 128
-  %v13 = getelementptr <32 x i32>, <32 x i32>* %a17, i32 13
-  store <32 x i32> %a13, <32 x i32>* %v13, align 128
-  %v14 = getelementptr <32 x i32>, <32 x i32>* %a17, i32 14
-  store <32 x i32> %a14, <32 x i32>* %v14, align 128
-  %v15 = getelementptr <32 x i32>, <32 x i32>* %a17, i32 15
-  store <32 x i32> %a15, <32 x i32>* %v15, align 128
-  %v16 = getelementptr <32 x i32>, <32 x i32>* %a17, i32 16
-  store <32 x i32> %a16, <32 x i32>* %v16, align 128
+  store <32 x i32> %a0, ptr %a17, align 128
+  %v1 = getelementptr <32 x i32>, ptr %a17, i32 1
+  store <32 x i32> %a1, ptr %v1, align 128
+  %v2 = getelementptr <32 x i32>, ptr %a17, i32 2
+  store <32 x i32> %a2, ptr %v2, align 128
+  %v3 = getelementptr <32 x i32>, ptr %a17, i32 3
+  store <32 x i32> %a3, ptr %v3, align 128
+  %v4 = getelementptr <32 x i32>, ptr %a17, i32 4
+  store <32 x i32> %a4, ptr %v4, align 128
+  %v5 = getelementptr <32 x i32>, ptr %a17, i32 5
+  store <32 x i32> %a5, ptr %v5, align 128
+  %v6 = getelementptr <32 x i32>, ptr %a17, i32 6
+  store <32 x i32> %a6, ptr %v6, align 128
+  %v7 = getelementptr <32 x i32>, ptr %a17, i32 7
+  store <32 x i32> %a7, ptr %v7, align 128
+  %v8 = getelementptr <32 x i32>, ptr %a17, i32 8
+  store <32 x i32> %a8, ptr %v8, align 128
+  %v9 = getelementptr <32 x i32>, ptr %a17, i32 9
+  store <32 x i32> %a9, ptr %v9, align 128
+  %v10 = getelementptr <32 x i32>, ptr %a17, i32 10
+  store <32 x i32> %a10, ptr %v10, align 128
+  %v11 = getelementptr <32 x i32>, ptr %a17, i32 11
+  store <32 x i32> %a11, ptr %v11, align 128
+  %v12 = getelementptr <32 x i32>, ptr %a17, i32 12
+  store <32 x i32> %a12, ptr %v12, align 128
+  %v13 = getelementptr <32 x i32>, ptr %a17, i32 13
+  store <32 x i32> %a13, ptr %v13, align 128
+  %v14 = getelementptr <32 x i32>, ptr %a17, i32 14
+  store <32 x i32> %a14, ptr %v14, align 128
+  %v15 = getelementptr <32 x i32>, ptr %a17, i32 15
+  store <32 x i32> %a15, ptr %v15, align 128
+  %v16 = getelementptr <32 x i32>, ptr %a17, i32 16
+  store <32 x i32> %a16, ptr %v16, align 128
   ret void
 }
 
-define void @f19(<64 x half> %a0, <64 x half> %a1, <64 x half> %a2, <64 x half> %a3, <64 x half> %a4, <64 x half> %a5, <64 x half> %a6, <64 x half> %a7, <64 x half> %a8, <64 x half> %a9, <64 x half> %a10, <64 x half> %a11, <64 x half> %a12, <64 x half> %a13, <64 x half> %a14, <64 x half> %a15, <64 x half> %a16, <64 x half>* %a17) #0 {
+define void @f19(<64 x half> %a0, <64 x half> %a1, <64 x half> %a2, <64 x half> %a3, <64 x half> %a4, <64 x half> %a5, <64 x half> %a6, <64 x half> %a7, <64 x half> %a8, <64 x half> %a9, <64 x half> %a10, <64 x half> %a11, <64 x half> %a12, <64 x half> %a13, <64 x half> %a14, <64 x half> %a15, <64 x half> %a16, ptr %a17) #0 {
 ; CHECK-LABEL: f19:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -1258,44 +1239,43 @@ define void @f19(<64 x half> %a0, <64 x half> %a1, <64 x half> %a2, <64 x half>
 ; CHECK-NEXT:     r31:30 = dealloc_return(r30):raw
 ; CHECK-NEXT:    }
 b0:
-  %v0 = getelementptr <64 x half>, <64 x half>* %a17, i32 0
-  store <64 x half> %a0, <64 x half>* %v0, align 128
-  %v1 = getelementptr <64 x half>, <64 x half>* %a17, i32 1
-  store <64 x half> %a1, <64 x half>* %v1, align 128
-  %v2 = getelementptr <64 x half>, <64 x half>* %a17, i32 2
-  store <64 x half> %a2, <64 x half>* %v2, align 128
-  %v3 = getelementptr <64 x half>, <64 x half>* %a17, i32 3
-  store <64 x half> %a3, <64 x half>* %v3, align 128
-  %v4 = getelementptr <64 x half>, <64 x half>* %a17, i32 4
-  store <64 x half> %a4, <64 x half>* %v4, align 128
-  %v5 = getelementptr <64 x half>, <64 x half>* %a17, i32 5
-  store <64 x half> %a5, <64 x half>* %v5, align 128
-  %v6 = getelementptr <64 x half>, <64 x half>* %a17, i32 6
-  store <64 x half> %a6, <64 x half>* %v6, align 128
-  %v7 = getelementptr <64 x half>, <64 x half>* %a17, i32 7
-  store <64 x half> %a7, <64 x half>* %v7, align 128
-  %v8 = getelementptr <64 x half>, <64 x half>* %a17, i32 8
-  store <64 x half> %a8, <64 x half>* %v8, align 128
-  %v9 = getelementptr <64 x half>, <64 x half>* %a17, i32 9
-  store <64 x half> %a9, <64 x half>* %v9, align 128
-  %v10 = getelementptr <64 x half>, <64 x half>* %a17, i32 10
-  store <64 x half> %a10, <64 x half>* %v10, align 128
-  %v11 = getelementptr <64 x half>, <64 x half>* %a17, i32 11
-  store <64 x half> %a11, <64 x half>* %v11, align 128
-  %v12 = getelementptr <64 x half>, <64 x half>* %a17, i32 12
-  store <64 x half> %a12, <64 x half>* %v12, align 128
-  %v13 = getelementptr <64 x half>, <64 x half>* %a17, i32 13
-  store <64 x half> %a13, <64 x half>* %v13, align 128
-  %v14 = getelementptr <64 x half>, <64 x half>* %a17, i32 14
-  store <64 x half> %a14, <64 x half>* %v14, align 128
-  %v15 = getelementptr <64 x half>, <64 x half>* %a17, i32 15
-  store <64 x half> %a15, <64 x half>* %v15, align 128
-  %v16 = getelementptr <64 x half>, <64 x half>* %a17, i32 16
-  store <64 x half> %a16, <64 x half>* %v16, align 128
+  store <64 x half> %a0, ptr %a17, align 128
+  %v1 = getelementptr <64 x half>, ptr %a17, i32 1
+  store <64 x half> %a1, ptr %v1, align 128
+  %v2 = getelementptr <64 x half>, ptr %a17, i32 2
+  store <64 x half> %a2, ptr %v2, align 128
+  %v3 = getelementptr <64 x half>, ptr %a17, i32 3
+  store <64 x half> %a3, ptr %v3, align 128
+  %v4 = getelementptr <64 x half>, ptr %a17, i32 4
+  store <64 x half> %a4, ptr %v4, align 128
+  %v5 = getelementptr <64 x half>, ptr %a17, i32 5
+  store <64 x half> %a5, ptr %v5, align 128
+  %v6 = getelementptr <64 x half>, ptr %a17, i32 6
+  store <64 x half> %a6, ptr %v6, align 128
+  %v7 = getelementptr <64 x half>, ptr %a17, i32 7
+  store <64 x half> %a7, ptr %v7, align 128
+  %v8 = getelementptr <64 x half>, ptr %a17, i32 8
+  store <64 x half> %a8, ptr %v8, align 128
+  %v9 = getelementptr <64 x half>, ptr %a17, i32 9
+  store <64 x half> %a9, ptr %v9, align 128
+  %v10 = getelementptr <64 x half>, ptr %a17, i32 10
+  store <64 x half> %a10, ptr %v10, align 128
+  %v11 = getelementptr <64 x half>, ptr %a17, i32 11
+  store <64 x half> %a11, ptr %v11, align 128
+  %v12 = getelementptr <64 x half>, ptr %a17, i32 12
+  store <64 x half> %a12, ptr %v12, align 128
+  %v13 = getelementptr <64 x half>, ptr %a17, i32 13
+  store <64 x half> %a13, ptr %v13, align 128
+  %v14 = getelementptr <64 x half>, ptr %a17, i32 14
+  store <64 x half> %a14, ptr %v14, align 128
+  %v15 = getelementptr <64 x half>, ptr %a17, i32 15
+  store <64 x half> %a15, ptr %v15, align 128
+  %v16 = getelementptr <64 x half>, ptr %a17, i32 16
+  store <64 x half> %a16, ptr %v16, align 128
   ret void
 }
 
-define void @f20(<32 x float> %a0, <32 x float> %a1, <32 x float> %a2, <32 x float> %a3, <32 x float> %a4, <32 x float> %a5, <32 x float> %a6, <32 x float> %a7, <32 x float> %a8, <32 x float> %a9, <32 x float> %a10, <32 x float> %a11, <32 x float> %a12, <32 x float> %a13, <32 x float> %a14, <32 x float> %a15, <32 x float> %a16, <32 x float>* %a17) #0 {
+define void @f20(<32 x float> %a0, <32 x float> %a1, <32 x float> %a2, <32 x float> %a3, <32 x float> %a4, <32 x float> %a5, <32 x float> %a6, <32 x float> %a7, <32 x float> %a8, <32 x float> %a9, <32 x float> %a10, <32 x float> %a11, <32 x float> %a12, <32 x float> %a13, <32 x float> %a14, <32 x float> %a15, <32 x float> %a16, ptr %a17) #0 {
 ; CHECK-LABEL: f20:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -1368,40 +1348,39 @@ define void @f20(<32 x float> %a0, <32 x float> %a1, <32 x float> %a2, <32 x flo
 ; CHECK-NEXT:     r31:30 = dealloc_return(r30):raw
 ; CHECK-NEXT:    }
 b0:
-  %v0 = getelementptr <32 x float>, <32 x float>* %a17, i32 0
-  store <32 x float> %a0, <32 x float>* %v0, align 128
-  %v1 = getelementptr <32 x float>, <32 x float>* %a17, i32 1
-  store <32 x float> %a1, <32 x float>* %v1, align 128
-  %v2 = getelementptr <32 x float>, <32 x float>* %a17, i32 2
-  store <32 x float> %a2, <32 x float>* %v2, align 128
-  %v3 = getelementptr <32 x float>, <32 x float>* %a17, i32 3
-  store <32 x float> %a3, <32 x float>* %v3, align 128
-  %v4 = getelementptr <32 x float>, <32 x float>* %a17, i32 4
-  store <32 x float> %a4, <32 x float>* %v4, align 128
-  %v5 = getelementptr <32 x float>, <32 x float>* %a17, i32 5
-  store <32 x float> %a5, <32 x float>* %v5, align 128
-  %v6 = getelementptr <32 x float>, <32 x float>* %a17, i32 6
-  store <32 x float> %a6, <32 x float>* %v6, align 128
-  %v7 = getelementptr <32 x float>, <32 x float>* %a17, i32 7
-  store <32 x float> %a7, <32 x float>* %v7, align 128
-  %v8 = getelementptr <32 x float>, <32 x float>* %a17, i32 8
-  store <32 x float> %a8, <32 x float>* %v8, align 128
-  %v9 = getelementptr <32 x float>, <32 x float>* %a17, i32 9
-  store <32 x float> %a9, <32 x float>* %v9, align 128
-  %v10 = getelementptr <32 x float>, <32 x float>* %a17, i32 10
-  store <32 x float> %a10, <32 x float>* %v10, align 128
-  %v11 = getelementptr <32 x float>, <32 x float>* %a17, i32 11
-  store <32 x float> %a11, <32 x float>* %v11, align 128
-  %v12 = getelementptr <32 x float>, <32 x float>* %a17, i32 12
-  store <32 x float> %a12, <32 x float>* %v12, align 128
-  %v13 = getelementptr <32 x float>, <32 x float>* %a17, i32 13
-  store <32 x float> %a13, <32 x float>* %v13, align 128
-  %v14 = getelementptr <32 x float>, <32 x float>* %a17, i32 14
-  store <32 x float> %a14, <32 x float>* %v14, align 128
-  %v15 = getelementptr <32 x float>, <32 x float>* %a17, i32 15
-  store <32 x float> %a15, <32 x float>* %v15, align 128
-  %v16 = getelementptr <32 x float>, <32 x float>* %a17, i32 16
-  store <32 x float> %a16, <32 x float>* %v16, align 128
+  store <32 x float> %a0, ptr %a17, align 128
+  %v1 = getelementptr <32 x float>, ptr %a17, i32 1
+  store <32 x float> %a1, ptr %v1, align 128
+  %v2 = getelementptr <32 x float>, ptr %a17, i32 2
+  store <32 x float> %a2, ptr %v2, align 128
+  %v3 = getelementptr <32 x float>, ptr %a17, i32 3
+  store <32 x float> %a3, ptr %v3, align 128
+  %v4 = getelementptr <32 x float>, ptr %a17, i32 4
+  store <32 x float> %a4, ptr %v4, align 128
+  %v5 = getelementptr <32 x float>, ptr %a17, i32 5
+  store <32 x float> %a5, ptr %v5, align 128
+  %v6 = getelementptr <32 x float>, ptr %a17, i32 6
+  store <32 x float> %a6, ptr %v6, align 128
+  %v7 = getelementptr <32 x float>, ptr %a17, i32 7
+  store <32 x float> %a7, ptr %v7, align 128
+  %v8 = getelementptr <32 x float>, ptr %a17, i32 8
+  store <32 x float> %a8, ptr %v8, align 128
+  %v9 = getelementptr <32 x float>, ptr %a17, i32 9
+  store <32 x float> %a9, ptr %v9, align 128
+  %v10 = getelementptr <32 x float>, ptr %a17, i32 10
+  store <32 x float> %a10, ptr %v10, align 128
+  %v11 = getelementptr <32 x float>, ptr %a17, i32 11
+  store <32 x float> %a11, ptr %v11, align 128
+  %v12 = getelementptr <32 x float>, ptr %a17, i32 12
+  store <32 x float> %a12, ptr %v12, align 128
+  %v13 = getelementptr <32 x float>, ptr %a17, i32 13
+  store <32 x float> %a13, ptr %v13, align 128
+  %v14 = getelementptr <32 x float>, ptr %a17, i32 14
+  store <32 x float> %a14, ptr %v14, align 128
+  %v15 = getelementptr <32 x float>, ptr %a17, i32 15
+  store <32 x float> %a15, ptr %v15, align 128
+  %v16 = getelementptr <32 x float>, ptr %a17, i32 16
+  store <32 x float> %a16, ptr %v16, align 128
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/float-cost.ll b/llvm/test/CodeGen/Hexagon/autohvx/float-cost.ll
index eab389522ee8e..089bc9967d08f 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/float-cost.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/float-cost.ll
@@ -7,7 +7,7 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define void @f0(i8* nocapture readonly %a0, i8* nocapture %a1, i32 %a2, i32 %a3, i32 %a4, float %a5, float %a6) #0 {
+define void @f0(ptr nocapture readonly %a0, ptr nocapture %a1, i32 %a2, i32 %a3, i32 %a4, float %a5, float %a6) #0 {
 b0:
   %v0 = icmp sgt i32 %a2, 0
   br i1 %v0, label %b1, label %b2
@@ -51,17 +51,17 @@ b2:                                               ; preds = %b3, %b0
 b3:                                               ; preds = %b3, %b1
   %v31 = phi i32 [ 0, %b1 ], [ %v60, %b3 ]
   %v32 = add nsw i32 %v31, %v15
-  %v33 = getelementptr inbounds i8, i8* %a0, i32 %v32
-  %v34 = load i8, i8* %v33, align 1, !tbaa !0
+  %v33 = getelementptr inbounds i8, ptr %a0, i32 %v32
+  %v34 = load i8, ptr %v33, align 1, !tbaa !0
   %v35 = add nsw i32 %v31, %v21
-  %v36 = getelementptr inbounds i8, i8* %a0, i32 %v35
-  %v37 = load i8, i8* %v36, align 1, !tbaa !0
+  %v36 = getelementptr inbounds i8, ptr %a0, i32 %v35
+  %v37 = load i8, ptr %v36, align 1, !tbaa !0
   %v38 = add nsw i32 %v31, %v28
-  %v39 = getelementptr inbounds i8, i8* %a0, i32 %v38
-  %v40 = load i8, i8* %v39, align 1, !tbaa !0
+  %v39 = getelementptr inbounds i8, ptr %a0, i32 %v38
+  %v40 = load i8, ptr %v39, align 1, !tbaa !0
   %v41 = add nsw i32 %v31, %v30
-  %v42 = getelementptr inbounds i8, i8* %a0, i32 %v41
-  %v43 = load i8, i8* %v42, align 1, !tbaa !0
+  %v42 = getelementptr inbounds i8, ptr %a0, i32 %v41
+  %v43 = load i8, ptr %v42, align 1, !tbaa !0
   %v44 = uitofp i8 %v34 to float
   %v45 = uitofp i8 %v37 to float
   %v46 = uitofp i8 %v40 to float
@@ -77,8 +77,8 @@ b3:                                               ; preds = %b3, %b1
   %v56 = fadd float %v50, %v55
   %v57 = fadd float %v56, 5.000000e-01
   %v58 = fptoui float %v57 to i8
-  %v59 = getelementptr inbounds i8, i8* %a1, i32 %v31
-  store i8 %v58, i8* %v59, align 1, !tbaa !0
+  %v59 = getelementptr inbounds i8, ptr %a1, i32 %v31
+  store i8 %v58, ptr %v59, align 1, !tbaa !0
   %v60 = add nuw nsw i32 %v31, 1
   %v61 = icmp eq i32 %v60, %a2
   br i1 %v61, label %b2, label %b3

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/hfinsert.ll b/llvm/test/CodeGen/Hexagon/autohvx/hfinsert.ll
index ffca572e4be84..5e6365a33ec26 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/hfinsert.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/hfinsert.ll
@@ -5,14 +5,12 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define half* @fred(half* %v0) local_unnamed_addr #0 {
+define ptr @fred(ptr %v0) local_unnamed_addr #0 {
 b0:
-  %t1 = bitcast half* %v0 to <64 x half>*
-  %v1 = load <64 x half>, <64 x half>* %t1, align 2
+  %v1 = load <64 x half>, ptr %v0, align 2
   %v2 = insertelement <64 x half> %v1, half 0xH4170, i32 17
-  store <64 x half> %v2, <64 x half>* %t1, align 2
-  %t2 = bitcast <64 x half>* %t1 to half*
-  ret half* %t2
+  store <64 x half> %v2, ptr %v0, align 2
+  ret ptr %v0
 }
 
 attributes #0 = { norecurse nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv69" "target-features"="+hvx-length128b,+hvxv69,+v69,+hvx-qfloat,-long-calls" "unsafe-fp-math"="false" "use-soft-float"="false" }

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/hfnosplat_cp.ll b/llvm/test/CodeGen/Hexagon/autohvx/hfnosplat_cp.ll
index d5d3dcbe07377..4c5c96e61b78c 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/hfnosplat_cp.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/hfnosplat_cp.ll
@@ -8,10 +8,9 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 ; Function Attrs: nofree norecurse nounwind writeonly
-define dso_local i32 @foo(half* nocapture %a) local_unnamed_addr #0 {
+define dso_local i32 @foo(ptr nocapture %a) local_unnamed_addr #0 {
 vector.body:
-  %0 = bitcast half* %a to <40 x half>*
-  store <40 x half> <half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH3E79, half 0xH3E79, half 0xH3E79, half 0xH3E79, half 0xH3E79, half 0xH3E79, half 0xH3E79, half 0xH3E79, half 0xH3E79>, <40 x half>* %0, align 2
+  store <40 x half> <half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH4170, half 0xH3E79, half 0xH3E79, half 0xH3E79, half 0xH3E79, half 0xH3E79, half 0xH3E79, half 0xH3E79, half 0xH3E79, half 0xH3E79>, ptr %a, align 2
   ret i32 0
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/hvx-idiom-empty-results.ll b/llvm/test/CodeGen/Hexagon/autohvx/hvx-idiom-empty-results.ll
index d281a8962d8f5..13ff6154f15a5 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/hvx-idiom-empty-results.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/hvx-idiom-empty-results.ll
@@ -6,22 +6,20 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define void @f0(i8* %a0, i32* %a1, i32* %a2) unnamed_addr #0 {
+define void @f0(ptr %a0, ptr %a1, ptr %a2) unnamed_addr #0 {
 b0:
-  %v0 = load i8, i8* %a0, align 1
-  %v1 = load i32, i32* %a1, align 4
-  %v2 = load i32, i32* %a2, align 4
+  %v0 = load i8, ptr %a0, align 1
+  %v1 = load i32, ptr %a1, align 4
+  %v2 = load i32, ptr %a2, align 4
   %v3 = zext i8 %v0 to i32
-  %v4 = getelementptr inbounds i8, i8* null, i32 %v1
+  %v4 = getelementptr inbounds i8, ptr null, i32 %v1
   %v5 = add nsw i32 %v2, 2
-  %v6 = getelementptr inbounds i8, i8* %v4, i32 0
-  %v7 = getelementptr inbounds i8, i8* %v6, i32 0
   %v8 = insertelement <16 x i32> poison, i32 %v3, i64 0
   %v9 = shufflevector <16 x i32> %v8, <16 x i32> poison, <16 x i32> zeroinitializer
   br label %b1
 
 b1:                                               ; preds = %b3, %b2
-  %v10 = phi i8* [ %v7, %b0 ], [ %v19, %b1 ]
+  %v10 = phi ptr [ %v4, %b0 ], [ %v19, %b1 ]
   %v11 = add nsw <16 x i32> zeroinitializer, <i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128>
   %v12 = mul nsw <16 x i32> %v11, %v9
   %v13 = add nsw <16 x i32> %v12, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
@@ -30,9 +28,8 @@ b1:                                               ; preds = %b3, %b2
   %v16 = add nsw <16 x i32> %v15, <i32 128, i32 128, i32 128, i32 128, i32 128, i32 128, i32 128, i32 128, i32 128, i32 128, i32 128, i32 128, i32 128, i32 128, i32 128, i32 128>
   %v17 = select <16 x i1> zeroinitializer, <16 x i32> zeroinitializer, <16 x i32> %v16
   %v18 = trunc <16 x i32> %v17 to <16 x i8>
-  %v19 = getelementptr inbounds i8, i8* %v10, i32 1
-  %v20 = bitcast i8* %v19 to <16 x i8>*
-  store <16 x i8> %v18, <16 x i8>* %v20, align 1
+  %v19 = getelementptr inbounds i8, ptr %v10, i32 1
+  store <16 x i8> %v18, ptr %v19, align 1
   br label %b1, !llvm.loop !0
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/interleave.ll b/llvm/test/CodeGen/Hexagon/autohvx/interleave.ll
index 9ace9e9f03bc5..0367721f6a670 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/interleave.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/interleave.ll
@@ -5,7 +5,7 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define void @f0(i32* noalias nocapture %a0, i32* noalias nocapture readonly %a1, i32 %a2) #0 {
+define void @f0(ptr noalias nocapture %a0, ptr noalias nocapture readonly %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp eq i32 %a2, 0
   br i1 %v0, label %b3, label %b1
@@ -21,19 +21,19 @@ b3:                                               ; preds = %b2, %b0
 
 b4:                                               ; preds = %b4, %b1
   %v1 = phi i32 [ %v13, %b4 ], [ 0, %b1 ]
-  %v2 = getelementptr inbounds i32, i32* %a1, i32 %v1
-  %v3 = load i32, i32* %v2, align 4, !tbaa !1
-  %v4 = getelementptr inbounds i32, i32* %a0, i32 %v1
-  %v5 = load i32, i32* %v4, align 4, !tbaa !1
+  %v2 = getelementptr inbounds i32, ptr %a1, i32 %v1
+  %v3 = load i32, ptr %v2, align 4, !tbaa !1
+  %v4 = getelementptr inbounds i32, ptr %a0, i32 %v1
+  %v5 = load i32, ptr %v4, align 4, !tbaa !1
   %v6 = add nsw i32 %v5, %v3
-  store i32 %v6, i32* %v4, align 4, !tbaa !1
+  store i32 %v6, ptr %v4, align 4, !tbaa !1
   %v7 = or i32 %v1, 1
-  %v8 = getelementptr inbounds i32, i32* %a1, i32 %v7
-  %v9 = load i32, i32* %v8, align 4, !tbaa !1
-  %v10 = getelementptr inbounds i32, i32* %a0, i32 %v7
-  %v11 = load i32, i32* %v10, align 4, !tbaa !1
+  %v8 = getelementptr inbounds i32, ptr %a1, i32 %v7
+  %v9 = load i32, ptr %v8, align 4, !tbaa !1
+  %v10 = getelementptr inbounds i32, ptr %a0, i32 %v7
+  %v11 = load i32, ptr %v10, align 4, !tbaa !1
   %v12 = add nsw i32 %v11, %v9
-  store i32 %v12, i32* %v10, align 4, !tbaa !1
+  store i32 %v12, ptr %v10, align 4, !tbaa !1
   %v13 = add nuw nsw i32 %v1, 2
   %v14 = icmp eq i32 %v13, %a2
   br i1 %v14, label %b2, label %b4, !llvm.loop !5

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-anyext-inreg.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-anyext-inreg.ll
index 91384d3d9a703..39fb325db4a2e 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-anyext-inreg.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-anyext-inreg.ll
@@ -7,13 +7,13 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define fastcc void @fred(i16* %a0, <16 x i32>* %a1) #0 {
+define fastcc void @fred(ptr %a0, ptr %a1) #0 {
 b0:
-  %v1 = load i16, i16* %a0, align 2
+  %v1 = load i16, ptr %a0, align 2
   %v2 = insertelement <16 x i16> undef, i16 %v1, i32 15
   %v3 = zext <16 x i16> %v2 to <16 x i32>
   %v4 = shl nuw <16 x i32> %v3, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
-  store <16 x i32> %v4, <16 x i32>* %a1, align 4
+  store <16 x i32> %v4, ptr %a1, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-anyext-pair.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-anyext-pair.ll
index 9a290192f639d..05b67c0da136d 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-anyext-pair.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-anyext-pair.ll
@@ -6,11 +6,11 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define void @fred(<64 x i8>* %a0, <64 x i8>* %a1) #0 {
+define void @fred(ptr %a0, ptr %a1) #0 {
 b0:
-  %v1 = load <64 x i8>, <64 x i8>* %a0, align 1
+  %v1 = load <64 x i8>, ptr %a0, align 1
   %v2 = sext <64 x i8> %v1 to <64 x i32>
-  %v3 = load <64 x i8>, <64 x i8>* %a1, align 1
+  %v3 = load <64 x i8>, ptr %a1, align 1
   %v4 = sext <64 x i8> %v3 to <64 x i32>
   %v5 = mul nsw <64 x i32> %v4, %v2
   %v6 = add nsw <64 x i32> %v5, zeroinitializer
@@ -18,7 +18,7 @@ b0:
   %v8 = ashr exact <64 x i32> %v7, <i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24>
   %v9 = mul nsw <64 x i32> %v8, %v8
   %v10 = trunc <64 x i32> %v9 to <64 x i8>
-  store <64 x i8> %v10, <64 x i8>* %a0, align 1
+  store <64 x i8> %v10, ptr %a0, align 1
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-bitcast-vsplat.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-bitcast-vsplat.ll
index 94b77bf7f80a8..a98cffbfc8a46 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-bitcast-vsplat.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-bitcast-vsplat.ll
@@ -13,16 +13,16 @@
 target triple = "hexagon"
 
 %s.0 = type { %s.1 }
-%s.1 = type { i32, i8* }
+%s.1 = type { i32, ptr }
 %s.2 = type { i8, i8, [16 x i8], i8, [16 x i8] }
 
 ; Function Attrs: nounwind
-define dso_local zeroext i8 @f0(i8 zeroext %a0, %s.2* nocapture readonly %a1, i8 signext %a2) local_unnamed_addr #0 {
+define dso_local zeroext i8 @f0(i8 zeroext %a0, ptr nocapture readonly %a1, i8 signext %a2) local_unnamed_addr #0 {
 b0:
   br i1 undef, label %b2, label %b1
 
 b1:                                               ; preds = %b0
-  %v0 = load <64 x i8>, <64 x i8>* undef, align 1
+  %v0 = load <64 x i8>, ptr undef, align 1
   %v1 = icmp ult <64 x i8> %v0, <i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52, i8 52>
   %v2 = xor <64 x i1> %v1, zeroinitializer
   %v3 = select <64 x i1> %v2, <64 x i32> undef, <64 x i32> zeroinitializer
@@ -39,11 +39,11 @@ b2:                                               ; preds = %b1, %b0
   %v11 = add <64 x i32> %v10, undef
   %v12 = add <64 x i32> %v11, undef
   %v13 = extractelement <64 x i32> %v12, i32 0
-  tail call void @f1(%s.0* null, i32 undef, i32 undef, i32 %v13, i32 undef) #2
+  tail call void @f1(ptr null, i32 undef, i32 undef, i32 %v13, i32 undef) #2
   unreachable
 }
 
-declare dso_local void @f1(%s.0*, i32, i32, i32, i32) local_unnamed_addr #1
+declare dso_local void @f1(ptr, i32, i32, i32, i32) local_unnamed_addr #1
 
 attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx-length64b,+hvxv60" }
 attributes #1 = { "target-cpu"="hexagonv60" "target-features"="+hvx-length64b,+hvxv60" }

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-bitcast-vsplat2.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-bitcast-vsplat2.ll
index 691ca82561535..e957a4a8eb6db 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-bitcast-vsplat2.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-bitcast-vsplat2.ll
@@ -6,7 +6,7 @@
 target triple = "hexagon"
 
 ; Function Attrs: norecurse nounwind
-define dso_local i32 @f0(i32* nocapture %a0, i32* nocapture readonly %a1, i32* nocapture readonly %a2, i32 %a3) local_unnamed_addr #0 {
+define dso_local i32 @f0(ptr nocapture %a0, ptr nocapture readonly %a1, ptr nocapture readonly %a2, i32 %a3) local_unnamed_addr #0 {
 b0:
   %v0 = insertelement <16 x i32> undef, i32 %a3, i32 0
   %v1 = shufflevector <16 x i32> %v0, <16 x i32> undef, <16 x i32> zeroinitializer

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-bool-vector.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-bool-vector.ll
index f11a8f642da30..d2dcacb943320 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-bool-vector.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-bool-vector.ll
@@ -6,14 +6,14 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define void @fred(<16 x float>* %a0, <16 x i16>* %a1, <16 x i32>* %a2) #0 {
+define void @fred(ptr %a0, ptr %a1, ptr %a2) #0 {
 b0:
-  %v0 = load <16 x float>, <16 x float>* %a0, align 128
+  %v0 = load <16 x float>, ptr %a0, align 128
   %v1 = fcmp olt <16 x float> zeroinitializer, %v0
-  %v2 = load <16 x i16>, <16 x i16>* %a1, align 128
+  %v2 = load <16 x i16>, ptr %a1, align 128
   %v3 = select <16 x i1> %v1, <16 x i16> %v2, <16 x i16> zeroinitializer
   %v4 = sext <16 x i16> %v3 to <16 x i32>
-  store <16 x i32> %v4, <16 x i32>* %a2, align 128
+  store <16 x i32> %v4, ptr %a2, align 128
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-build-undef.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-build-undef.ll
index 4ba248a09628a..c5c76fef05189 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-build-undef.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-build-undef.ll
@@ -12,9 +12,9 @@ target triple = "hexagon"
 
 @g0 = global <32 x i8> zeroinitializer
 
-define void @fred(i8* %a0) #0 {
+define void @fred(ptr %a0) #0 {
 b0:
-  %v1 = load i8, i8* %a0, align 1
+  %v1 = load i8, ptr %a0, align 1
   %v2 = insertelement <32 x i8> undef, i8 %v1, i32 31
   %v3 = zext <32 x i8> %v2 to <32 x i16>
   %v4 = add nuw nsw <32 x i16> %v3, zeroinitializer
@@ -34,7 +34,7 @@ b0:
   %v18 = add <32 x i16> %v17, zeroinitializer
   %v19 = lshr <32 x i16> %v18, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
   %v20 = trunc <32 x i16> %v19 to <32 x i8>
-  store <32 x i8> %v20, <32 x i8>* @g0, align 1
+  store <32 x i8> %v20, ptr @g0, align 1
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-concat-multiple.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-concat-multiple.ll
index bcab26101ccb6..1a7c281f22a83 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-concat-multiple.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-concat-multiple.ll
@@ -7,13 +7,13 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define void @fred(i32* %a0, i32* %a1, i8* %a2) #0 {
+define void @fred(ptr %a0, ptr %a1, ptr %a2) #0 {
 b0:
-  %v1 = load i32, i32* %a0, align 4
+  %v1 = load i32, ptr %a0, align 4
   %v2 = mul nsw i32 %v1, -15137
   %v3 = add nsw i32 0, %v2
   %v4 = sub nsw i32 0, %v3
-  %v5 = load i32, i32* %a1, align 4
+  %v5 = load i32, ptr %a1, align 4
   %v6 = insertelement <2 x i32> undef, i32 %v5, i32 1
   %v7 = add nsw <2 x i32> %v6, %v6
   %v8 = extractelement <2 x i32> %v7, i32 0
@@ -26,9 +26,9 @@ b0:
   %v15 = lshr <8 x i32> %v14, <i32 18, i32 18, i32 18, i32 18, i32 18, i32 18, i32 18, i32 18>
   %v16 = and <8 x i32> %v15, %v14
   %v17 = extractelement <8 x i32> %v16, i32 5
-  %v18 = getelementptr inbounds i8, i8* null, i32 %v17
-  %v19 = load i8, i8* %v18, align 1
-  store i8 %v19, i8* %a2, align 1
+  %v18 = getelementptr inbounds i8, ptr null, i32 %v17
+  %v19 = load i8, ptr %v18, align 1
+  store i8 %v19, ptr %a2, align 1
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-concat-vectors-bool.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-concat-vectors-bool.ll
index 4230bf1b41072..73ac65f7e147f 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-concat-vectors-bool.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-concat-vectors-bool.ll
@@ -8,15 +8,15 @@ target triple = "hexagon"
 
 @g0 = global <8 x i32> zeroinitializer, align 8
 
-define void @fred(<8 x float>* %a0, <8 x float>* %a1) #0 {
+define void @fred(ptr %a0, ptr %a1) #0 {
 b0:
-  %v0 = load <8 x float>, <8 x float>* %a1, align 8
+  %v0 = load <8 x float>, ptr %a1, align 8
   %v1 = fcmp olt <8 x float> %v0, zeroinitializer
-  %v2 = load <8 x float>, <8 x float>* %a0, align 8
+  %v2 = load <8 x float>, ptr %a0, align 8
   %v3 = fcmp olt <8 x float> %v2, zeroinitializer
   %v4 = and <8 x i1> %v1, %v3
   %v5 = zext <8 x i1> %v4 to <8 x i32>
-  store <8 x i32> %v5, <8 x i32>* @g0, align 8
+  store <8 x i32> %v5, ptr @g0, align 8
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-concat-vectors.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-concat-vectors.ll
index 3ec4c7bc5ce0d..678fda1508110 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-concat-vectors.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-concat-vectors.ll
@@ -10,7 +10,7 @@ declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #0
 declare <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32>, <16 x i32>, i32) #0
 declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32) #0
 
-define void @crash(<16 x i32>* %a0) #1 {
+define void @crash(ptr %a0) #1 {
 b0:
   %v1 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 0) #0
   %v2 = bitcast <16 x i32> %v1 to <32 x i16>
@@ -20,7 +20,7 @@ b0:
   %v6 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v5) #0
   %v7 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> undef, <16 x i32> %v6, i32 -2) #0
   %v8 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v7)
-  store <16 x i32> %v8, <16 x i32>* %a0, align 2
+  store <16 x i32> %v8, ptr %a0, align 2
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-const-splat-bitcast.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-const-splat-bitcast.ll
index 4afc60b1d451d..4e378b71f5a7d 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-const-splat-bitcast.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-const-splat-bitcast.ll
@@ -15,7 +15,7 @@ target triple = "hexagon"
 
 define i32 @fred() #0 {
 b0:
-  %v1 = load <8 x i16>, <8 x i16>* @g0, align 2
+  %v1 = load <8 x i16>, ptr @g0, align 2
   %v2 = icmp sgt <8 x i16> %v1, <i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11>
   %v3 = zext <8 x i1> %v2 to <8 x i32>
   %v4 = add nuw nsw <8 x i32> zeroinitializer, %v3

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-const-splat-imm.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-const-splat-imm.ll
index f3827fd0de02c..3576f63d11abc 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-const-splat-imm.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-const-splat-imm.ll
@@ -7,16 +7,16 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-@g0 = external dllexport local_unnamed_addr global i32 (i32, i32, i8*)*, align 4
+@g0 = external dllexport local_unnamed_addr global ptr, align 4
 
 ; Function Attrs: noinline
-define dso_local fastcc void @f0(i8* %a0, i32 %a1) unnamed_addr #0 {
+define dso_local fastcc void @f0(ptr %a0, i32 %a1) unnamed_addr #0 {
 b0:
   br i1 undef, label %b2, label %b1
 
 b1:                                               ; preds = %b0
   %v0 = add nsw <8 x i32> zeroinitializer, <i32 -22, i32 -22, i32 -22, i32 -22, i32 -22, i32 -22, i32 -22, i32 -22>
-  %v1 = load <8 x i32>, <8 x i32>* undef, align 32
+  %v1 = load <8 x i32>, ptr undef, align 32
   %v2 = shl <8 x i32> %v1, <i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24>
   %v3 = ashr exact <8 x i32> %v2, <i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24>
   %v4 = add nsw <8 x i32> %v3, <i32 128, i32 128, i32 128, i32 128, i32 128, i32 128, i32 128, i32 128>
@@ -43,7 +43,7 @@ b1:                                               ; preds = %b0
   %v25 = fptosi <8 x float> %v24 to <8 x i8>
   %v26 = sext <8 x i8> %v25 to <8 x i32>
   %v27 = add nsw <8 x i32> %v26, <i32 128, i32 128, i32 128, i32 128, i32 128, i32 128, i32 128, i32 128>
-  %v28 = load <8 x i32>, <8 x i32>* undef, align 16
+  %v28 = load <8 x i32>, ptr undef, align 16
   %v29 = mul nsw <8 x i32> %v27, %v28
   %v30 = sext <8 x i32> %v29 to <8 x i64>
   %v31 = mul nsw <8 x i64> %v30, <i64 1077952632, i64 1077952632, i64 1077952632, i64 1077952632, i64 1077952632, i64 1077952632, i64 1077952632, i64 1077952632>
@@ -78,11 +78,10 @@ b1:                                               ; preds = %b0
   %v60 = call <8 x i32> @llvm.smax.v8i32(<8 x i32> %v59, <8 x i32> <i32 -186, i32 -186, i32 -186, i32 -186, i32 -186, i32 -186, i32 -186, i32 -186>)
   %v61 = trunc <8 x i32> %v60 to <8 x i8>
   %v62 = add <8 x i8> %v61, <i8 58, i8 58, i8 58, i8 58, i8 58, i8 58, i8 58, i8 58>
-  %v63 = getelementptr inbounds i8, i8* %a0, i32 undef
-  %v64 = bitcast i8* %v63 to <8 x i8>*
-  store <8 x i8> %v62, <8 x i8>* %v64, align 8
-  %v65 = load i32 (i32, i32, i8*)*, i32 (i32, i32, i8*)** @g0, align 4
-  %v66 = tail call i32 %v65(i32 14, i32 %a1, i8* nonnull undef)
+  %v63 = getelementptr inbounds i8, ptr %a0, i32 undef
+  store <8 x i8> %v62, ptr %v63, align 8
+  %v65 = load ptr, ptr @g0, align 4
+  %v66 = tail call i32 %v65(i32 14, i32 %a1, ptr nonnull undef)
   unreachable
 
 b2:                                               ; preds = %b0

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-const-vector.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-const-vector.ll
index 0ed1e0c562a98..fbd72b275c392 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-const-vector.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-const-vector.ll
@@ -3,8 +3,8 @@
 ; Check that the elements of the constants have correct type.
 ; CHECK: .half 31
 
-define void @fred(<32 x i16>* %p) #0 {
-  store <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23, i16 24, i16 25, i16 26, i16 27, i16 28, i16 29, i16 30, i16 31>, <32 x i16>* %p, align 64
+define void @fred(ptr %p) #0 {
+  store <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23, i16 24, i16 25, i16 26, i16 27, i16 28, i16 29, i16 30, i16 31>, ptr %p, align 64
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-expand-unaligned-loads-noindexed.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-expand-unaligned-loads-noindexed.ll
index b2b71bf062858..7b6b78bf961f5 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-expand-unaligned-loads-noindexed.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-expand-unaligned-loads-noindexed.ll
@@ -6,47 +6,46 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define dso_local void @f0(i8* %a0, i8* %a1) local_unnamed_addr #0 {
+define dso_local void @f0(ptr %a0, ptr %a1) local_unnamed_addr #0 {
 b0:
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ %v18, %b1 ], [ 0, %b0 ]
-  %v1 = getelementptr inbounds i8, i8* %a1, i32 %v0
-  %v2 = load <64 x i8>, <64 x i8>* undef, align 1, !tbaa !0, !alias.scope !3
+  %v1 = getelementptr inbounds i8, ptr %a1, i32 %v0
+  %v2 = load <64 x i8>, ptr undef, align 1, !tbaa !0, !alias.scope !3
   %v3 = add <64 x i8> zeroinitializer, %v2
-  %v4 = getelementptr inbounds i8, i8* %a0, i32 undef
-  %v5 = getelementptr inbounds i8, i8* %a0, i32 undef
-  %v6 = getelementptr inbounds i8, i8* %a0, i32 undef
-  store i8 0, i8* undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
+  %v4 = getelementptr inbounds i8, ptr %a0, i32 undef
+  %v5 = getelementptr inbounds i8, ptr %a0, i32 undef
+  %v6 = getelementptr inbounds i8, ptr %a0, i32 undef
+  store i8 0, ptr undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
   %v7 = extractelement <64 x i8> %v3, i32 12
-  store i8 %v7, i8* undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
-  store i8 0, i8* undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
-  store i8 undef, i8* %v4, align 1, !tbaa !0, !alias.scope !6, !noalias !8
-  store i8 0, i8* %v5, align 1, !tbaa !0, !alias.scope !6, !noalias !8
-  store i8 0, i8* undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
+  store i8 %v7, ptr undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
+  store i8 0, ptr undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
+  store i8 undef, ptr %v4, align 1, !tbaa !0, !alias.scope !6, !noalias !8
+  store i8 0, ptr %v5, align 1, !tbaa !0, !alias.scope !6, !noalias !8
+  store i8 0, ptr undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
   %v8 = extractelement <64 x i8> %v3, i32 36
-  store i8 %v8, i8* undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
-  store i8 0, i8* undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
+  store i8 %v8, ptr undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
+  store i8 0, ptr undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
   %v9 = extractelement <64 x i8> %v3, i32 38
-  store i8 %v9, i8* undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
-  store i8 0, i8* undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
+  store i8 %v9, ptr undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
+  store i8 0, ptr undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
   %v10 = extractelement <64 x i8> %v3, i32 41
-  store i8 %v10, i8* %v6, align 1, !tbaa !0, !alias.scope !6, !noalias !8
-  store i8 0, i8* undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
-  store i8 undef, i8* undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
+  store i8 %v10, ptr %v6, align 1, !tbaa !0, !alias.scope !6, !noalias !8
+  store i8 0, ptr undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
+  store i8 undef, ptr undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
   %v11 = extractelement <64 x i8> %v3, i32 55
-  store i8 %v11, i8* undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
-  store i8 0, i8* undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
+  store i8 %v11, ptr undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
+  store i8 0, ptr undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
   %v12 = extractelement <64 x i8> %v3, i32 58
-  store i8 %v12, i8* undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
-  store i8 0, i8* undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
-  %v13 = bitcast i8* %v1 to <64 x i8>*
-  %v14 = load <64 x i8>, <64 x i8>* %v13, align 1, !tbaa !0, !alias.scope !3
+  store i8 %v12, ptr undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
+  store i8 0, ptr undef, align 1, !tbaa !0, !alias.scope !6, !noalias !8
+  %v14 = load <64 x i8>, ptr %v1, align 1, !tbaa !0, !alias.scope !3
   %v15 = add <64 x i8> zeroinitializer, %v14
-  %v16 = getelementptr inbounds i8, i8* %a0, i32 undef
+  %v16 = getelementptr inbounds i8, ptr %a0, i32 undef
   %v17 = extractelement <64 x i8> %v15, i32 23
-  store i8 %v17, i8* %v16, align 1, !tbaa !0, !alias.scope !6, !noalias !8
+  store i8 %v17, ptr %v16, align 1, !tbaa !0, !alias.scope !6, !noalias !8
   %v18 = add i32 %v0, 64
   br label %b1
 }

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-expand-unaligned-loads.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-expand-unaligned-loads.ll
index ca1c17470136a..c2de46a17b9dc 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-expand-unaligned-loads.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-expand-unaligned-loads.ll
@@ -4,9 +4,9 @@
 ; CHECK-DAG: v[[V00:[0-9]+]] = vmem(r[[B00:[0-9]+]]+#0)
 ; CHECK-DAG: v[[V01:[0-9]+]] = vmem(r[[B00]]+#1)
 ; CHECK: valign(v[[V01]],v[[V00]],r[[B00]])
-define void @test_00(<64 x i8>* %p, <64 x i8>* %q) #0 {
-  %v0 = load <64 x i8>, <64 x i8>* %p, align 1
-  store <64 x i8> %v0, <64 x i8>* %q, align 1
+define void @test_00(ptr %p, ptr %q) #0 {
+  %v0 = load <64 x i8>, ptr %p, align 1
+  store <64 x i8> %v0, ptr %q, align 1
   ret void
 }
 
@@ -17,9 +17,9 @@ define void @test_00(<64 x i8>* %p, <64 x i8>* %q) #0 {
 ; CHECK: }
 ; CHECK-DAG: valign(v[[V11]],v[[V10]],r[[B01]])
 ; CHECK-DAG: valign(v[[V12]],v[[V11]],r[[B01]])
-define void @test_01(<128 x i8>* %p, <128 x i8>* %q) #0 {
-  %v0 = load <128 x i8>, <128 x i8>* %p, align 1
-  store <128 x i8> %v0, <128 x i8>* %q, align 1
+define void @test_01(ptr %p, ptr %q) #0 {
+  %v0 = load <128 x i8>, ptr %p, align 1
+  store <128 x i8> %v0, ptr %q, align 1
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-extractelt-illegal-type.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-extractelt-illegal-type.ll
index cc44c149ca724..0d27a0a4d9442 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-extractelt-illegal-type.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-extractelt-illegal-type.ll
@@ -9,9 +9,9 @@ target triple = "hexagon"
 declare i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32, i32) #0
 
 ; Function Attrs: nounwind readonly
-define dso_local signext i16 @f0(i16* nocapture readonly %a0) local_unnamed_addr #1 {
+define dso_local signext i16 @f0(ptr nocapture readonly %a0) local_unnamed_addr #1 {
 b0:
-  %v0 = load <8 x i16>, <8 x i16>* undef, align 2, !tbaa !0
+  %v0 = load <8 x i16>, ptr undef, align 2, !tbaa !0
   %v1 = shufflevector <8 x i16> %v0, <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
   %v2 = shufflevector <8 x i16> %v1, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
   %v3 = lshr <16 x i16> %v2, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-hvx-concat-truncate.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-hvx-concat-truncate.ll
index 73e863b3f23e0..c0d985d40a980 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-hvx-concat-truncate.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-hvx-concat-truncate.ll
@@ -6,20 +6,18 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define dllexport void @f0(i8* %a0) #0 {
+define dllexport void @f0(ptr %a0) #0 {
 b0:
-  %v0 = bitcast i8* %a0 to i32*
-  %v1 = getelementptr inbounds i32, i32* %v0, i32 undef
-  %v2 = bitcast i32* %v1 to <7 x i32>*
-  %v3 = load i8, i8* undef, align 1
+  %v1 = getelementptr inbounds i32, ptr %a0, i32 undef
+  %v3 = load i8, ptr undef, align 1
   %v4 = insertelement <7 x i8> undef, i8 %v3, i32 0
   %v5 = shufflevector <7 x i8> %v4, <7 x i8> undef, <7 x i32> zeroinitializer
   %v6 = zext <7 x i8> %v5 to <7 x i32>
-  %v7 = load <7 x i8>, <7 x i8>* undef, align 1
+  %v7 = load <7 x i8>, ptr undef, align 1
   %v8 = zext <7 x i8> %v7 to <7 x i32>
   %v9 = mul nsw <7 x i32> %v6, %v8
   %v10 = add nsw <7 x i32> %v9, zeroinitializer
-  store <7 x i32> %v10, <7 x i32>* %v2, align 4
+  store <7 x i32> %v10, ptr %v1, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-mstore-fp16.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-mstore-fp16.ll
index 923660cfecc05..2b11c0631a12f 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-mstore-fp16.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-mstore-fp16.ll
@@ -7,11 +7,11 @@ target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i
 target triple = "hexagon"
 
 define dllexport void @fred() #0 {
-  tail call void @llvm.masked.store.v64f16.p0v64f16(<64 x half> <half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef>, <64 x half>* undef, i32 64, <64 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false>)
+  tail call void @llvm.masked.store.v64f16.p0(<64 x half> <half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef, half 0xHFBFF, half undef>, ptr undef, i32 64, <64 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false>)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind willreturn writeonly
-declare void @llvm.masked.store.v64f16.p0v64f16(<64 x half>, <64 x half>*, i32 immarg, <64 x i1>) #0
+declare void @llvm.masked.store.v64f16.p0(<64 x half>, ptr, i32 immarg, <64 x i1>) #0
 
 attributes #0 = { argmemonly nounwind willreturn writeonly "target-cpu"="hexagonv69" "target-features"="+hvxv69,+hvx-length128b,+hvx-qfloat" }

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-q-legalization-loop.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-q-legalization-loop.ll
index 949f86cdb089f..e288f5260c14b 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-q-legalization-loop.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-q-legalization-loop.ll
@@ -18,7 +18,7 @@ declare <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32>) #0
 define void @f0() local_unnamed_addr #1 {
 b0:
   %v0 = tail call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> undef, i32 16843009)
-  %v1 = getelementptr inbounds %s.0, %s.0* null, i32 0, i32 0, i32 3
+  %v1 = getelementptr inbounds %s.0, ptr null, i32 0, i32 0, i32 3
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
@@ -31,7 +31,7 @@ b1:                                               ; preds = %b1, %b0
   %v8 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v7)
   %v9 = tail call <64 x i32> @llvm.hexagon.V6.vdealvdd.128B(<32 x i32> undef, <32 x i32> %v8, i32 -32)
   %v10 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v9)
-  store <32 x i32> %v10, <32 x i32>* %v1, align 128
+  store <32 x i32> %v10, ptr %v1, align 128
   %v11 = add nuw nsw i32 %v2, 1
   br label %b1
 }

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-q2v-pair.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-q2v-pair.ll
index e2fa59b3e22bd..f22b99c852cd0 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-q2v-pair.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-q2v-pair.ll
@@ -3,13 +3,13 @@
 ; Make sure that this doesn't crash.
 ; CHECK: vadd
 
-define void @foo(<64 x i32>* %a0, <64 x i32>* %a1) #0 {
-  %v0 = load <64 x i32>, <64 x i32>* %a0, align 128
-  %v1 = load <64 x i32>, <64 x i32>* %a1, align 128
+define void @foo(ptr %a0, ptr %a1) #0 {
+  %v0 = load <64 x i32>, ptr %a0, align 128
+  %v1 = load <64 x i32>, ptr %a1, align 128
   %v2 = icmp sgt <64 x i32> %v0, zeroinitializer
   %v3 = sext <64 x i1> %v2 to <64 x i32>
   %v4 = add nsw <64 x i32> %v1, %v3
-  store <64 x i32> %v4, <64 x i32>* %a1, align 128
+  store <64 x i32> %v4, ptr %a1, align 128
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-select-const.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-select-const.ll
index 7b5b58bb5d012..1f39acdce5e38 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-select-const.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-select-const.ll
@@ -6,7 +6,7 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon-unknown--elf"
 
-define void @fred(<16 x i32>* %a0, <16 x i32>* %a1) #0 {
+define void @fred(ptr %a0, ptr %a1) #0 {
 b0:
   %v1 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> undef, <16 x i32> <i32 151388928, i32 353505036, i32 555621144, i32 757737252, i32 959853360, i32 1161969468, i32 1364085576, i32 1566201684, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, <16 x i32> undef, i32 3)
   %v2 = bitcast <16 x i32> %v1 to <64 x i8>
@@ -14,14 +14,14 @@ b0:
   %v4 = shufflevector <32 x i8> zeroinitializer, <32 x i8> %v3, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
   %v5 = bitcast <64 x i8> %v4 to <16 x i32>
   %v6 = tail call <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32> %v5)
-  store <16 x i32> %v6, <16 x i32>* %a0, align 1
+  store <16 x i32> %v6, ptr %a0, align 1
   %v7 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> undef, <16 x i32> <i32 151388928, i32 353505036, i32 555621144, i32 757737252, i32 959853360, i32 1161969468, i32 1364085576, i32 1566201684, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, <16 x i32> zeroinitializer, i32 3)
   %v8 = bitcast <16 x i32> %v7 to <64 x i8>
   %v9 = shufflevector <64 x i8> %v8, <64 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %v10 = shufflevector <32 x i8> %v9, <32 x i8> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
   %v11 = bitcast <64 x i8> %v10 to <16 x i32>
   %v12 = tail call <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32> %v11)
-  store <16 x i32> %v12, <16 x i32>* %a1, align 1
+  store <16 x i32> %v12, ptr %a1, align 1
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-sext-inreg.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-sext-inreg.ll
index 72a8e2868c949..72da335ba7988 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-sext-inreg.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-sext-inreg.ll
@@ -8,9 +8,9 @@ target triple = "hexagon"
 
 ; CHECK-LABEL: danny:
 ; CHECK: memh
-define void @danny(i16* %a0) #0 {
+define void @danny(ptr %a0) #0 {
 b0:
-  %v1 = load i16, i16* %a0, align 2
+  %v1 = load i16, ptr %a0, align 2
   %v2 = insertelement <8 x i16> undef, i16 %v1, i32 6
   %v3 = insertelement <8 x i16> %v2, i16 undef, i32 7
   %v4 = sext <8 x i16> %v3 to <8 x i32>
@@ -24,15 +24,15 @@ b0:
   %v12 = sub nsw <8 x i32> zeroinitializer, %v11
   %v13 = trunc <8 x i32> %v12 to <8 x i16>
   %v14 = extractelement <8 x i16> %v13, i32 7
-  store i16 %v14, i16* %a0, align 2
+  store i16 %v14, ptr %a0, align 2
   ret void
 }
 
 ; CHECK-LABEL: sammy:
 ; CHECK: memh
-define void @sammy(i16* %a0) #1 {
+define void @sammy(ptr %a0) #1 {
 b0:
-  %v1 = load i16, i16* %a0, align 2
+  %v1 = load i16, ptr %a0, align 2
   %v2 = insertelement <16 x i16> undef, i16 %v1, i32 14
   %v3 = insertelement <16 x i16> %v2, i16 undef, i32 15
   %v4 = sext <16 x i16> %v3 to <16 x i32>
@@ -46,7 +46,7 @@ b0:
   %v12 = sub nsw <16 x i32> zeroinitializer, %v11
   %v13 = trunc <16 x i32> %v12 to <16 x i16>
   %v14 = extractelement <16 x i16> %v13, i32 15
-  store i16 %v14, i16* %a0, align 2
+  store i16 %v14, ptr %a0, align 2
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-shift-byte.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-shift-byte.ll
index 269bfe6b9f559..c9e5e2f3c2ea8 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-shift-byte.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-shift-byte.ll
@@ -6,7 +6,7 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define void @fred(<64 x i8>* %a0) #0 {
+define void @fred(ptr %a0) #0 {
 b0:
   br label %b1
 
@@ -17,7 +17,7 @@ b1:                                               ; preds = %b9, %b0
   %v5 = trunc <64 x i32> %v4 to <64 x i8>
   %v6 = xor <64 x i8> %v5, <i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128>
   %v7 = sub <64 x i8> zeroinitializer, %v6
-  store <64 x i8> %v7, <64 x i8>* %a0, align 64
+  store <64 x i8> %v7, ptr %a0, align 64
   br i1 false, label %b8, label %b9
 
 b8:                                               ; preds = %b1

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-shuffle-gather.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-shuffle-gather.ll
index 80b1e7b36cbb4..b0c47cd7ded15 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-shuffle-gather.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-shuffle-gather.ll
@@ -24,7 +24,7 @@ target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i
 target triple = "hexagon"
 
 ; Function Attrs: norecurse nounwind
-define void @f0(i32* nocapture %a0, i32* nocapture readonly %a1, i32 %a2) #0 {
+define void @f0(ptr nocapture %a0, ptr nocapture readonly %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp eq i32 %a2, 0
   br i1 %v0, label %b7, label %b1
@@ -42,10 +42,10 @@ b2:                                               ; preds = %b6, %b3, %b1
 
 b3:                                               ; preds = %b1
   %v6 = and i32 %a2, -2
-  %v7 = getelementptr i32, i32* %a0, i32 %v6
-  %v8 = getelementptr i32, i32* %a1, i32 %v6
-  %v9 = icmp ugt i32* %v8, %a0
-  %v10 = icmp ugt i32* %v7, %a1
+  %v7 = getelementptr i32, ptr %a0, i32 %v6
+  %v8 = getelementptr i32, ptr %a1, i32 %v6
+  %v9 = icmp ugt ptr %v8, %a0
+  %v10 = icmp ugt ptr %v7, %a1
   %v11 = and i1 %v9, %v10
   br i1 %v11, label %b2, label %b4
 
@@ -58,24 +58,21 @@ b5:                                               ; preds = %b5, %b4
   %v14 = phi i32 [ 0, %b4 ], [ %v34, %b5 ]
   %v15 = shl i32 %v14, 1
   %v16 = or i32 %v15, 1
-  %v17 = getelementptr inbounds i32, i32* %a1, i32 -1
-  %v18 = getelementptr inbounds i32, i32* %v17, i32 %v16
-  %v19 = bitcast i32* %v18 to <64 x i32>*
-  %v20 = load <64 x i32>, <64 x i32>* %v19, align 4, !tbaa !1
+  %v17 = getelementptr inbounds i32, ptr %a1, i32 -1
+  %v18 = getelementptr inbounds i32, ptr %v17, i32 %v16
+  %v20 = load <64 x i32>, ptr %v18, align 4, !tbaa !1
   %v21 = shufflevector <64 x i32> %v20, <64 x i32> undef, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
   %v22 = shufflevector <64 x i32> %v20, <64 x i32> undef, <32 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63>
-  %v23 = getelementptr inbounds i32, i32* %a0, i32 %v15
-  %v24 = bitcast i32* %v23 to <64 x i32>*
-  %v25 = load <64 x i32>, <64 x i32>* %v24, align 4, !tbaa !1
+  %v23 = getelementptr inbounds i32, ptr %a0, i32 %v15
+  %v25 = load <64 x i32>, ptr %v23, align 4, !tbaa !1
   %v26 = shufflevector <64 x i32> %v25, <64 x i32> undef, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
   %v27 = shufflevector <64 x i32> %v25, <64 x i32> undef, <32 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63>
   %v28 = add nsw <32 x i32> %v26, %v22
-  %v29 = getelementptr inbounds i32, i32* %a0, i32 -1
+  %v29 = getelementptr inbounds i32, ptr %a0, i32 -1
   %v30 = add nsw <32 x i32> %v27, %v21
-  %v31 = getelementptr inbounds i32, i32* %v29, i32 %v16
-  %v32 = bitcast i32* %v31 to <64 x i32>*
+  %v31 = getelementptr inbounds i32, ptr %v29, i32 %v16
   %v33 = shufflevector <32 x i32> %v28, <32 x i32> %v30, <64 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
-  store <64 x i32> %v33, <64 x i32>* %v32, align 4, !tbaa !1
+  store <64 x i32> %v33, ptr %v31, align 4, !tbaa !1
   %v34 = add i32 %v14, 32
   %v35 = icmp eq i32 %v34, %v12
   br i1 %v35, label %b6, label %b5, !llvm.loop !5
@@ -90,18 +87,18 @@ b7:                                               ; preds = %b8, %b6, %b0
 b8:                                               ; preds = %b8, %b2
   %v37 = phi i32 [ %v49, %b8 ], [ %v5, %b2 ]
   %v38 = or i32 %v37, 1
-  %v39 = getelementptr inbounds i32, i32* %a1, i32 %v38
-  %v40 = load i32, i32* %v39, align 4, !tbaa !1
-  %v41 = getelementptr inbounds i32, i32* %a0, i32 %v37
-  %v42 = load i32, i32* %v41, align 4, !tbaa !1
+  %v39 = getelementptr inbounds i32, ptr %a1, i32 %v38
+  %v40 = load i32, ptr %v39, align 4, !tbaa !1
+  %v41 = getelementptr inbounds i32, ptr %a0, i32 %v37
+  %v42 = load i32, ptr %v41, align 4, !tbaa !1
   %v43 = add nsw i32 %v42, %v40
-  store i32 %v43, i32* %v41, align 4, !tbaa !1
-  %v44 = getelementptr inbounds i32, i32* %a1, i32 %v37
-  %v45 = load i32, i32* %v44, align 4, !tbaa !1
-  %v46 = getelementptr inbounds i32, i32* %a0, i32 %v38
-  %v47 = load i32, i32* %v46, align 4, !tbaa !1
+  store i32 %v43, ptr %v41, align 4, !tbaa !1
+  %v44 = getelementptr inbounds i32, ptr %a1, i32 %v37
+  %v45 = load i32, ptr %v44, align 4, !tbaa !1
+  %v46 = getelementptr inbounds i32, ptr %a0, i32 %v38
+  %v47 = load i32, ptr %v46, align 4, !tbaa !1
   %v48 = add nsw i32 %v47, %v45
-  store i32 %v48, i32* %v46, align 4, !tbaa !1
+  store i32 %v48, ptr %v46, align 4, !tbaa !1
   %v49 = add nuw nsw i32 %v37, 2
   %v50 = icmp eq i32 %v49, %a2
   br i1 %v50, label %b7, label %b8, !llvm.loop !7

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-split-masked.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-split-masked.ll
index 61bcbce6e6422..15739aaeaadf0 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-split-masked.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-split-masked.ll
@@ -8,7 +8,7 @@ target triple = "hexagon"
 
 define void @f0() #0 {
 b0:
-  %v0 = call <64 x i32> @llvm.masked.load.v64i32.p0v64i32(<64 x i32>* nonnull undef, i32 4, <64 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <64 x i32> undef)
+  %v0 = call <64 x i32> @llvm.masked.load.v64i32.p0(ptr nonnull undef, i32 4, <64 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <64 x i32> undef)
   %v1 = icmp sgt <64 x i32> %v0, zeroinitializer
   %v2 = sext <64 x i1> %v1 to <64 x i32>
   %v3 = add nsw <64 x i32> zeroinitializer, %v2
@@ -17,15 +17,15 @@ b0:
   %v6 = select <64 x i1> %v5, <64 x i32> %v4, <64 x i32> zeroinitializer
   %v7 = select <64 x i1> zeroinitializer, <64 x i32> undef, <64 x i32> %v6
   %v8 = trunc <64 x i32> %v7 to <64 x i16>
-  call void @llvm.masked.store.v64i16.p0v64i16(<64 x i16> %v8, <64 x i16>* undef, i32 2, <64 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>)
+  call void @llvm.masked.store.v64i16.p0(<64 x i16> %v8, ptr undef, i32 2, <64 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind readonly willreturn
-declare <64 x i32> @llvm.masked.load.v64i32.p0v64i32(<64 x i32>*, i32 immarg, <64 x i1>, <64 x i32>) #1
+declare <64 x i32> @llvm.masked.load.v64i32.p0(ptr, i32 immarg, <64 x i1>, <64 x i32>) #1
 
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.masked.store.v64i16.p0v64i16(<64 x i16>, <64 x i16>*, i32 immarg, <64 x i1>) #2
+declare void @llvm.masked.store.v64i16.p0(<64 x i16>, ptr, i32 immarg, <64 x i1>) #2
 
 attributes #0 = { "target-features"="+hvx-length128b,+hvxv67,+v67,-long-calls" }
 attributes #1 = { argmemonly nounwind readonly willreturn }

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-store-bitcast-v128i1.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-store-bitcast-v128i1.ll
index d8d24a052660d..cb35286fafcae 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-store-bitcast-v128i1.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-store-bitcast-v128i1.ll
@@ -4,10 +4,10 @@
 
 ; CHECK-LABEL: fred:
 ; CHECK: memd
-define void @fred(<128 x i8> %a0, <128 x i8> %a1, i128* %a2) #0 {
+define void @fred(<128 x i8> %a0, <128 x i8> %a1, ptr %a2) #0 {
   %v0 = icmp eq <128 x i8> %a0, %a1
   %v1 = bitcast <128 x i1> %v0 to i128
-  store i128 %v1, i128* %a2, align 16
+  store i128 %v1, ptr %a2, align 16
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-truncate-legal.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-truncate-legal.ll
index e9c7f9cce771e..014a90cf7f236 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-truncate-legal.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-truncate-legal.ll
@@ -8,9 +8,9 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define dllexport void @f0(i8* %a0) local_unnamed_addr #0 {
+define dllexport void @f0(ptr %a0) local_unnamed_addr #0 {
 b0:
-  %v0 = load i8, i8* undef, align 1
+  %v0 = load i8, ptr undef, align 1
   %v1 = zext i8 %v0 to i16
   %v2 = add i16 0, %v1
   %v3 = icmp sgt i16 %v2, 1
@@ -19,15 +19,14 @@ b0:
   %v6 = zext i16 %v5 to i32
   %v7 = insertelement <8 x i32> undef, i32 %v6, i32 0
   %v8 = shufflevector <8 x i32> %v7, <8 x i32> undef, <8 x i32> zeroinitializer
-  %v9 = load <8 x i16>, <8 x i16>* undef, align 2
+  %v9 = load <8 x i16>, ptr undef, align 2
   %v10 = sext <8 x i16> %v9 to <8 x i32>
   %v11 = mul nsw <8 x i32> %v8, %v10
   %v12 = add nsw <8 x i32> %v11, <i32 16384, i32 16384, i32 16384, i32 16384, i32 16384, i32 16384, i32 16384, i32 16384>
   %v13 = lshr <8 x i32> %v12, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
   %v14 = trunc <8 x i32> %v13 to <8 x i8>
-  %v15 = getelementptr inbounds i8, i8* %a0, i32 undef
-  %v16 = bitcast i8* %v15 to <8 x i8>*
-  store <8 x i8> %v14, <8 x i8>* %v16, align 1
+  %v15 = getelementptr inbounds i8, ptr %a0, i32 undef
+  store <8 x i8> %v14, ptr %v15, align 1
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-truncate.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-truncate.ll
index 48bcfb4e85b99..2384ca4f95ec4 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-truncate.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-truncate.ll
@@ -50,7 +50,7 @@ b0:
   %v0 = icmp eq <16 x i32> %a0, %a1
   %v1 = select <16 x i1> %v0, <16 x i32> %a0, <16 x i32> zeroinitializer
   %v2 = trunc <16 x i32> %v1 to <16 x i16>
-  store <16 x i16> %v2, <16 x i16>* @g0, align 2
+  store <16 x i16> %v2, ptr @g0, align 2
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-undef-not-zero.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-undef-not-zero.ll
index f8f0a7211a63d..2afc9fc0e96ab 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-undef-not-zero.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-undef-not-zero.ll
@@ -8,23 +8,21 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define dllexport void @f0(i8* noalias align 128 %a0) #0 {
+define dllexport void @f0(ptr noalias align 128 %a0) #0 {
 b0:
-  %v0 = bitcast i8* %a0 to i32*
-  %v1 = getelementptr inbounds i32, i32* %v0, i32 undef
-  %v2 = bitcast i32* %v1 to <7 x i32>*
+  %v1 = getelementptr inbounds i32, ptr %a0, i32 undef
   br label %b1
 
 b1:                                               ; preds = %b0
-  %v3 = load i8, i8* undef, align 1
+  %v3 = load i8, ptr undef, align 1
   %v4 = insertelement <7 x i8> undef, i8 %v3, i32 0
   %v5 = shufflevector <7 x i8> %v4, <7 x i8> undef, <7 x i32> zeroinitializer
   %v6 = zext <7 x i8> %v5 to <7 x i32>
-  %v7 = load <7 x i8>, <7 x i8>* undef, align 1
+  %v7 = load <7 x i8>, ptr undef, align 1
   %v8 = zext <7 x i8> %v7 to <7 x i32>
   %v9 = mul nsw <7 x i32> %v6, %v8
   %v10 = add nsw <7 x i32> %v9, zeroinitializer
-  store <7 x i32> %v10, <7 x i32>* %v2, align 4
+  store <7 x i32> %v10, ptr %v1, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-vec-ext.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-vec-ext.ll
index 1bb1fe34076ea..5565f1ea140ba 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-vec-ext.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-vec-ext.ll
@@ -6,22 +6,22 @@ target triple = "hexagon"
 ; CHECK-LABEL: danny:
 ; CHECK: vunpack
 ; CHECK-NOT: vinsert
-define void @danny(<16 x i16>* %a0, <16 x i32>* %a1) #0 {
+define void @danny(ptr %a0, ptr %a1) #0 {
 b2:
-  %v16 = load <16 x i16>, <16 x i16>* %a0, align 128
+  %v16 = load <16 x i16>, ptr %a0, align 128
   %v17 = sext <16 x i16> %v16 to <16 x i32>
-  store <16 x i32> %v17, <16 x i32>* %a1, align 128
+  store <16 x i32> %v17, ptr %a1, align 128
   ret void
 }
 
 ; CHECK-LABEL: sammy:
 ; CHECK: vunpack
 ; CHECK-NOT: vinsert
-define void @sammy(<32 x i16>* %a0, <32 x i32>* %a1) #1 {
+define void @sammy(ptr %a0, ptr %a1) #1 {
 b2:
-  %v16 = load <32 x i16>, <32 x i16>* %a0, align 128
+  %v16 = load <32 x i16>, ptr %a0, align 128
   %v17 = sext <32 x i16> %v16 to <32 x i32>
-  store <32 x i32> %v17, <32 x i32>* %a1, align 128
+  store <32 x i32> %v17, ptr %a1, align 128
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-vsplat-pair.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-vsplat-pair.ll
index 359b8045e92ea..3e6523b69af8c 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-vsplat-pair.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-vsplat-pair.ll
@@ -6,16 +6,16 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define void @fred(<64 x i8>* %a0) #0 {
+define void @fred(ptr %a0) #0 {
 b0:
-  %v1 = load <64 x i8>, <64 x i8>* %a0, align 8
+  %v1 = load <64 x i8>, ptr %a0, align 8
   %v2 = zext <64 x i8> %v1 to <64 x i32>
   %v3 = add nuw nsw <64 x i32> %v2, zeroinitializer
   %v4 = icmp ugt <64 x i32> %v3, <i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254>
   %v5 = zext <64 x i1> %v4 to <64 x i32>
   %v6 = add nuw nsw <64 x i32> %v3, %v5
   %v7 = trunc <64 x i32> %v6 to <64 x i8>
-  store <64 x i8> %v7, <64 x i8>* %a0, align 8
+  store <64 x i8> %v7, ptr %a0, align 8
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-widen-memop.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-widen-memop.ll
index bed13b1dbcc98..5a9141d1bd5c7 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-widen-memop.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-widen-memop.ll
@@ -7,29 +7,27 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define dso_local void @f0(i16* %a0) local_unnamed_addr #0 {
+define dso_local void @f0(ptr %a0) local_unnamed_addr #0 {
 b0:
-  %v0 = getelementptr i16, i16* %a0, i32 8
-  %v1 = getelementptr i16, i16* %v0, i32 0
+  %v0 = getelementptr i16, ptr %a0, i32 8
   %v2 = icmp eq i32 0, 0
   %v3 = insertelement <8 x i1> undef, i1 %v2, i64 0
   %v4 = shufflevector <8 x i1> %v3, <8 x i1> undef, <8 x i32> zeroinitializer
-  %v5 = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* nonnull undef, i32 4, <8 x i1> %v4, <8 x i32> undef)
+  %v5 = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr nonnull undef, i32 4, <8 x i1> %v4, <8 x i32> undef)
   %v6 = sub nsw <8 x i32> zeroinitializer, %v5
   %v7 = add nsw <8 x i32> %v6, zeroinitializer
   %v8 = add <8 x i32> zeroinitializer, %v7
   %v9 = lshr <8 x i32> %v8, <i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6>
   %v10 = trunc <8 x i32> %v9 to <8 x i16>
-  %v11 = bitcast i16* %v1 to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %v10, <8 x i16>* %v11, i32 2, <8 x i1> %v4)
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %v10, ptr %v0, i32 2, <8 x i1> %v4)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind readonly willreturn
-declare <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>*, i32 immarg, <8 x i1>, <8 x i32>) #1
+declare <8 x i32> @llvm.masked.load.v8i32.p0(ptr, i32 immarg, <8 x i1>, <8 x i32>) #1
 
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32 immarg, <8 x i1>) #2
+declare void @llvm.masked.store.v8i16.p0(<8 x i16>, ptr, i32 immarg, <8 x i1>) #2
 
 attributes #0 = { "target-features"="+hvx-length64b,+hvxv65,+v65,-long-calls,-packets" }
 attributes #1 = { argmemonly nounwind readonly willreturn }

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-widen-store.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-widen-store.ll
index 311450502f248..500451e9c4ec0 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-widen-store.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-widen-store.ll
@@ -3,12 +3,12 @@
 ; CHECK-LABEL: f0:
 ; CHECK: q[[Q0:[0-3]]] = vsetq(r{{[0-9]+}})
 ; CHECK: if (q[[Q0]]) vmem({{.*}}) = v
-define void @f0(<32 x i8>* %a0) #0 {
-  %v0 = load <32 x i8>, <32 x i8>* %a0, align 128
+define void @f0(ptr %a0) #0 {
+  %v0 = load <32 x i8>, ptr %a0, align 128
   %v1 = insertelement <32 x i8> undef, i8 1, i32 0
   %v2 = shufflevector <32 x i8> %v1, <32 x i8> undef, <32 x i32> zeroinitializer
   %v3 = add <32 x i8> %v0, %v2
-  store <32 x i8> %v3, <32 x i8>* %a0, align 128
+  store <32 x i8> %v3, ptr %a0, align 128
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-widen-truncate-illegal-elem.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-widen-truncate-illegal-elem.ll
index 3f55d22308c3d..d6ebb9f4855b9 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-widen-truncate-illegal-elem.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-widen-truncate-illegal-elem.ll
@@ -8,7 +8,7 @@ target triple = "hexagon"
 
 define dso_local void @f0() local_unnamed_addr #0 {
 b0:
-  %v0 = load i32, i32* undef, align 4
+  %v0 = load i32, ptr undef, align 4
   %v1 = select i1 undef, i32 0, i32 1073741823
   %v2 = shl i32 %v1, 0
   %v3 = sext i32 %v0 to i64
@@ -19,15 +19,14 @@ b0:
   %v8 = sext i32 %v7 to i64
   %v9 = insertelement <32 x i64> undef, i64 %v8, i32 0
   %v10 = shufflevector <32 x i64> %v9, <32 x i64> undef, <32 x i32> zeroinitializer
-  %v11 = getelementptr i32, i32* null, i32 32
-  %v12 = bitcast i32* %v11 to <32 x i32>*
-  %v13 = load <32 x i32>, <32 x i32>* %v12, align 4
+  %v11 = getelementptr i32, ptr null, i32 32
+  %v13 = load <32 x i32>, ptr %v11, align 4
   %v14 = shl <32 x i32> %v13, zeroinitializer
   %v15 = sext <32 x i32> %v14 to <32 x i64>
   %v16 = mul nsw <32 x i64> %v10, %v15
   %v17 = lshr <32 x i64> %v16, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
   %v18 = trunc <32 x i64> %v17 to <32 x i32>
-  store <32 x i32> %v18, <32 x i32>* %v12, align 4
+  store <32 x i32> %v18, ptr %v11, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-widen-truncate-op.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-widen-truncate-op.ll
index 404d3d1ff2606..0aaf54ae475e5 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-widen-truncate-op.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-widen-truncate-op.ll
@@ -8,29 +8,27 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define dso_local void @f0(i16* %a0) local_unnamed_addr #0 {
+define dso_local void @f0(ptr %a0) local_unnamed_addr #0 {
 b0:
-  %v0 = getelementptr i16, i16* %a0, i32 8
-  %v1 = getelementptr i16, i16* %v0, i32 0
+  %v0 = getelementptr i16, ptr %a0, i32 8
   %v2 = icmp eq i32 0, 0
   %v3 = insertelement <8 x i1> undef, i1 %v2, i64 0
   %v4 = shufflevector <8 x i1> %v3, <8 x i1> undef, <8 x i32> zeroinitializer
-  %v5 = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* nonnull undef, i32 4, <8 x i1> %v4, <8 x i32> undef)
+  %v5 = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr nonnull undef, i32 4, <8 x i1> %v4, <8 x i32> undef)
   %v6 = sub nsw <8 x i32> zeroinitializer, %v5
   %v7 = add nsw <8 x i32> %v6, zeroinitializer
   %v8 = add <8 x i32> zeroinitializer, %v7
   %v9 = lshr <8 x i32> %v8, <i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6>
   %v10 = trunc <8 x i32> %v9 to <8 x i16>
-  %v11 = bitcast i16* %v1 to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %v10, <8 x i16>* %v11, i32 2, <8 x i1> %v4)
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %v10, ptr %v0, i32 2, <8 x i1> %v4)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind readonly willreturn
-declare <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>*, i32 immarg, <8 x i1>, <8 x i32>) #1
+declare <8 x i32> @llvm.masked.load.v8i32.p0(ptr, i32 immarg, <8 x i1>, <8 x i32>) #1
 
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32 immarg, <8 x i1>) #2
+declare void @llvm.masked.store.v8i16.p0(<8 x i16>, ptr, i32 immarg, <8 x i1>) #2
 
 attributes #0 = { "target-features"="+hvx-length64b,+hvxv65,+v65,-long-calls,-packets" }
 attributes #1 = { argmemonly nounwind readonly willreturn }

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-widen-truncate-pair.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-widen-truncate-pair.ll
index 23e8b590b2d8a..a40cc0ba06d55 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-widen-truncate-pair.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-widen-truncate-pair.ll
@@ -7,9 +7,9 @@
 ; CHECK-LABEL: fred:
 ; CHECK: v[[V0:[0-9]+]].h = vpacke(v1.w,v0.w)
 ; CHECK:                  = vpacke({{.*}},v[[V0]].h)
-define void @fred(<32 x i8>* %a0, <32 x i32> %a1) #0 {
+define void @fred(ptr %a0, <32 x i32> %a1) #0 {
   %v0 = trunc <32 x i32> %a1 to <32 x i8>
-  store <32 x i8> %v0, <32 x i8>* %a0, align 32
+  store <32 x i8> %v0, ptr %a0, align 32
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-widen-truncate.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-widen-truncate.ll
index 6d5018757c7a6..5921a56a3a99c 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/isel-widen-truncate.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-widen-truncate.ll
@@ -13,12 +13,12 @@ define dllexport void @f0(<32 x i32> %a0) local_unnamed_addr #0 {
 b0:
   %v0 = trunc <32 x i32> %a0 to <32 x i8>
   %v1 = shufflevector <32 x i8> %v0, <32 x i8> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-  tail call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> %v1, <128 x i8>* undef, i32 128, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>)
+  tail call void @llvm.masked.store.v128i8.p0(<128 x i8> %v1, ptr undef, i32 128, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.masked.store.v128i8.p0v128i8(<128 x i8>, <128 x i8>*, i32 immarg, <128 x i1>) #1
+declare void @llvm.masked.store.v128i8.p0(<128 x i8>, ptr, i32 immarg, <128 x i1>) #1
 
 attributes #0 = { "target-cpu"="hexagonv66" "target-features"="+hvx,+hvx-length128b" }
 attributes #1 = { argmemonly nounwind willreturn }

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/lower-insert-elt.ll b/llvm/test/CodeGen/Hexagon/autohvx/lower-insert-elt.ll
index 22afa4a3c1fec..0f5cda4271a68 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/lower-insert-elt.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/lower-insert-elt.ll
@@ -9,7 +9,7 @@ target triple = "hexagon-unknown--elf"
 
 define void @fred() local_unnamed_addr #0 {
 b0:
-  %v1 = load <64 x i8>, <64 x i8>* undef, align 64
+  %v1 = load <64 x i8>, ptr undef, align 64
   %v2 = insertelement <64 x i8> %v1, i8 0, i32 0
   br label %b3
 

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/masked-vmem-basic.ll b/llvm/test/CodeGen/Hexagon/autohvx/masked-vmem-basic.ll
index 36526f2fade53..2096977d011b9 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/masked-vmem-basic.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/masked-vmem-basic.ll
@@ -3,29 +3,29 @@
 ; CHECK-LABEL: f0:
 ; CHECK: vmemu
 ; CHECK: vmux
-define <128 x i8> @f0(<128 x i8>* %a0, i32 %a1, i32 %a2) #0 {
+define <128 x i8> @f0(ptr %a0, i32 %a1, i32 %a2) #0 {
   %q0 = call <128 x i1> @llvm.hexagon.V6.pred.scalar2.128B(i32 %a2)
   %v0 = call <32 x i32> @llvm.hexagon.V6.lvsplatb.128B(i32 %a1)
   %v1 = bitcast <32 x i32> %v0 to <128 x i8>
-  %v2 = call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* %a0, i32 4, <128 x i1> %q0, <128 x i8> %v1)
+  %v2 = call <128 x i8> @llvm.masked.load.v128i8.p0(ptr %a0, i32 4, <128 x i1> %q0, <128 x i8> %v1)
   ret <128 x i8> %v2
 }
 
 ; CHECK-LABEL: f1:
 ; CHECK: vlalign
 ; CHECK: if (q{{.}}) vmem{{.*}} = v
-define void @f1(<128 x i8>* %a0, i32 %a1, i32 %a2) #0 {
+define void @f1(ptr %a0, i32 %a1, i32 %a2) #0 {
   %q0 = call <128 x i1> @llvm.hexagon.V6.pred.scalar2.128B(i32 %a2)
   %v0 = call <32 x i32> @llvm.hexagon.V6.lvsplatb.128B(i32 %a1)
   %v1 = bitcast <32 x i32> %v0 to <128 x i8>
-  call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> %v1, <128 x i8>* %a0, i32 4, <128 x i1> %q0)
+  call void @llvm.masked.store.v128i8.p0(<128 x i8> %v1, ptr %a0, i32 4, <128 x i1> %q0)
   ret void
 }
 
 declare <128 x i1> @llvm.hexagon.V6.pred.scalar2.128B(i32) #1
 declare <32 x i32> @llvm.hexagon.V6.lvsplatb.128B(i32) #1
-declare <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>*, i32 immarg, <128 x i1>, <128 x i8>) #2
-declare void @llvm.masked.store.v128i8.p0v128i8(<128 x i8>, <128 x i8>*, i32 immarg, <128 x i1>) #2
+declare <128 x i8> @llvm.masked.load.v128i8.p0(ptr, i32 immarg, <128 x i1>, <128 x i8>) #2
+declare void @llvm.masked.store.v128i8.p0(<128 x i8>, ptr, i32 immarg, <128 x i1>) #2
 
 attributes #0 = { nounwind "target-features"="+hvxv65,+hvx-length128b" }
 attributes #1 = { nounwind readnone }

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/maximize-bandwidth.ll b/llvm/test/CodeGen/Hexagon/autohvx/maximize-bandwidth.ll
index f1e85c4b6c14d..e2a9f0937c84a 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/maximize-bandwidth.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/maximize-bandwidth.ll
@@ -6,26 +6,26 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define dso_local void @example10a(i16* noalias nocapture %a0, i16* noalias nocapture readonly %a1, i16* noalias nocapture readonly %a2, i32* noalias nocapture %a3, i32* noalias nocapture readonly %a4, i32* noalias nocapture readonly %a5) local_unnamed_addr #0 {
+define dso_local void @example10a(ptr noalias nocapture %a0, ptr noalias nocapture readonly %a1, ptr noalias nocapture readonly %a2, ptr noalias nocapture %a3, ptr noalias nocapture readonly %a4, ptr noalias nocapture readonly %a5) local_unnamed_addr #0 {
 b0:
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ 0, %b0 ], [ %v13, %b1 ]
-  %v1 = getelementptr inbounds i32, i32* %a4, i32 %v0
-  %v2 = load i32, i32* %v1, align 4, !tbaa !1
-  %v3 = getelementptr inbounds i32, i32* %a5, i32 %v0
-  %v4 = load i32, i32* %v3, align 4, !tbaa !1
+  %v1 = getelementptr inbounds i32, ptr %a4, i32 %v0
+  %v2 = load i32, ptr %v1, align 4, !tbaa !1
+  %v3 = getelementptr inbounds i32, ptr %a5, i32 %v0
+  %v4 = load i32, ptr %v3, align 4, !tbaa !1
   %v5 = add nsw i32 %v4, %v2
-  %v6 = getelementptr inbounds i32, i32* %a3, i32 %v0
-  store i32 %v5, i32* %v6, align 4, !tbaa !1
-  %v7 = getelementptr inbounds i16, i16* %a1, i32 %v0
-  %v8 = load i16, i16* %v7, align 2, !tbaa !5
-  %v9 = getelementptr inbounds i16, i16* %a2, i32 %v0
-  %v10 = load i16, i16* %v9, align 2, !tbaa !5
+  %v6 = getelementptr inbounds i32, ptr %a3, i32 %v0
+  store i32 %v5, ptr %v6, align 4, !tbaa !1
+  %v7 = getelementptr inbounds i16, ptr %a1, i32 %v0
+  %v8 = load i16, ptr %v7, align 2, !tbaa !5
+  %v9 = getelementptr inbounds i16, ptr %a2, i32 %v0
+  %v10 = load i16, ptr %v9, align 2, !tbaa !5
   %v11 = add i16 %v10, %v8
-  %v12 = getelementptr inbounds i16, i16* %a0, i32 %v0
-  store i16 %v11, i16* %v12, align 2, !tbaa !5
+  %v12 = getelementptr inbounds i16, ptr %a0, i32 %v0
+  store i16 %v11, ptr %v12, align 2, !tbaa !5
   %v13 = add nuw nsw i32 %v0, 1
   %v14 = icmp eq i32 %v13, 1024
   br i1 %v14, label %b2, label %b1

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/non-simple-hvx-type.ll b/llvm/test/CodeGen/Hexagon/autohvx/non-simple-hvx-type.ll
index 18523ccc682f0..e7b4387516edf 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/non-simple-hvx-type.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/non-simple-hvx-type.ll
@@ -3,7 +3,7 @@
 
 ; Check that <24 x i32> is treated as an HVX vector type.
 
-define <24 x i32> @f0(<24 x i32>* %a0, <24 x i32> %a1, <24 x i32> %a2) #0 {
+define <24 x i32> @f0(ptr %a0, <24 x i32> %a1, <24 x i32> %a2) #0 {
 ; CHECK-LABEL: f0:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -11,11 +11,11 @@ define <24 x i32> @f0(<24 x i32>* %a0, <24 x i32> %a1, <24 x i32> %a2) #0 {
 ; CHECK-NEXT:     v0 = vmemu(r0+#0)
 ; CHECK-NEXT:    }
   %v1 = icmp ne <24 x i32> %a1, zeroinitializer
-  %v2 = call <24 x i32> @llvm.masked.load.v24i1.p0v24i1(<24 x i32>* %a0, i32 4, <24 x i1> %v1, <24 x i32> undef)
+  %v2 = call <24 x i32> @llvm.masked.load.v24i1.p0(ptr %a0, i32 4, <24 x i1> %v1, <24 x i32> undef)
   ret <24 x i32> %v2
 }
 
-declare <24 x i32> @llvm.masked.load.v24i1.p0v24i1(<24 x i32>*, i32, <24 x i1>, <24 x i32>)
+declare <24 x i32> @llvm.masked.load.v24i1.p0(ptr, i32, <24 x i1>, <24 x i32>)
 
 attributes #0 = { nounwind readnone "target-cpu"="hexagonv62" "target-features"="+hvx,+hvx-length128b" }
 

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/pred-vmem-128b.ll b/llvm/test/CodeGen/Hexagon/autohvx/pred-vmem-128b.ll
index c7f052b7a6dba..e22d79b1d81e2 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/pred-vmem-128b.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/pred-vmem-128b.ll
@@ -1,44 +1,44 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -march=hexagon < %s | FileCheck %s
 
-declare <32 x i32> @llvm.hexagon.V6.vL32b.pred.ai.128B(i1, <32 x i32>*, i32)
-declare <32 x i32> @llvm.hexagon.V6.vL32b.npred.ai.128B(i1, <32 x i32>*, i32)
-declare <32 x i32> @llvm.hexagon.V6.vL32b.nt.pred.ai.128B(i1, <32 x i32>*, i32)
-declare <32 x i32> @llvm.hexagon.V6.vL32b.nt.npred.ai.128B(i1, <32 x i32>*, i32)
-
-declare { <32 x i32>, <32 x i32>* } @llvm.hexagon.V6.vL32b.pred.pi.128B(i1, <32 x i32>*, i32)
-declare { <32 x i32>, <32 x i32>* } @llvm.hexagon.V6.vL32b.npred.pi.128B(i1, <32 x i32>*, i32)
-declare { <32 x i32>, <32 x i32>* } @llvm.hexagon.V6.vL32b.nt.pred.pi.128B(i1, <32 x i32>*, i32)
-declare { <32 x i32>, <32 x i32>* } @llvm.hexagon.V6.vL32b.nt.npred.pi.128B(i1, <32 x i32>*, i32)
-
-declare { <32 x i32>, <32 x i32>* } @llvm.hexagon.V6.vL32b.pred.ppu.128B(i1, <32 x i32>*, i32)
-declare { <32 x i32>, <32 x i32>* } @llvm.hexagon.V6.vL32b.npred.ppu.128B(i1, <32 x i32>*, i32)
-declare { <32 x i32>, <32 x i32>* } @llvm.hexagon.V6.vL32b.nt.pred.ppu.128B(i1, <32 x i32>*, i32)
-declare { <32 x i32>, <32 x i32>* } @llvm.hexagon.V6.vL32b.nt.npred.ppu.128B(i1, <32 x i32>*, i32)
-
-declare void @llvm.hexagon.V6.vS32b.pred.ai.128B(i1, <32 x i32>*, i32, <32 x i32>)
-declare void @llvm.hexagon.V6.vS32b.npred.ai.128B(i1, <32 x i32>*, i32, <32 x i32>)
-declare void @llvm.hexagon.V6.vS32Ub.pred.ai.128B(i1, <32 x i32>*, i32, <32 x i32>)
-declare void @llvm.hexagon.V6.vS32Ub.npred.ai.128B(i1, <32 x i32>*, i32, <32 x i32>)
-declare void @llvm.hexagon.V6.vS32b.nt.pred.ai.128B(i1, <32 x i32>*, i32, <32 x i32>)
-declare void @llvm.hexagon.V6.vS32b.nt.npred.ai.128B(i1, <32 x i32>*, i32, <32 x i32>)
-
-declare <32 x i32>* @llvm.hexagon.V6.vS32b.pred.pi.128B(i1, <32 x i32>*, i32, <32 x i32>)
-declare <32 x i32>* @llvm.hexagon.V6.vS32b.npred.pi.128B(i1, <32 x i32>*, i32, <32 x i32>)
-declare <32 x i32>* @llvm.hexagon.V6.vS32Ub.pred.pi.128B(i1, <32 x i32>*, i32, <32 x i32>)
-declare <32 x i32>* @llvm.hexagon.V6.vS32Ub.npred.pi.128B(i1, <32 x i32>*, i32, <32 x i32>)
-declare <32 x i32>* @llvm.hexagon.V6.vS32b.nt.pred.pi.128B(i1, <32 x i32>*, i32, <32 x i32>)
-declare <32 x i32>* @llvm.hexagon.V6.vS32b.nt.npred.pi.128B(i1, <32 x i32>*, i32, <32 x i32>)
-
-declare <32 x i32>* @llvm.hexagon.V6.vS32b.pred.ppu.128B(i1, <32 x i32>*, i32, <32 x i32>)
-declare <32 x i32>* @llvm.hexagon.V6.vS32b.npred.ppu.128B(i1, <32 x i32>*, i32, <32 x i32>)
-declare <32 x i32>* @llvm.hexagon.V6.vS32Ub.pred.ppu.128B(i1, <32 x i32>*, i32, <32 x i32>)
-declare <32 x i32>* @llvm.hexagon.V6.vS32Ub.npred.ppu.128B(i1, <32 x i32>*, i32, <32 x i32>)
-declare <32 x i32>* @llvm.hexagon.V6.vS32b.nt.pred.ppu.128B(i1, <32 x i32>*, i32, <32 x i32>)
-declare <32 x i32>* @llvm.hexagon.V6.vS32b.nt.npred.ppu.128B(i1, <32 x i32>*, i32, <32 x i32>)
-
-
-define <32 x i32> @f0(i32 %a0, <32 x i32>* %a1) #0 {
+declare <32 x i32> @llvm.hexagon.V6.vL32b.pred.ai.128B(i1, ptr, i32)
+declare <32 x i32> @llvm.hexagon.V6.vL32b.npred.ai.128B(i1, ptr, i32)
+declare <32 x i32> @llvm.hexagon.V6.vL32b.nt.pred.ai.128B(i1, ptr, i32)
+declare <32 x i32> @llvm.hexagon.V6.vL32b.nt.npred.ai.128B(i1, ptr, i32)
+
+declare { <32 x i32>, ptr } @llvm.hexagon.V6.vL32b.pred.pi.128B(i1, ptr, i32)
+declare { <32 x i32>, ptr } @llvm.hexagon.V6.vL32b.npred.pi.128B(i1, ptr, i32)
+declare { <32 x i32>, ptr } @llvm.hexagon.V6.vL32b.nt.pred.pi.128B(i1, ptr, i32)
+declare { <32 x i32>, ptr } @llvm.hexagon.V6.vL32b.nt.npred.pi.128B(i1, ptr, i32)
+
+declare { <32 x i32>, ptr } @llvm.hexagon.V6.vL32b.pred.ppu.128B(i1, ptr, i32)
+declare { <32 x i32>, ptr } @llvm.hexagon.V6.vL32b.npred.ppu.128B(i1, ptr, i32)
+declare { <32 x i32>, ptr } @llvm.hexagon.V6.vL32b.nt.pred.ppu.128B(i1, ptr, i32)
+declare { <32 x i32>, ptr } @llvm.hexagon.V6.vL32b.nt.npred.ppu.128B(i1, ptr, i32)
+
+declare void @llvm.hexagon.V6.vS32b.pred.ai.128B(i1, ptr, i32, <32 x i32>)
+declare void @llvm.hexagon.V6.vS32b.npred.ai.128B(i1, ptr, i32, <32 x i32>)
+declare void @llvm.hexagon.V6.vS32Ub.pred.ai.128B(i1, ptr, i32, <32 x i32>)
+declare void @llvm.hexagon.V6.vS32Ub.npred.ai.128B(i1, ptr, i32, <32 x i32>)
+declare void @llvm.hexagon.V6.vS32b.nt.pred.ai.128B(i1, ptr, i32, <32 x i32>)
+declare void @llvm.hexagon.V6.vS32b.nt.npred.ai.128B(i1, ptr, i32, <32 x i32>)
+
+declare ptr @llvm.hexagon.V6.vS32b.pred.pi.128B(i1, ptr, i32, <32 x i32>)
+declare ptr @llvm.hexagon.V6.vS32b.npred.pi.128B(i1, ptr, i32, <32 x i32>)
+declare ptr @llvm.hexagon.V6.vS32Ub.pred.pi.128B(i1, ptr, i32, <32 x i32>)
+declare ptr @llvm.hexagon.V6.vS32Ub.npred.pi.128B(i1, ptr, i32, <32 x i32>)
+declare ptr @llvm.hexagon.V6.vS32b.nt.pred.pi.128B(i1, ptr, i32, <32 x i32>)
+declare ptr @llvm.hexagon.V6.vS32b.nt.npred.pi.128B(i1, ptr, i32, <32 x i32>)
+
+declare ptr @llvm.hexagon.V6.vS32b.pred.ppu.128B(i1, ptr, i32, <32 x i32>)
+declare ptr @llvm.hexagon.V6.vS32b.npred.ppu.128B(i1, ptr, i32, <32 x i32>)
+declare ptr @llvm.hexagon.V6.vS32Ub.pred.ppu.128B(i1, ptr, i32, <32 x i32>)
+declare ptr @llvm.hexagon.V6.vS32Ub.npred.ppu.128B(i1, ptr, i32, <32 x i32>)
+declare ptr @llvm.hexagon.V6.vS32b.nt.pred.ppu.128B(i1, ptr, i32, <32 x i32>)
+declare ptr @llvm.hexagon.V6.vS32b.nt.npred.ppu.128B(i1, ptr, i32, <32 x i32>)
+
+
+define <32 x i32> @f0(i32 %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f0:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -52,11 +52,11 @@ define <32 x i32> @f0(i32 %a0, <32 x i32>* %a1) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <32 x i32> @llvm.hexagon.V6.vL32b.pred.ai.128B(i1 %v0, <32 x i32>* %a1, i32 384)
+  %v1 = call <32 x i32> @llvm.hexagon.V6.vL32b.pred.ai.128B(i1 %v0, ptr %a1, i32 384)
   ret <32 x i32> %v1
 }
 
-define <32 x i32> @f1(i32 %a0, <32 x i32>* %a1) #0 {
+define <32 x i32> @f1(i32 %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f1:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -70,11 +70,11 @@ define <32 x i32> @f1(i32 %a0, <32 x i32>* %a1) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <32 x i32> @llvm.hexagon.V6.vL32b.npred.ai.128B(i1 %v0, <32 x i32>* %a1, i32 384)
+  %v1 = call <32 x i32> @llvm.hexagon.V6.vL32b.npred.ai.128B(i1 %v0, ptr %a1, i32 384)
   ret <32 x i32> %v1
 }
 
-define <32 x i32> @f2(i32 %a0, <32 x i32>* %a1) #0 {
+define <32 x i32> @f2(i32 %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f2:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -88,11 +88,11 @@ define <32 x i32> @f2(i32 %a0, <32 x i32>* %a1) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <32 x i32> @llvm.hexagon.V6.vL32b.nt.pred.ai.128B(i1 %v0, <32 x i32>* %a1, i32 384)
+  %v1 = call <32 x i32> @llvm.hexagon.V6.vL32b.nt.pred.ai.128B(i1 %v0, ptr %a1, i32 384)
   ret <32 x i32> %v1
 }
 
-define <32 x i32> @f3(i32 %a0, <32 x i32>* %a1) #0 {
+define <32 x i32> @f3(i32 %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f3:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -106,11 +106,11 @@ define <32 x i32> @f3(i32 %a0, <32 x i32>* %a1) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <32 x i32> @llvm.hexagon.V6.vL32b.nt.npred.ai.128B(i1 %v0, <32 x i32>* %a1, i32 384)
+  %v1 = call <32 x i32> @llvm.hexagon.V6.vL32b.nt.npred.ai.128B(i1 %v0, ptr %a1, i32 384)
   ret <32 x i32> %v1
 }
 
-define <32 x i32>* @f4(i32 %a0, <32 x i32>* %a1) #0 {
+define ptr @f4(i32 %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f4:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -127,12 +127,12 @@ define <32 x i32>* @f4(i32 %a0, <32 x i32>* %a1) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call { <32 x i32>, <32 x i32>* } @llvm.hexagon.V6.vL32b.pred.pi.128B(i1 %v0, <32 x i32>* %a1, i32 384)
-  %v2 = extractvalue { <32 x i32>, <32 x i32>* } %v1, 1
-  ret <32 x i32>* %v2
+  %v1 = call { <32 x i32>, ptr } @llvm.hexagon.V6.vL32b.pred.pi.128B(i1 %v0, ptr %a1, i32 384)
+  %v2 = extractvalue { <32 x i32>, ptr } %v1, 1
+  ret ptr %v2
 }
 
-define <32 x i32>* @f5(i32 %a0, <32 x i32>* %a1) #0 {
+define ptr @f5(i32 %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f5:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -149,12 +149,12 @@ define <32 x i32>* @f5(i32 %a0, <32 x i32>* %a1) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call { <32 x i32>, <32 x i32>* } @llvm.hexagon.V6.vL32b.npred.pi.128B(i1 %v0, <32 x i32>* %a1, i32 384)
-  %v2 = extractvalue { <32 x i32>, <32 x i32>* } %v1, 1
-  ret <32 x i32>* %v2
+  %v1 = call { <32 x i32>, ptr } @llvm.hexagon.V6.vL32b.npred.pi.128B(i1 %v0, ptr %a1, i32 384)
+  %v2 = extractvalue { <32 x i32>, ptr } %v1, 1
+  ret ptr %v2
 }
 
-define <32 x i32>* @f6(i32 %a0, <32 x i32>* %a1) #0 {
+define ptr @f6(i32 %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f6:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -171,12 +171,12 @@ define <32 x i32>* @f6(i32 %a0, <32 x i32>* %a1) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call { <32 x i32>, <32 x i32>* } @llvm.hexagon.V6.vL32b.nt.pred.pi.128B(i1 %v0, <32 x i32>* %a1, i32 384)
-  %v2 = extractvalue { <32 x i32>, <32 x i32>* } %v1, 1
-  ret <32 x i32>* %v2
+  %v1 = call { <32 x i32>, ptr } @llvm.hexagon.V6.vL32b.nt.pred.pi.128B(i1 %v0, ptr %a1, i32 384)
+  %v2 = extractvalue { <32 x i32>, ptr } %v1, 1
+  ret ptr %v2
 }
 
-define <32 x i32>* @f7(i32 %a0, <32 x i32>* %a1) #0 {
+define ptr @f7(i32 %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f7:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -193,12 +193,12 @@ define <32 x i32>* @f7(i32 %a0, <32 x i32>* %a1) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call { <32 x i32>, <32 x i32>* } @llvm.hexagon.V6.vL32b.nt.npred.pi.128B(i1 %v0, <32 x i32>* %a1, i32 384)
-  %v2 = extractvalue { <32 x i32>, <32 x i32>* } %v1, 1
-  ret <32 x i32>* %v2
+  %v1 = call { <32 x i32>, ptr } @llvm.hexagon.V6.vL32b.nt.npred.pi.128B(i1 %v0, ptr %a1, i32 384)
+  %v2 = extractvalue { <32 x i32>, ptr } %v1, 1
+  ret ptr %v2
 }
 
-define <32 x i32>* @f8(i32 %a0, <32 x i32>* %a1, i32 %a2) #0 {
+define ptr @f8(i32 %a0, ptr %a1, i32 %a2) #0 {
 ; CHECK-LABEL: f8:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -218,12 +218,12 @@ define <32 x i32>* @f8(i32 %a0, <32 x i32>* %a1, i32 %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call { <32 x i32>, <32 x i32>* } @llvm.hexagon.V6.vL32b.pred.ppu.128B(i1 %v0, <32 x i32>* %a1, i32 %a2)
-  %v2 = extractvalue { <32 x i32>, <32 x i32>* } %v1, 1
-  ret <32 x i32>* %v2
+  %v1 = call { <32 x i32>, ptr } @llvm.hexagon.V6.vL32b.pred.ppu.128B(i1 %v0, ptr %a1, i32 %a2)
+  %v2 = extractvalue { <32 x i32>, ptr } %v1, 1
+  ret ptr %v2
 }
 
-define <32 x i32>* @f9(i32 %a0, <32 x i32>* %a1, i32 %a2) #0 {
+define ptr @f9(i32 %a0, ptr %a1, i32 %a2) #0 {
 ; CHECK-LABEL: f9:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -243,12 +243,12 @@ define <32 x i32>* @f9(i32 %a0, <32 x i32>* %a1, i32 %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call { <32 x i32>, <32 x i32>* } @llvm.hexagon.V6.vL32b.npred.ppu.128B(i1 %v0, <32 x i32>* %a1, i32 %a2)
-  %v2 = extractvalue { <32 x i32>, <32 x i32>* } %v1, 1
-  ret <32 x i32>* %v2
+  %v1 = call { <32 x i32>, ptr } @llvm.hexagon.V6.vL32b.npred.ppu.128B(i1 %v0, ptr %a1, i32 %a2)
+  %v2 = extractvalue { <32 x i32>, ptr } %v1, 1
+  ret ptr %v2
 }
 
-define <32 x i32>* @f10(i32 %a0, <32 x i32>* %a1, i32 %a2) #0 {
+define ptr @f10(i32 %a0, ptr %a1, i32 %a2) #0 {
 ; CHECK-LABEL: f10:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -268,12 +268,12 @@ define <32 x i32>* @f10(i32 %a0, <32 x i32>* %a1, i32 %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call { <32 x i32>, <32 x i32>* } @llvm.hexagon.V6.vL32b.nt.pred.ppu.128B(i1 %v0, <32 x i32>* %a1, i32 %a2)
-  %v2 = extractvalue { <32 x i32>, <32 x i32>* } %v1, 1
-  ret <32 x i32>* %v2
+  %v1 = call { <32 x i32>, ptr } @llvm.hexagon.V6.vL32b.nt.pred.ppu.128B(i1 %v0, ptr %a1, i32 %a2)
+  %v2 = extractvalue { <32 x i32>, ptr } %v1, 1
+  ret ptr %v2
 }
 
-define <32 x i32>* @f11(i32 %a0, <32 x i32>* %a1, i32 %a2) #0 {
+define ptr @f11(i32 %a0, ptr %a1, i32 %a2) #0 {
 ; CHECK-LABEL: f11:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -293,12 +293,12 @@ define <32 x i32>* @f11(i32 %a0, <32 x i32>* %a1, i32 %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call { <32 x i32>, <32 x i32>* } @llvm.hexagon.V6.vL32b.nt.npred.ppu.128B(i1 %v0, <32 x i32>* %a1, i32 %a2)
-  %v2 = extractvalue { <32 x i32>, <32 x i32>* } %v1, 1
-  ret <32 x i32>* %v2
+  %v1 = call { <32 x i32>, ptr } @llvm.hexagon.V6.vL32b.nt.npred.ppu.128B(i1 %v0, ptr %a1, i32 %a2)
+  %v2 = extractvalue { <32 x i32>, ptr } %v1, 1
+  ret ptr %v2
 }
 
-define void @f12(i32 %a0, <32 x i32>* %a1, <32 x i32> %a2) #0 {
+define void @f12(i32 %a0, ptr %a1, <32 x i32> %a2) #0 {
 ; CHECK-LABEL: f12:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -312,11 +312,11 @@ define void @f12(i32 %a0, <32 x i32>* %a1, <32 x i32> %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  call void @llvm.hexagon.V6.vS32b.pred.ai.128B(i1 %v0, <32 x i32>* %a1, i32 -384, <32 x i32> %a2)
+  call void @llvm.hexagon.V6.vS32b.pred.ai.128B(i1 %v0, ptr %a1, i32 -384, <32 x i32> %a2)
   ret void
 }
 
-define void @f13(i32 %a0, <32 x i32>* %a1, <32 x i32> %a2) #0 {
+define void @f13(i32 %a0, ptr %a1, <32 x i32> %a2) #0 {
 ; CHECK-LABEL: f13:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -330,11 +330,11 @@ define void @f13(i32 %a0, <32 x i32>* %a1, <32 x i32> %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  call void @llvm.hexagon.V6.vS32b.npred.ai.128B(i1 %v0, <32 x i32>* %a1, i32 -384, <32 x i32> %a2)
+  call void @llvm.hexagon.V6.vS32b.npred.ai.128B(i1 %v0, ptr %a1, i32 -384, <32 x i32> %a2)
   ret void
 }
 
-define void @f14(i32 %a0, <32 x i32>* %a1, <32 x i32> %a2) #0 {
+define void @f14(i32 %a0, ptr %a1, <32 x i32> %a2) #0 {
 ; CHECK-LABEL: f14:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -348,11 +348,11 @@ define void @f14(i32 %a0, <32 x i32>* %a1, <32 x i32> %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  call void @llvm.hexagon.V6.vS32Ub.pred.ai.128B(i1 %v0, <32 x i32>* %a1, i32 -384, <32 x i32> %a2)
+  call void @llvm.hexagon.V6.vS32Ub.pred.ai.128B(i1 %v0, ptr %a1, i32 -384, <32 x i32> %a2)
   ret void
 }
 
-define void @f15(i32 %a0, <32 x i32>* %a1, <32 x i32> %a2) #0 {
+define void @f15(i32 %a0, ptr %a1, <32 x i32> %a2) #0 {
 ; CHECK-LABEL: f15:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -366,11 +366,11 @@ define void @f15(i32 %a0, <32 x i32>* %a1, <32 x i32> %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  call void @llvm.hexagon.V6.vS32Ub.npred.ai.128B(i1 %v0, <32 x i32>* %a1, i32 -384, <32 x i32> %a2)
+  call void @llvm.hexagon.V6.vS32Ub.npred.ai.128B(i1 %v0, ptr %a1, i32 -384, <32 x i32> %a2)
   ret void
 }
 
-define void @f16(i32 %a0, <32 x i32>* %a1, <32 x i32> %a2) #0 {
+define void @f16(i32 %a0, ptr %a1, <32 x i32> %a2) #0 {
 ; CHECK-LABEL: f16:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -384,11 +384,11 @@ define void @f16(i32 %a0, <32 x i32>* %a1, <32 x i32> %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  call void @llvm.hexagon.V6.vS32b.nt.pred.ai.128B(i1 %v0, <32 x i32>* %a1, i32 -384, <32 x i32> %a2)
+  call void @llvm.hexagon.V6.vS32b.nt.pred.ai.128B(i1 %v0, ptr %a1, i32 -384, <32 x i32> %a2)
   ret void
 }
 
-define void @f17(i32 %a0, <32 x i32>* %a1, <32 x i32> %a2) #0 {
+define void @f17(i32 %a0, ptr %a1, <32 x i32> %a2) #0 {
 ; CHECK-LABEL: f17:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -402,11 +402,11 @@ define void @f17(i32 %a0, <32 x i32>* %a1, <32 x i32> %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  call void @llvm.hexagon.V6.vS32b.nt.npred.ai.128B(i1 %v0, <32 x i32>* %a1, i32 -384, <32 x i32> %a2)
+  call void @llvm.hexagon.V6.vS32b.nt.npred.ai.128B(i1 %v0, ptr %a1, i32 -384, <32 x i32> %a2)
   ret void
 }
 
-define <32 x i32>* @f18(i32 %a0, <32 x i32>* %a1, <32 x i32> %a2) #0 {
+define ptr @f18(i32 %a0, ptr %a1, <32 x i32> %a2) #0 {
 ; CHECK-LABEL: f18:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -423,11 +423,11 @@ define <32 x i32>* @f18(i32 %a0, <32 x i32>* %a1, <32 x i32> %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <32 x i32>* @llvm.hexagon.V6.vS32b.pred.pi.128B(i1 %v0, <32 x i32>* %a1, i32 -384, <32 x i32> %a2)
-  ret <32 x i32>* %v1
+  %v1 = call ptr @llvm.hexagon.V6.vS32b.pred.pi.128B(i1 %v0, ptr %a1, i32 -384, <32 x i32> %a2)
+  ret ptr %v1
 }
 
-define <32 x i32>* @f19(i32 %a0, <32 x i32>* %a1, <32 x i32> %a2) #0 {
+define ptr @f19(i32 %a0, ptr %a1, <32 x i32> %a2) #0 {
 ; CHECK-LABEL: f19:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -444,11 +444,11 @@ define <32 x i32>* @f19(i32 %a0, <32 x i32>* %a1, <32 x i32> %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <32 x i32>* @llvm.hexagon.V6.vS32b.npred.pi.128B(i1 %v0, <32 x i32>* %a1, i32 -384, <32 x i32> %a2)
-  ret <32 x i32>* %v1
+  %v1 = call ptr @llvm.hexagon.V6.vS32b.npred.pi.128B(i1 %v0, ptr %a1, i32 -384, <32 x i32> %a2)
+  ret ptr %v1
 }
 
-define <32 x i32>* @f20(i32 %a0, <32 x i32>* %a1, <32 x i32> %a2) #0 {
+define ptr @f20(i32 %a0, ptr %a1, <32 x i32> %a2) #0 {
 ; CHECK-LABEL: f20:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -465,11 +465,11 @@ define <32 x i32>* @f20(i32 %a0, <32 x i32>* %a1, <32 x i32> %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <32 x i32>* @llvm.hexagon.V6.vS32Ub.pred.pi.128B(i1 %v0, <32 x i32>* %a1, i32 -384, <32 x i32> %a2)
-  ret <32 x i32>* %v1
+  %v1 = call ptr @llvm.hexagon.V6.vS32Ub.pred.pi.128B(i1 %v0, ptr %a1, i32 -384, <32 x i32> %a2)
+  ret ptr %v1
 }
 
-define <32 x i32>* @f21(i32 %a0, <32 x i32>* %a1, <32 x i32> %a2) #0 {
+define ptr @f21(i32 %a0, ptr %a1, <32 x i32> %a2) #0 {
 ; CHECK-LABEL: f21:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -486,11 +486,11 @@ define <32 x i32>* @f21(i32 %a0, <32 x i32>* %a1, <32 x i32> %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <32 x i32>* @llvm.hexagon.V6.vS32Ub.npred.pi.128B(i1 %v0, <32 x i32>* %a1, i32 -384, <32 x i32> %a2)
-  ret <32 x i32>* %v1
+  %v1 = call ptr @llvm.hexagon.V6.vS32Ub.npred.pi.128B(i1 %v0, ptr %a1, i32 -384, <32 x i32> %a2)
+  ret ptr %v1
 }
 
-define <32 x i32>* @f22(i32 %a0, <32 x i32>* %a1, <32 x i32> %a2) #0 {
+define ptr @f22(i32 %a0, ptr %a1, <32 x i32> %a2) #0 {
 ; CHECK-LABEL: f22:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -507,11 +507,11 @@ define <32 x i32>* @f22(i32 %a0, <32 x i32>* %a1, <32 x i32> %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <32 x i32>* @llvm.hexagon.V6.vS32b.nt.pred.pi.128B(i1 %v0, <32 x i32>* %a1, i32 -384, <32 x i32> %a2)
-  ret <32 x i32>* %v1
+  %v1 = call ptr @llvm.hexagon.V6.vS32b.nt.pred.pi.128B(i1 %v0, ptr %a1, i32 -384, <32 x i32> %a2)
+  ret ptr %v1
 }
 
-define <32 x i32>* @f23(i32 %a0, <32 x i32>* %a1, <32 x i32> %a2) #0 {
+define ptr @f23(i32 %a0, ptr %a1, <32 x i32> %a2) #0 {
 ; CHECK-LABEL: f23:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -528,11 +528,11 @@ define <32 x i32>* @f23(i32 %a0, <32 x i32>* %a1, <32 x i32> %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <32 x i32>* @llvm.hexagon.V6.vS32b.nt.npred.pi.128B(i1 %v0, <32 x i32>* %a1, i32 -384, <32 x i32> %a2)
-  ret <32 x i32>* %v1
+  %v1 = call ptr @llvm.hexagon.V6.vS32b.nt.npred.pi.128B(i1 %v0, ptr %a1, i32 -384, <32 x i32> %a2)
+  ret ptr %v1
 }
 
-define <32 x i32>* @f24(i32 %a0, <32 x i32>* %a1, i32 %a2, <32 x i32> %a3) #0 {
+define ptr @f24(i32 %a0, ptr %a1, i32 %a2, <32 x i32> %a3) #0 {
 ; CHECK-LABEL: f24:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -552,11 +552,11 @@ define <32 x i32>* @f24(i32 %a0, <32 x i32>* %a1, i32 %a2, <32 x i32> %a3) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <32 x i32>* @llvm.hexagon.V6.vS32b.pred.ppu.128B(i1 %v0, <32 x i32>* %a1, i32 %a2, <32 x i32> %a3)
-  ret <32 x i32>* %v1
+  %v1 = call ptr @llvm.hexagon.V6.vS32b.pred.ppu.128B(i1 %v0, ptr %a1, i32 %a2, <32 x i32> %a3)
+  ret ptr %v1
 }
 
-define <32 x i32>* @f25(i32 %a0, <32 x i32>* %a1, i32 %a2, <32 x i32> %a3) #0 {
+define ptr @f25(i32 %a0, ptr %a1, i32 %a2, <32 x i32> %a3) #0 {
 ; CHECK-LABEL: f25:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -576,11 +576,11 @@ define <32 x i32>* @f25(i32 %a0, <32 x i32>* %a1, i32 %a2, <32 x i32> %a3) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <32 x i32>* @llvm.hexagon.V6.vS32b.npred.ppu.128B(i1 %v0, <32 x i32>* %a1, i32 %a2, <32 x i32> %a3)
-  ret <32 x i32>* %v1
+  %v1 = call ptr @llvm.hexagon.V6.vS32b.npred.ppu.128B(i1 %v0, ptr %a1, i32 %a2, <32 x i32> %a3)
+  ret ptr %v1
 }
 
-define <32 x i32>* @f26(i32 %a0, <32 x i32>* %a1, i32 %a2, <32 x i32> %a3) #0 {
+define ptr @f26(i32 %a0, ptr %a1, i32 %a2, <32 x i32> %a3) #0 {
 ; CHECK-LABEL: f26:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -600,11 +600,11 @@ define <32 x i32>* @f26(i32 %a0, <32 x i32>* %a1, i32 %a2, <32 x i32> %a3) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <32 x i32>* @llvm.hexagon.V6.vS32Ub.pred.ppu.128B(i1 %v0, <32 x i32>* %a1, i32 %a2, <32 x i32> %a3)
-  ret <32 x i32>* %v1
+  %v1 = call ptr @llvm.hexagon.V6.vS32Ub.pred.ppu.128B(i1 %v0, ptr %a1, i32 %a2, <32 x i32> %a3)
+  ret ptr %v1
 }
 
-define <32 x i32>* @f27(i32 %a0, <32 x i32>* %a1, i32 %a2, <32 x i32> %a3) #0 {
+define ptr @f27(i32 %a0, ptr %a1, i32 %a2, <32 x i32> %a3) #0 {
 ; CHECK-LABEL: f27:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -624,11 +624,11 @@ define <32 x i32>* @f27(i32 %a0, <32 x i32>* %a1, i32 %a2, <32 x i32> %a3) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <32 x i32>* @llvm.hexagon.V6.vS32Ub.npred.ppu.128B(i1 %v0, <32 x i32>* %a1, i32 %a2, <32 x i32> %a3)
-  ret <32 x i32>* %v1
+  %v1 = call ptr @llvm.hexagon.V6.vS32Ub.npred.ppu.128B(i1 %v0, ptr %a1, i32 %a2, <32 x i32> %a3)
+  ret ptr %v1
 }
 
-define <32 x i32>* @f28(i32 %a0, <32 x i32>* %a1, i32 %a2, <32 x i32> %a3) #0 {
+define ptr @f28(i32 %a0, ptr %a1, i32 %a2, <32 x i32> %a3) #0 {
 ; CHECK-LABEL: f28:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -648,11 +648,11 @@ define <32 x i32>* @f28(i32 %a0, <32 x i32>* %a1, i32 %a2, <32 x i32> %a3) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <32 x i32>* @llvm.hexagon.V6.vS32b.nt.pred.ppu.128B(i1 %v0, <32 x i32>* %a1, i32 %a2, <32 x i32> %a3)
-  ret <32 x i32>* %v1
+  %v1 = call ptr @llvm.hexagon.V6.vS32b.nt.pred.ppu.128B(i1 %v0, ptr %a1, i32 %a2, <32 x i32> %a3)
+  ret ptr %v1
 }
 
-define <32 x i32>* @f29(i32 %a0, <32 x i32>* %a1, i32 %a2, <32 x i32> %a3) #0 {
+define ptr @f29(i32 %a0, ptr %a1, i32 %a2, <32 x i32> %a3) #0 {
 ; CHECK-LABEL: f29:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -672,8 +672,8 @@ define <32 x i32>* @f29(i32 %a0, <32 x i32>* %a1, i32 %a2, <32 x i32> %a3) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <32 x i32>* @llvm.hexagon.V6.vS32b.nt.npred.ppu.128B(i1 %v0, <32 x i32>* %a1, i32 %a2, <32 x i32> %a3)
-  ret <32 x i32>* %v1
+  %v1 = call ptr @llvm.hexagon.V6.vS32b.nt.npred.ppu.128B(i1 %v0, ptr %a1, i32 %a2, <32 x i32> %a3)
+  ret ptr %v1
 }
 
 attributes #0 = { nounwind "target-cpu"="hexagonv66" "target-features"="+hvxv66,+hvx-length128b,-packets" }

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/pred-vmem-64b.ll b/llvm/test/CodeGen/Hexagon/autohvx/pred-vmem-64b.ll
index 8fd6f264d838f..d1a8f75f46a08 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/pred-vmem-64b.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/pred-vmem-64b.ll
@@ -1,44 +1,44 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -march=hexagon < %s | FileCheck %s
 
-declare <16 x i32> @llvm.hexagon.V6.vL32b.pred.ai(i1, <16 x i32>*, i32)
-declare <16 x i32> @llvm.hexagon.V6.vL32b.npred.ai(i1, <16 x i32>*, i32)
-declare <16 x i32> @llvm.hexagon.V6.vL32b.nt.pred.ai(i1, <16 x i32>*, i32)
-declare <16 x i32> @llvm.hexagon.V6.vL32b.nt.npred.ai(i1, <16 x i32>*, i32)
-
-declare { <16 x i32>, <16 x i32>* } @llvm.hexagon.V6.vL32b.pred.pi(i1, <16 x i32>*, i32)
-declare { <16 x i32>, <16 x i32>* } @llvm.hexagon.V6.vL32b.npred.pi(i1, <16 x i32>*, i32)
-declare { <16 x i32>, <16 x i32>* } @llvm.hexagon.V6.vL32b.nt.pred.pi(i1, <16 x i32>*, i32)
-declare { <16 x i32>, <16 x i32>* } @llvm.hexagon.V6.vL32b.nt.npred.pi(i1, <16 x i32>*, i32)
-
-declare { <16 x i32>, <16 x i32>* } @llvm.hexagon.V6.vL32b.pred.ppu(i1, <16 x i32>*, i32)
-declare { <16 x i32>, <16 x i32>* } @llvm.hexagon.V6.vL32b.npred.ppu(i1, <16 x i32>*, i32)
-declare { <16 x i32>, <16 x i32>* } @llvm.hexagon.V6.vL32b.nt.pred.ppu(i1, <16 x i32>*, i32)
-declare { <16 x i32>, <16 x i32>* } @llvm.hexagon.V6.vL32b.nt.npred.ppu(i1, <16 x i32>*, i32)
-
-declare void @llvm.hexagon.V6.vS32b.pred.ai(i1, <16 x i32>*, i32, <16 x i32>)
-declare void @llvm.hexagon.V6.vS32b.npred.ai(i1, <16 x i32>*, i32, <16 x i32>)
-declare void @llvm.hexagon.V6.vS32Ub.pred.ai(i1, <16 x i32>*, i32, <16 x i32>)
-declare void @llvm.hexagon.V6.vS32Ub.npred.ai(i1, <16 x i32>*, i32, <16 x i32>)
-declare void @llvm.hexagon.V6.vS32b.nt.pred.ai(i1, <16 x i32>*, i32, <16 x i32>)
-declare void @llvm.hexagon.V6.vS32b.nt.npred.ai(i1, <16 x i32>*, i32, <16 x i32>)
-
-declare <16 x i32>* @llvm.hexagon.V6.vS32b.pred.pi(i1, <16 x i32>*, i32, <16 x i32>)
-declare <16 x i32>* @llvm.hexagon.V6.vS32b.npred.pi(i1, <16 x i32>*, i32, <16 x i32>)
-declare <16 x i32>* @llvm.hexagon.V6.vS32Ub.pred.pi(i1, <16 x i32>*, i32, <16 x i32>)
-declare <16 x i32>* @llvm.hexagon.V6.vS32Ub.npred.pi(i1, <16 x i32>*, i32, <16 x i32>)
-declare <16 x i32>* @llvm.hexagon.V6.vS32b.nt.pred.pi(i1, <16 x i32>*, i32, <16 x i32>)
-declare <16 x i32>* @llvm.hexagon.V6.vS32b.nt.npred.pi(i1, <16 x i32>*, i32, <16 x i32>)
-
-declare <16 x i32>* @llvm.hexagon.V6.vS32b.pred.ppu(i1, <16 x i32>*, i32, <16 x i32>)
-declare <16 x i32>* @llvm.hexagon.V6.vS32b.npred.ppu(i1, <16 x i32>*, i32, <16 x i32>)
-declare <16 x i32>* @llvm.hexagon.V6.vS32Ub.pred.ppu(i1, <16 x i32>*, i32, <16 x i32>)
-declare <16 x i32>* @llvm.hexagon.V6.vS32Ub.npred.ppu(i1, <16 x i32>*, i32, <16 x i32>)
-declare <16 x i32>* @llvm.hexagon.V6.vS32b.nt.pred.ppu(i1, <16 x i32>*, i32, <16 x i32>)
-declare <16 x i32>* @llvm.hexagon.V6.vS32b.nt.npred.ppu(i1, <16 x i32>*, i32, <16 x i32>)
-
-
-define <16 x i32> @f0(i32 %a0, <16 x i32>* %a1) #0 {
+declare <16 x i32> @llvm.hexagon.V6.vL32b.pred.ai(i1, ptr, i32)
+declare <16 x i32> @llvm.hexagon.V6.vL32b.npred.ai(i1, ptr, i32)
+declare <16 x i32> @llvm.hexagon.V6.vL32b.nt.pred.ai(i1, ptr, i32)
+declare <16 x i32> @llvm.hexagon.V6.vL32b.nt.npred.ai(i1, ptr, i32)
+
+declare { <16 x i32>, ptr } @llvm.hexagon.V6.vL32b.pred.pi(i1, ptr, i32)
+declare { <16 x i32>, ptr } @llvm.hexagon.V6.vL32b.npred.pi(i1, ptr, i32)
+declare { <16 x i32>, ptr } @llvm.hexagon.V6.vL32b.nt.pred.pi(i1, ptr, i32)
+declare { <16 x i32>, ptr } @llvm.hexagon.V6.vL32b.nt.npred.pi(i1, ptr, i32)
+
+declare { <16 x i32>, ptr } @llvm.hexagon.V6.vL32b.pred.ppu(i1, ptr, i32)
+declare { <16 x i32>, ptr } @llvm.hexagon.V6.vL32b.npred.ppu(i1, ptr, i32)
+declare { <16 x i32>, ptr } @llvm.hexagon.V6.vL32b.nt.pred.ppu(i1, ptr, i32)
+declare { <16 x i32>, ptr } @llvm.hexagon.V6.vL32b.nt.npred.ppu(i1, ptr, i32)
+
+declare void @llvm.hexagon.V6.vS32b.pred.ai(i1, ptr, i32, <16 x i32>)
+declare void @llvm.hexagon.V6.vS32b.npred.ai(i1, ptr, i32, <16 x i32>)
+declare void @llvm.hexagon.V6.vS32Ub.pred.ai(i1, ptr, i32, <16 x i32>)
+declare void @llvm.hexagon.V6.vS32Ub.npred.ai(i1, ptr, i32, <16 x i32>)
+declare void @llvm.hexagon.V6.vS32b.nt.pred.ai(i1, ptr, i32, <16 x i32>)
+declare void @llvm.hexagon.V6.vS32b.nt.npred.ai(i1, ptr, i32, <16 x i32>)
+
+declare ptr @llvm.hexagon.V6.vS32b.pred.pi(i1, ptr, i32, <16 x i32>)
+declare ptr @llvm.hexagon.V6.vS32b.npred.pi(i1, ptr, i32, <16 x i32>)
+declare ptr @llvm.hexagon.V6.vS32Ub.pred.pi(i1, ptr, i32, <16 x i32>)
+declare ptr @llvm.hexagon.V6.vS32Ub.npred.pi(i1, ptr, i32, <16 x i32>)
+declare ptr @llvm.hexagon.V6.vS32b.nt.pred.pi(i1, ptr, i32, <16 x i32>)
+declare ptr @llvm.hexagon.V6.vS32b.nt.npred.pi(i1, ptr, i32, <16 x i32>)
+
+declare ptr @llvm.hexagon.V6.vS32b.pred.ppu(i1, ptr, i32, <16 x i32>)
+declare ptr @llvm.hexagon.V6.vS32b.npred.ppu(i1, ptr, i32, <16 x i32>)
+declare ptr @llvm.hexagon.V6.vS32Ub.pred.ppu(i1, ptr, i32, <16 x i32>)
+declare ptr @llvm.hexagon.V6.vS32Ub.npred.ppu(i1, ptr, i32, <16 x i32>)
+declare ptr @llvm.hexagon.V6.vS32b.nt.pred.ppu(i1, ptr, i32, <16 x i32>)
+declare ptr @llvm.hexagon.V6.vS32b.nt.npred.ppu(i1, ptr, i32, <16 x i32>)
+
+
+define <16 x i32> @f0(i32 %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f0:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -52,11 +52,11 @@ define <16 x i32> @f0(i32 %a0, <16 x i32>* %a1) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <16 x i32> @llvm.hexagon.V6.vL32b.pred.ai(i1 %v0, <16 x i32>* %a1, i32 192)
+  %v1 = call <16 x i32> @llvm.hexagon.V6.vL32b.pred.ai(i1 %v0, ptr %a1, i32 192)
   ret <16 x i32> %v1
 }
 
-define <16 x i32> @f1(i32 %a0, <16 x i32>* %a1) #0 {
+define <16 x i32> @f1(i32 %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f1:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -70,11 +70,11 @@ define <16 x i32> @f1(i32 %a0, <16 x i32>* %a1) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <16 x i32> @llvm.hexagon.V6.vL32b.npred.ai(i1 %v0, <16 x i32>* %a1, i32 192)
+  %v1 = call <16 x i32> @llvm.hexagon.V6.vL32b.npred.ai(i1 %v0, ptr %a1, i32 192)
   ret <16 x i32> %v1
 }
 
-define <16 x i32> @f2(i32 %a0, <16 x i32>* %a1) #0 {
+define <16 x i32> @f2(i32 %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f2:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -88,11 +88,11 @@ define <16 x i32> @f2(i32 %a0, <16 x i32>* %a1) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <16 x i32> @llvm.hexagon.V6.vL32b.nt.pred.ai(i1 %v0, <16 x i32>* %a1, i32 192)
+  %v1 = call <16 x i32> @llvm.hexagon.V6.vL32b.nt.pred.ai(i1 %v0, ptr %a1, i32 192)
   ret <16 x i32> %v1
 }
 
-define <16 x i32> @f3(i32 %a0, <16 x i32>* %a1) #0 {
+define <16 x i32> @f3(i32 %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f3:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -106,11 +106,11 @@ define <16 x i32> @f3(i32 %a0, <16 x i32>* %a1) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <16 x i32> @llvm.hexagon.V6.vL32b.nt.npred.ai(i1 %v0, <16 x i32>* %a1, i32 192)
+  %v1 = call <16 x i32> @llvm.hexagon.V6.vL32b.nt.npred.ai(i1 %v0, ptr %a1, i32 192)
   ret <16 x i32> %v1
 }
 
-define <16 x i32>* @f4(i32 %a0, <16 x i32>* %a1) #0 {
+define ptr @f4(i32 %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f4:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -127,12 +127,12 @@ define <16 x i32>* @f4(i32 %a0, <16 x i32>* %a1) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call { <16 x i32>, <16 x i32>* } @llvm.hexagon.V6.vL32b.pred.pi(i1 %v0, <16 x i32>* %a1, i32 192)
-  %v2 = extractvalue { <16 x i32>, <16 x i32>* } %v1, 1
-  ret <16 x i32>* %v2
+  %v1 = call { <16 x i32>, ptr } @llvm.hexagon.V6.vL32b.pred.pi(i1 %v0, ptr %a1, i32 192)
+  %v2 = extractvalue { <16 x i32>, ptr } %v1, 1
+  ret ptr %v2
 }
 
-define <16 x i32>* @f5(i32 %a0, <16 x i32>* %a1) #0 {
+define ptr @f5(i32 %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f5:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -149,12 +149,12 @@ define <16 x i32>* @f5(i32 %a0, <16 x i32>* %a1) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call { <16 x i32>, <16 x i32>* } @llvm.hexagon.V6.vL32b.npred.pi(i1 %v0, <16 x i32>* %a1, i32 192)
-  %v2 = extractvalue { <16 x i32>, <16 x i32>* } %v1, 1
-  ret <16 x i32>* %v2
+  %v1 = call { <16 x i32>, ptr } @llvm.hexagon.V6.vL32b.npred.pi(i1 %v0, ptr %a1, i32 192)
+  %v2 = extractvalue { <16 x i32>, ptr } %v1, 1
+  ret ptr %v2
 }
 
-define <16 x i32>* @f6(i32 %a0, <16 x i32>* %a1) #0 {
+define ptr @f6(i32 %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f6:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -171,12 +171,12 @@ define <16 x i32>* @f6(i32 %a0, <16 x i32>* %a1) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call { <16 x i32>, <16 x i32>* } @llvm.hexagon.V6.vL32b.nt.pred.pi(i1 %v0, <16 x i32>* %a1, i32 192)
-  %v2 = extractvalue { <16 x i32>, <16 x i32>* } %v1, 1
-  ret <16 x i32>* %v2
+  %v1 = call { <16 x i32>, ptr } @llvm.hexagon.V6.vL32b.nt.pred.pi(i1 %v0, ptr %a1, i32 192)
+  %v2 = extractvalue { <16 x i32>, ptr } %v1, 1
+  ret ptr %v2
 }
 
-define <16 x i32>* @f7(i32 %a0, <16 x i32>* %a1) #0 {
+define ptr @f7(i32 %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f7:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -193,12 +193,12 @@ define <16 x i32>* @f7(i32 %a0, <16 x i32>* %a1) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call { <16 x i32>, <16 x i32>* } @llvm.hexagon.V6.vL32b.nt.npred.pi(i1 %v0, <16 x i32>* %a1, i32 192)
-  %v2 = extractvalue { <16 x i32>, <16 x i32>* } %v1, 1
-  ret <16 x i32>* %v2
+  %v1 = call { <16 x i32>, ptr } @llvm.hexagon.V6.vL32b.nt.npred.pi(i1 %v0, ptr %a1, i32 192)
+  %v2 = extractvalue { <16 x i32>, ptr } %v1, 1
+  ret ptr %v2
 }
 
-define <16 x i32>* @f8(i32 %a0, <16 x i32>* %a1, i32 %a2) #0 {
+define ptr @f8(i32 %a0, ptr %a1, i32 %a2) #0 {
 ; CHECK-LABEL: f8:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -218,12 +218,12 @@ define <16 x i32>* @f8(i32 %a0, <16 x i32>* %a1, i32 %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call { <16 x i32>, <16 x i32>* } @llvm.hexagon.V6.vL32b.pred.ppu(i1 %v0, <16 x i32>* %a1, i32 %a2)
-  %v2 = extractvalue { <16 x i32>, <16 x i32>* } %v1, 1
-  ret <16 x i32>* %v2
+  %v1 = call { <16 x i32>, ptr } @llvm.hexagon.V6.vL32b.pred.ppu(i1 %v0, ptr %a1, i32 %a2)
+  %v2 = extractvalue { <16 x i32>, ptr } %v1, 1
+  ret ptr %v2
 }
 
-define <16 x i32>* @f9(i32 %a0, <16 x i32>* %a1, i32 %a2) #0 {
+define ptr @f9(i32 %a0, ptr %a1, i32 %a2) #0 {
 ; CHECK-LABEL: f9:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -243,12 +243,12 @@ define <16 x i32>* @f9(i32 %a0, <16 x i32>* %a1, i32 %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call { <16 x i32>, <16 x i32>* } @llvm.hexagon.V6.vL32b.npred.ppu(i1 %v0, <16 x i32>* %a1, i32 %a2)
-  %v2 = extractvalue { <16 x i32>, <16 x i32>* } %v1, 1
-  ret <16 x i32>* %v2
+  %v1 = call { <16 x i32>, ptr } @llvm.hexagon.V6.vL32b.npred.ppu(i1 %v0, ptr %a1, i32 %a2)
+  %v2 = extractvalue { <16 x i32>, ptr } %v1, 1
+  ret ptr %v2
 }
 
-define <16 x i32>* @f10(i32 %a0, <16 x i32>* %a1, i32 %a2) #0 {
+define ptr @f10(i32 %a0, ptr %a1, i32 %a2) #0 {
 ; CHECK-LABEL: f10:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -268,12 +268,12 @@ define <16 x i32>* @f10(i32 %a0, <16 x i32>* %a1, i32 %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call { <16 x i32>, <16 x i32>* } @llvm.hexagon.V6.vL32b.nt.pred.ppu(i1 %v0, <16 x i32>* %a1, i32 %a2)
-  %v2 = extractvalue { <16 x i32>, <16 x i32>* } %v1, 1
-  ret <16 x i32>* %v2
+  %v1 = call { <16 x i32>, ptr } @llvm.hexagon.V6.vL32b.nt.pred.ppu(i1 %v0, ptr %a1, i32 %a2)
+  %v2 = extractvalue { <16 x i32>, ptr } %v1, 1
+  ret ptr %v2
 }
 
-define <16 x i32>* @f11(i32 %a0, <16 x i32>* %a1, i32 %a2) #0 {
+define ptr @f11(i32 %a0, ptr %a1, i32 %a2) #0 {
 ; CHECK-LABEL: f11:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -293,12 +293,12 @@ define <16 x i32>* @f11(i32 %a0, <16 x i32>* %a1, i32 %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call { <16 x i32>, <16 x i32>* } @llvm.hexagon.V6.vL32b.nt.npred.ppu(i1 %v0, <16 x i32>* %a1, i32 %a2)
-  %v2 = extractvalue { <16 x i32>, <16 x i32>* } %v1, 1
-  ret <16 x i32>* %v2
+  %v1 = call { <16 x i32>, ptr } @llvm.hexagon.V6.vL32b.nt.npred.ppu(i1 %v0, ptr %a1, i32 %a2)
+  %v2 = extractvalue { <16 x i32>, ptr } %v1, 1
+  ret ptr %v2
 }
 
-define void @f12(i32 %a0, <16 x i32>* %a1, <16 x i32> %a2) #0 {
+define void @f12(i32 %a0, ptr %a1, <16 x i32> %a2) #0 {
 ; CHECK-LABEL: f12:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -312,11 +312,11 @@ define void @f12(i32 %a0, <16 x i32>* %a1, <16 x i32> %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  call void @llvm.hexagon.V6.vS32b.pred.ai(i1 %v0, <16 x i32>* %a1, i32 -192, <16 x i32> %a2)
+  call void @llvm.hexagon.V6.vS32b.pred.ai(i1 %v0, ptr %a1, i32 -192, <16 x i32> %a2)
   ret void
 }
 
-define void @f13(i32 %a0, <16 x i32>* %a1, <16 x i32> %a2) #0 {
+define void @f13(i32 %a0, ptr %a1, <16 x i32> %a2) #0 {
 ; CHECK-LABEL: f13:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -330,11 +330,11 @@ define void @f13(i32 %a0, <16 x i32>* %a1, <16 x i32> %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  call void @llvm.hexagon.V6.vS32b.npred.ai(i1 %v0, <16 x i32>* %a1, i32 -192, <16 x i32> %a2)
+  call void @llvm.hexagon.V6.vS32b.npred.ai(i1 %v0, ptr %a1, i32 -192, <16 x i32> %a2)
   ret void
 }
 
-define void @f14(i32 %a0, <16 x i32>* %a1, <16 x i32> %a2) #0 {
+define void @f14(i32 %a0, ptr %a1, <16 x i32> %a2) #0 {
 ; CHECK-LABEL: f14:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -348,11 +348,11 @@ define void @f14(i32 %a0, <16 x i32>* %a1, <16 x i32> %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  call void @llvm.hexagon.V6.vS32Ub.pred.ai(i1 %v0, <16 x i32>* %a1, i32 -192, <16 x i32> %a2)
+  call void @llvm.hexagon.V6.vS32Ub.pred.ai(i1 %v0, ptr %a1, i32 -192, <16 x i32> %a2)
   ret void
 }
 
-define void @f15(i32 %a0, <16 x i32>* %a1, <16 x i32> %a2) #0 {
+define void @f15(i32 %a0, ptr %a1, <16 x i32> %a2) #0 {
 ; CHECK-LABEL: f15:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -366,11 +366,11 @@ define void @f15(i32 %a0, <16 x i32>* %a1, <16 x i32> %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  call void @llvm.hexagon.V6.vS32Ub.npred.ai(i1 %v0, <16 x i32>* %a1, i32 -192, <16 x i32> %a2)
+  call void @llvm.hexagon.V6.vS32Ub.npred.ai(i1 %v0, ptr %a1, i32 -192, <16 x i32> %a2)
   ret void
 }
 
-define void @f16(i32 %a0, <16 x i32>* %a1, <16 x i32> %a2) #0 {
+define void @f16(i32 %a0, ptr %a1, <16 x i32> %a2) #0 {
 ; CHECK-LABEL: f16:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -384,11 +384,11 @@ define void @f16(i32 %a0, <16 x i32>* %a1, <16 x i32> %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  call void @llvm.hexagon.V6.vS32b.nt.pred.ai(i1 %v0, <16 x i32>* %a1, i32 -192, <16 x i32> %a2)
+  call void @llvm.hexagon.V6.vS32b.nt.pred.ai(i1 %v0, ptr %a1, i32 -192, <16 x i32> %a2)
   ret void
 }
 
-define void @f17(i32 %a0, <16 x i32>* %a1, <16 x i32> %a2) #0 {
+define void @f17(i32 %a0, ptr %a1, <16 x i32> %a2) #0 {
 ; CHECK-LABEL: f17:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -402,11 +402,11 @@ define void @f17(i32 %a0, <16 x i32>* %a1, <16 x i32> %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  call void @llvm.hexagon.V6.vS32b.nt.npred.ai(i1 %v0, <16 x i32>* %a1, i32 -192, <16 x i32> %a2)
+  call void @llvm.hexagon.V6.vS32b.nt.npred.ai(i1 %v0, ptr %a1, i32 -192, <16 x i32> %a2)
   ret void
 }
 
-define <16 x i32>* @f18(i32 %a0, <16 x i32>* %a1, <16 x i32> %a2) #0 {
+define ptr @f18(i32 %a0, ptr %a1, <16 x i32> %a2) #0 {
 ; CHECK-LABEL: f18:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -423,11 +423,11 @@ define <16 x i32>* @f18(i32 %a0, <16 x i32>* %a1, <16 x i32> %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <16 x i32>* @llvm.hexagon.V6.vS32b.pred.pi(i1 %v0, <16 x i32>* %a1, i32 -192, <16 x i32> %a2)
-  ret <16 x i32>* %v1
+  %v1 = call ptr @llvm.hexagon.V6.vS32b.pred.pi(i1 %v0, ptr %a1, i32 -192, <16 x i32> %a2)
+  ret ptr %v1
 }
 
-define <16 x i32>* @f19(i32 %a0, <16 x i32>* %a1, <16 x i32> %a2) #0 {
+define ptr @f19(i32 %a0, ptr %a1, <16 x i32> %a2) #0 {
 ; CHECK-LABEL: f19:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -444,11 +444,11 @@ define <16 x i32>* @f19(i32 %a0, <16 x i32>* %a1, <16 x i32> %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <16 x i32>* @llvm.hexagon.V6.vS32b.npred.pi(i1 %v0, <16 x i32>* %a1, i32 -192, <16 x i32> %a2)
-  ret <16 x i32>* %v1
+  %v1 = call ptr @llvm.hexagon.V6.vS32b.npred.pi(i1 %v0, ptr %a1, i32 -192, <16 x i32> %a2)
+  ret ptr %v1
 }
 
-define <16 x i32>* @f20(i32 %a0, <16 x i32>* %a1, <16 x i32> %a2) #0 {
+define ptr @f20(i32 %a0, ptr %a1, <16 x i32> %a2) #0 {
 ; CHECK-LABEL: f20:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -465,11 +465,11 @@ define <16 x i32>* @f20(i32 %a0, <16 x i32>* %a1, <16 x i32> %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <16 x i32>* @llvm.hexagon.V6.vS32Ub.pred.pi(i1 %v0, <16 x i32>* %a1, i32 -192, <16 x i32> %a2)
-  ret <16 x i32>* %v1
+  %v1 = call ptr @llvm.hexagon.V6.vS32Ub.pred.pi(i1 %v0, ptr %a1, i32 -192, <16 x i32> %a2)
+  ret ptr %v1
 }
 
-define <16 x i32>* @f21(i32 %a0, <16 x i32>* %a1, <16 x i32> %a2) #0 {
+define ptr @f21(i32 %a0, ptr %a1, <16 x i32> %a2) #0 {
 ; CHECK-LABEL: f21:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -486,11 +486,11 @@ define <16 x i32>* @f21(i32 %a0, <16 x i32>* %a1, <16 x i32> %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <16 x i32>* @llvm.hexagon.V6.vS32Ub.npred.pi(i1 %v0, <16 x i32>* %a1, i32 -192, <16 x i32> %a2)
-  ret <16 x i32>* %v1
+  %v1 = call ptr @llvm.hexagon.V6.vS32Ub.npred.pi(i1 %v0, ptr %a1, i32 -192, <16 x i32> %a2)
+  ret ptr %v1
 }
 
-define <16 x i32>* @f22(i32 %a0, <16 x i32>* %a1, <16 x i32> %a2) #0 {
+define ptr @f22(i32 %a0, ptr %a1, <16 x i32> %a2) #0 {
 ; CHECK-LABEL: f22:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -507,11 +507,11 @@ define <16 x i32>* @f22(i32 %a0, <16 x i32>* %a1, <16 x i32> %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <16 x i32>* @llvm.hexagon.V6.vS32b.nt.pred.pi(i1 %v0, <16 x i32>* %a1, i32 -192, <16 x i32> %a2)
-  ret <16 x i32>* %v1
+  %v1 = call ptr @llvm.hexagon.V6.vS32b.nt.pred.pi(i1 %v0, ptr %a1, i32 -192, <16 x i32> %a2)
+  ret ptr %v1
 }
 
-define <16 x i32>* @f23(i32 %a0, <16 x i32>* %a1, <16 x i32> %a2) #0 {
+define ptr @f23(i32 %a0, ptr %a1, <16 x i32> %a2) #0 {
 ; CHECK-LABEL: f23:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -528,11 +528,11 @@ define <16 x i32>* @f23(i32 %a0, <16 x i32>* %a1, <16 x i32> %a2) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <16 x i32>* @llvm.hexagon.V6.vS32b.nt.npred.pi(i1 %v0, <16 x i32>* %a1, i32 -192, <16 x i32> %a2)
-  ret <16 x i32>* %v1
+  %v1 = call ptr @llvm.hexagon.V6.vS32b.nt.npred.pi(i1 %v0, ptr %a1, i32 -192, <16 x i32> %a2)
+  ret ptr %v1
 }
 
-define <16 x i32>* @f24(i32 %a0, <16 x i32>* %a1, i32 %a2, <16 x i32> %a3) #0 {
+define ptr @f24(i32 %a0, ptr %a1, i32 %a2, <16 x i32> %a3) #0 {
 ; CHECK-LABEL: f24:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -552,11 +552,11 @@ define <16 x i32>* @f24(i32 %a0, <16 x i32>* %a1, i32 %a2, <16 x i32> %a3) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <16 x i32>* @llvm.hexagon.V6.vS32b.pred.ppu(i1 %v0, <16 x i32>* %a1, i32 %a2, <16 x i32> %a3)
-  ret <16 x i32>* %v1
+  %v1 = call ptr @llvm.hexagon.V6.vS32b.pred.ppu(i1 %v0, ptr %a1, i32 %a2, <16 x i32> %a3)
+  ret ptr %v1
 }
 
-define <16 x i32>* @f25(i32 %a0, <16 x i32>* %a1, i32 %a2, <16 x i32> %a3) #0 {
+define ptr @f25(i32 %a0, ptr %a1, i32 %a2, <16 x i32> %a3) #0 {
 ; CHECK-LABEL: f25:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -576,11 +576,11 @@ define <16 x i32>* @f25(i32 %a0, <16 x i32>* %a1, i32 %a2, <16 x i32> %a3) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <16 x i32>* @llvm.hexagon.V6.vS32b.npred.ppu(i1 %v0, <16 x i32>* %a1, i32 %a2, <16 x i32> %a3)
-  ret <16 x i32>* %v1
+  %v1 = call ptr @llvm.hexagon.V6.vS32b.npred.ppu(i1 %v0, ptr %a1, i32 %a2, <16 x i32> %a3)
+  ret ptr %v1
 }
 
-define <16 x i32>* @f26(i32 %a0, <16 x i32>* %a1, i32 %a2, <16 x i32> %a3) #0 {
+define ptr @f26(i32 %a0, ptr %a1, i32 %a2, <16 x i32> %a3) #0 {
 ; CHECK-LABEL: f26:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -600,11 +600,11 @@ define <16 x i32>* @f26(i32 %a0, <16 x i32>* %a1, i32 %a2, <16 x i32> %a3) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <16 x i32>* @llvm.hexagon.V6.vS32Ub.pred.ppu(i1 %v0, <16 x i32>* %a1, i32 %a2, <16 x i32> %a3)
-  ret <16 x i32>* %v1
+  %v1 = call ptr @llvm.hexagon.V6.vS32Ub.pred.ppu(i1 %v0, ptr %a1, i32 %a2, <16 x i32> %a3)
+  ret ptr %v1
 }
 
-define <16 x i32>* @f27(i32 %a0, <16 x i32>* %a1, i32 %a2, <16 x i32> %a3) #0 {
+define ptr @f27(i32 %a0, ptr %a1, i32 %a2, <16 x i32> %a3) #0 {
 ; CHECK-LABEL: f27:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -624,11 +624,11 @@ define <16 x i32>* @f27(i32 %a0, <16 x i32>* %a1, i32 %a2, <16 x i32> %a3) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <16 x i32>* @llvm.hexagon.V6.vS32Ub.npred.ppu(i1 %v0, <16 x i32>* %a1, i32 %a2, <16 x i32> %a3)
-  ret <16 x i32>* %v1
+  %v1 = call ptr @llvm.hexagon.V6.vS32Ub.npred.ppu(i1 %v0, ptr %a1, i32 %a2, <16 x i32> %a3)
+  ret ptr %v1
 }
 
-define <16 x i32>* @f28(i32 %a0, <16 x i32>* %a1, i32 %a2, <16 x i32> %a3) #0 {
+define ptr @f28(i32 %a0, ptr %a1, i32 %a2, <16 x i32> %a3) #0 {
 ; CHECK-LABEL: f28:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -648,11 +648,11 @@ define <16 x i32>* @f28(i32 %a0, <16 x i32>* %a1, i32 %a2, <16 x i32> %a3) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <16 x i32>* @llvm.hexagon.V6.vS32b.nt.pred.ppu(i1 %v0, <16 x i32>* %a1, i32 %a2, <16 x i32> %a3)
-  ret <16 x i32>* %v1
+  %v1 = call ptr @llvm.hexagon.V6.vS32b.nt.pred.ppu(i1 %v0, ptr %a1, i32 %a2, <16 x i32> %a3)
+  ret ptr %v1
 }
 
-define <16 x i32>* @f29(i32 %a0, <16 x i32>* %a1, i32 %a2, <16 x i32> %a3) #0 {
+define ptr @f29(i32 %a0, ptr %a1, i32 %a2, <16 x i32> %a3) #0 {
 ; CHECK-LABEL: f29:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -672,8 +672,8 @@ define <16 x i32>* @f29(i32 %a0, <16 x i32>* %a1, i32 %a2, <16 x i32> %a3) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = icmp eq i32 %a0, 0
-  %v1 = call <16 x i32>* @llvm.hexagon.V6.vS32b.nt.npred.ppu(i1 %v0, <16 x i32>* %a1, i32 %a2, <16 x i32> %a3)
-  ret <16 x i32>* %v1
+  %v1 = call ptr @llvm.hexagon.V6.vS32b.nt.npred.ppu(i1 %v0, ptr %a1, i32 %a2, <16 x i32> %a3)
+  ret ptr %v1
 }
 
 attributes #0 = { nounwind "target-cpu"="hexagonv66" "target-features"="+hvxv66,+hvx-length64b,-packets" }

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/vector-align-addr.ll b/llvm/test/CodeGen/Hexagon/autohvx/vector-align-addr.ll
index 25ba8e6300869..917c18e467915 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/vector-align-addr.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/vector-align-addr.ll
@@ -7,15 +7,14 @@
 ; CHECK: [[REG:r[0-9]+]] = add(r{{[0-9]+}},#2432)
 ; CHECK: = vmem([[REG]]+#0)
 
-define dllexport void @test(i8* %a0) local_unnamed_addr #0 {
+define dllexport void @test(ptr %a0) local_unnamed_addr #0 {
 b0:
   %v0 = add nuw nsw i32 0, 3040
-  %v1 = load i8, i8* undef, align 1
+  %v1 = load i8, ptr undef, align 1
   %v2 = insertelement <19 x i8> undef, i8 %v1, i32 0
   %v3 = shufflevector <19 x i8> %v2, <19 x i8> undef, <19 x i32> zeroinitializer
-  %v4 = getelementptr inbounds i8, i8* %a0, i32 %v0
-  %v5 = bitcast i8* %v4 to <19 x i8>*
-  %v6 = load <19 x i8>, <19 x i8>* %v5, align 1
+  %v4 = getelementptr inbounds i8, ptr %a0, i32 %v0
+  %v6 = load <19 x i8>, ptr %v4, align 1
   %v7 = mul <19 x i8> %v3, %v6
   %v8 = add <19 x i8> %v7, zeroinitializer
   %v9 = add <19 x i8> zeroinitializer, %v8
@@ -28,20 +27,18 @@ b0:
   %v16 = add <19 x i8> zeroinitializer, %v15
   %v17 = add <19 x i8> zeroinitializer, %v16
   %v18 = add <19 x i8> zeroinitializer, %v17
-  %v19 = load i8, i8* undef, align 1
+  %v19 = load i8, ptr undef, align 1
   %v20 = insertelement <19 x i8> undef, i8 %v19, i32 0
   %v21 = shufflevector <19 x i8> %v20, <19 x i8> undef, <19 x i32> zeroinitializer
   %v22 = add nuw nsw i32 0, 5472
-  %v23 = getelementptr inbounds i8, i8* %a0, i32 %v22
-  %v24 = bitcast i8* %v23 to <19 x i8>*
-  %v25 = load <19 x i8>, <19 x i8>* %v24, align 1
+  %v23 = getelementptr inbounds i8, ptr %a0, i32 %v22
+  %v25 = load <19 x i8>, ptr %v23, align 1
   %v26 = mul <19 x i8> %v21, %v25
   %v27 = add <19 x i8> %v26, %v18
   %v28 = add <19 x i8> zeroinitializer, %v27
   %v29 = add <19 x i8> zeroinitializer, %v28
   %v30 = add <19 x i8> zeroinitializer, %v29
-  %v31 = bitcast i8* %a0 to <19 x i8>*
-  store <19 x i8> %v30, <19 x i8>* %v31, align 1
+  store <19 x i8> %v30, ptr %a0, align 1
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/vector-align-bad-move.ll b/llvm/test/CodeGen/Hexagon/autohvx/vector-align-bad-move.ll
index 8da468a332f05..7aad382e67020 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/vector-align-bad-move.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/vector-align-bad-move.ll
@@ -13,24 +13,20 @@ entry:
   br label %for_begin77
 
 for_begin77:
-  %0 = load i8*, i8** undef, align 4
-  %1 = getelementptr i8, i8* %0, i32 1794
-  %2 = bitcast i8* %1 to <64 x half>*
-  %3 = call <64 x half> @llvm.masked.load.v64f16.p0v64f16(<64 x half>* %2, i32 1, <64 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 true>, <64 x half> undef)
-  %4 = getelementptr i8, i8* %0, i32 1922
-  %5 = bitcast i8* %4 to <64 x half>*
-  %6 = call <64 x half> @llvm.masked.load.v64f16.p0v64f16(<64 x half>* %5, i32 1, <64 x i1> <i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false>, <64 x half> undef)
-  %7 = shufflevector <64 x half> %3, <64 x half> %6, <64 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62, i32 63, i32 65, i32 67, i32 69, i32 71, i32 73, i32 75, i32 77, i32 79, i32 81, i32 83, i32 85, i32 87, i32 89, i32 91, i32 93, i32 95, i32 97, i32 99, i32 101, i32 103, i32 105, i32 107, i32 109, i32 111, i32 113, i32 115, i32 117, i32 119, i32 121, i32 123, i32 125>
-  call void @llvm.assume(i1 true) [ "align"(i8* null, i32 128) ]
-  %8 = getelementptr i8, i8* null, i32 128
-  %9 = bitcast i8* %8 to <64 x half>*
-  %10 = fadd <64 x half> zeroinitializer, %7
-  %11 = shufflevector <64 x half> %10, <64 x half> undef, <64 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
-  %12 = getelementptr i8, i8* %0, i32 1920
-  %13 = bitcast i8* %12 to <64 x half>*
-  %unmaskedload243 = load <64 x half>, <64 x half>* %13, align 128
-  %14 = fadd <64 x half> %11, %unmaskedload243
-  store <64 x half> %14, <64 x half>* %9, align 128
+  %0 = load ptr, ptr undef, align 4
+  %1 = getelementptr i8, ptr %0, i32 1794
+  %2 = call <64 x half> @llvm.masked.load.v64f16.p0(ptr %1, i32 1, <64 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 true>, <64 x half> undef)
+  %3 = getelementptr i8, ptr %0, i32 1922
+  %4 = call <64 x half> @llvm.masked.load.v64f16.p0(ptr %3, i32 1, <64 x i1> <i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false>, <64 x half> undef)
+  %5 = shufflevector <64 x half> %2, <64 x half> %4, <64 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62, i32 63, i32 65, i32 67, i32 69, i32 71, i32 73, i32 75, i32 77, i32 79, i32 81, i32 83, i32 85, i32 87, i32 89, i32 91, i32 93, i32 95, i32 97, i32 99, i32 101, i32 103, i32 105, i32 107, i32 109, i32 111, i32 113, i32 115, i32 117, i32 119, i32 121, i32 123, i32 125>
+  call void @llvm.assume(i1 true) [ "align"(ptr null, i32 128) ]
+  %6 = getelementptr i8, ptr null, i32 128
+  %7 = fadd <64 x half> zeroinitializer, %5
+  %8 = shufflevector <64 x half> %7, <64 x half> undef, <64 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
+  %9 = getelementptr i8, ptr %0, i32 1920
+  %unmaskedload243 = load <64 x half>, ptr %9, align 128
+  %10 = fadd <64 x half> %8, %unmaskedload243
+  store <64 x half> %10, ptr %6, align 128
   br label %for_begin77
 }
 
@@ -38,7 +34,7 @@ for_begin77:
 declare void @llvm.assume(i1 noundef) #1
 
 ; Function Attrs: argmemonly nofree nosync nounwind readonly willreturn
-declare <64 x half> @llvm.masked.load.v64f16.p0v64f16(<64 x half>*, i32 immarg, <64 x i1>, <64 x half>) #2
+declare <64 x half> @llvm.masked.load.v64f16.p0(ptr, i32 immarg, <64 x i1>, <64 x half>) #2
 
 attributes #0 = { "target-features"="+hvxv68,+hvx-length128b,+hvx-qfloat" }
 attributes #1 = { nofree nosync nounwind willreturn }

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/vector-align-basic.ll b/llvm/test/CodeGen/Hexagon/autohvx/vector-align-basic.ll
index 27afe949d8892..fd679b858847a 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/vector-align-basic.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/vector-align-basic.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s
 
 ; Function Attrs: nounwind
-define <32 x i32> @f0(i8* %a0, i32 %a1) #0 {
+define <32 x i32> @f0(ptr %a0, i32 %a1) #0 {
 ; CHECK-LABEL: f0:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -46,23 +46,21 @@ define <32 x i32> @f0(i8* %a0, i32 %a1) #0 {
 ; CHECK-NEXT:    }
 b0:
   %v0 = add i32 %a1, 128
-  %v1 = getelementptr i8, i8* %a0, i32 %v0
-  %v2 = bitcast i8* %v1 to <32 x i32>*
-  %v3 = tail call <32 x i32> @llvm.masked.load.v32i32.p0v32i32(<32 x i32>* %v2, i32 128, <32 x i1> <i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <32 x i32> undef)
+  %v1 = getelementptr i8, ptr %a0, i32 %v0
+  %v3 = tail call <32 x i32> @llvm.masked.load.v32i32.p0(ptr %v1, i32 128, <32 x i1> <i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <32 x i32> undef)
   %v4 = add i32 %a1, 136
-  %v5 = getelementptr i8, i8* %a0, i32 %v4
-  %v6 = bitcast i8* %v5 to <32 x i32>*
-  %v7 = tail call <32 x i32> @llvm.masked.load.v32i32.p0v32i32(<32 x i32>* %v6, i32 8, <32 x i1> <i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <32 x i32> undef)
+  %v5 = getelementptr i8, ptr %a0, i32 %v4
+  %v7 = tail call <32 x i32> @llvm.masked.load.v32i32.p0(ptr %v5, i32 8, <32 x i1> <i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <32 x i32> undef)
   %v8 = add <32 x i32> %v3, %v7
-  tail call void @llvm.masked.store.v32i32.p0v32i32(<32 x i32> %v8, <32 x i32>* %v2, i32 128, <32 x i1> <i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>)
+  tail call void @llvm.masked.store.v32i32.p0(<32 x i32> %v8, ptr %v1, i32 128, <32 x i1> <i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>)
   ret <32 x i32> %v8
 }
 
 ; Function Attrs: argmemonly nounwind readonly willreturn
-declare <32 x i32> @llvm.masked.load.v32i32.p0v32i32(<32 x i32>*, i32 immarg, <32 x i1>, <32 x i32>) #1
+declare <32 x i32> @llvm.masked.load.v32i32.p0(ptr, i32 immarg, <32 x i1>, <32 x i32>) #1
 
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.masked.store.v32i32.p0v32i32(<32 x i32>, <32 x i32>*, i32 immarg, <32 x i1>) #2
+declare void @llvm.masked.store.v32i32.p0(<32 x i32>, ptr, i32 immarg, <32 x i1>) #2
 
 attributes #0 = { nounwind "target-cpu"="hexagonv66" "target-features"="+hvx,+hvx-length128b,-packets" }
 attributes #1 = { argmemonly nounwind readonly willreturn }

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/vector-align-interleaved.ll b/llvm/test/CodeGen/Hexagon/autohvx/vector-align-interleaved.ll
index 415c717586c76..f654183335391 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/vector-align-interleaved.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/vector-align-interleaved.ll
@@ -10,7 +10,7 @@
 ; Just make sure that this compiles ok.
 
 ; Function Attrs: nounwind
-define void @f0(i16* noalias nocapture readonly %a0, i16* noalias nocapture %a1, i32 %a2) #0 {
+define void @f0(ptr noalias nocapture readonly %a0, ptr noalias nocapture %a1, i32 %a2) #0 {
 ; CHECK-LABEL: f0:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -46,29 +46,27 @@ b0:
   br i1 %v0, label %b3, label %b1
 
 b1:                                               ; preds = %b0
-  %v1 = bitcast i16* %a1 to <16 x i32>*
-  %v2 = bitcast i16* %a0 to <16 x i32>*
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
-  %v3 = phi <16 x i32>* [ %v16, %b2 ], [ %v1, %b1 ]
-  %v4 = phi <16 x i32>* [ %v11, %b2 ], [ %v2, %b1 ]
-  %v5 = getelementptr inbounds <16 x i32>, <16 x i32>* %v4, i32 1
-  %v6 = load <16 x i32>, <16 x i32>* %v4, align 64
-  %v7 = getelementptr inbounds <16 x i32>, <16 x i32>* %v4, i32 2
-  %v8 = load <16 x i32>, <16 x i32>* %v5, align 64
-  %v9 = getelementptr inbounds <16 x i32>, <16 x i32>* %v4, i32 3
-  %v10 = load <16 x i32>, <16 x i32>* %v7, align 64
-  %v11 = getelementptr inbounds <16 x i32>, <16 x i32>* %v4, i32 4
-  %v12 = load <16 x i32>, <16 x i32>* %v9, align 64
-  %v13 = getelementptr inbounds <16 x i32>, <16 x i32>* %v3, i32 1
-  store <16 x i32> %v6, <16 x i32>* %v3, align 64
-  %v14 = getelementptr inbounds <16 x i32>, <16 x i32>* %v3, i32 2
-  store <16 x i32> %v8, <16 x i32>* %v13, align 64
-  %v15 = getelementptr inbounds <16 x i32>, <16 x i32>* %v3, i32 3
-  store <16 x i32> %v10, <16 x i32>* %v14, align 64
-  %v16 = getelementptr inbounds <16 x i32>, <16 x i32>* %v3, i32 4
-  store <16 x i32> %v12, <16 x i32>* %v15, align 64
+  %v3 = phi ptr [ %v16, %b2 ], [ %a1, %b1 ]
+  %v4 = phi ptr [ %v11, %b2 ], [ %a0, %b1 ]
+  %v5 = getelementptr inbounds <16 x i32>, ptr %v4, i32 1
+  %v6 = load <16 x i32>, ptr %v4, align 64
+  %v7 = getelementptr inbounds <16 x i32>, ptr %v4, i32 2
+  %v8 = load <16 x i32>, ptr %v5, align 64
+  %v9 = getelementptr inbounds <16 x i32>, ptr %v4, i32 3
+  %v10 = load <16 x i32>, ptr %v7, align 64
+  %v11 = getelementptr inbounds <16 x i32>, ptr %v4, i32 4
+  %v12 = load <16 x i32>, ptr %v9, align 64
+  %v13 = getelementptr inbounds <16 x i32>, ptr %v3, i32 1
+  store <16 x i32> %v6, ptr %v3, align 64
+  %v14 = getelementptr inbounds <16 x i32>, ptr %v3, i32 2
+  store <16 x i32> %v8, ptr %v13, align 64
+  %v15 = getelementptr inbounds <16 x i32>, ptr %v3, i32 3
+  store <16 x i32> %v10, ptr %v14, align 64
+  %v16 = getelementptr inbounds <16 x i32>, ptr %v3, i32 4
+  store <16 x i32> %v12, ptr %v15, align 64
   br label %b2
 
 b3:                                               ; preds = %b0

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/vector-align-only-phi-use.ll b/llvm/test/CodeGen/Hexagon/autohvx/vector-align-only-phi-use.ll
index dc83fc9be2df2..2f1c7035d0480 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/vector-align-only-phi-use.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/vector-align-only-phi-use.ll
@@ -16,11 +16,9 @@ declare <16 x i32> @llvm.hexagon.V6.vand(<16 x i32>, <16 x i32>) #0
 declare <64 x i1> @llvm.hexagon.V6.vgtuw.and(<64 x i1>, <16 x i32>, <16 x i32>) #0
 declare <64 x i1> @llvm.hexagon.V6.pred.or(<64 x i1>, <64 x i1>) #0
 
-define <16 x i32> @f0(i8* %a0, i32 %a1) local_unnamed_addr #1 {
+define <16 x i32> @f0(ptr %a0, i32 %a1) local_unnamed_addr #1 {
 b0:
-  %v0 = getelementptr inbounds i8, i8* %a0, i32 576
-  %v1 = bitcast i8* %a0 to <16 x i32>*
-  %v2 = bitcast i8* %v0 to <16 x i32>*
+  %v0 = getelementptr inbounds i8, ptr %a0, i32 576
   br label %b1
 
 b1:                                               ; preds = %b4, %b0
@@ -29,10 +27,10 @@ b1:                                               ; preds = %b4, %b0
   br i1 poison, label %b2, label %b3
 
 b2:                                               ; preds = %b1
-  %v5 = getelementptr inbounds <16 x i32>, <16 x i32>* %v1, i32 %v3
-  %v6 = load <16 x i32>, <16 x i32>* %v5, align 64
-  %v7 = getelementptr inbounds <16 x i32>, <16 x i32>* %v2, i32 %v3
-  %v8 = load <16 x i32>, <16 x i32>* %v7, align 64
+  %v5 = getelementptr inbounds <16 x i32>, ptr %a0, i32 %v3
+  %v6 = load <16 x i32>, ptr %v5, align 64
+  %v7 = getelementptr inbounds <16 x i32>, ptr %v0, i32 %v3
+  %v8 = load <16 x i32>, ptr %v7, align 64
   %v9 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiewuh.acc(<16 x i32> poison, <16 x i32> %v6, <16 x i32> %v6)
   br label %b4
 
@@ -45,8 +43,8 @@ b4:                                               ; preds = %b3, %b2
   %v12 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<64 x i1> poison, <16 x i32> %v10, <16 x i32> %v4)
   %v13 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<64 x i1> poison, <16 x i32> %v11, <16 x i32> poison)
   %v14 = or i32 %v3, 1
-  %v15 = getelementptr inbounds <16 x i32>, <16 x i32>* %v2, i32 %v14
-  %v16 = load <16 x i32>, <16 x i32>* %v15, align 64
+  %v15 = getelementptr inbounds <16 x i32>, ptr %v0, i32 %v14
+  %v16 = load <16 x i32>, ptr %v15, align 64
   %v17 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiewuh.acc(<16 x i32> poison, <16 x i32> %v13, <16 x i32> poison)
   %v18 = tail call <16 x i32> @llvm.hexagon.V6.vand(<16 x i32> %v12, <16 x i32> poison)
   %v19 = tail call <64 x i1> @llvm.hexagon.V6.vgtuw.and(<64 x i1> poison, <16 x i32> %v17, <16 x i32> poison)

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/vector-align-rescale-nonint.ll b/llvm/test/CodeGen/Hexagon/autohvx/vector-align-rescale-nonint.ll
index 157207d5ad9df..5cf87cc5b8c4e 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/vector-align-rescale-nonint.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/vector-align-rescale-nonint.ll
@@ -6,22 +6,20 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define dllexport void @f0(float* %a0, <32 x float> %a1, <32 x float> %a2) local_unnamed_addr #0 {
+define dllexport void @f0(ptr %a0, <32 x float> %a1, <32 x float> %a2) local_unnamed_addr #0 {
 b0:
   %v0 = add nuw nsw i32 0, 64
-  %v1 = getelementptr inbounds float, float* %a0, i32 %v0
-  %v2 = bitcast float* %v1 to <32 x float>*
+  %v1 = getelementptr inbounds float, ptr %a0, i32 %v0
   %v3 = add nuw nsw i32 0, 96
-  %v4 = getelementptr inbounds float, float* %a0, i32 %v3
-  %v5 = bitcast float* %v4 to <32 x float>*
+  %v4 = getelementptr inbounds float, ptr %a0, i32 %v3
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
   br i1 undef, label %b2, label %b1
 
 b2:                                               ; preds = %b1
-  store <32 x float> %a1, <32 x float>* %v2, align 4
-  store <32 x float> %a2, <32 x float>* %v5, align 4
+  store <32 x float> %a1, ptr %v1, align 4
+  store <32 x float> %a2, ptr %v4, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/vector-align-scalar-mask.ll b/llvm/test/CodeGen/Hexagon/autohvx/vector-align-scalar-mask.ll
index 0cb74905a28c5..345d27d96fa6a 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/vector-align-scalar-mask.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/vector-align-scalar-mask.ll
@@ -6,24 +6,23 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define dllexport void @f0(i32* %a0, i32 %a1, i32 %a2, <32 x i32> %a3) local_unnamed_addr #0 {
+define dllexport void @f0(ptr %a0, i32 %a1, i32 %a2, <32 x i32> %a3) local_unnamed_addr #0 {
 b0:
   %v0 = add nuw nsw i32 0, 96
-  %v1 = getelementptr inbounds i32, i32* %a0, i32 %v0
-  %v2 = bitcast i32* %v1 to <32 x i32>*
+  %v1 = getelementptr inbounds i32, ptr %a0, i32 %v0
   %v3 = add nuw nsw i32 0, 225
-  %v4 = getelementptr inbounds i32, i32* %a0, i32 %v3
+  %v4 = getelementptr inbounds i32, ptr %a0, i32 %v3
   %v5 = add nuw nsw i32 0, 226
-  %v6 = getelementptr inbounds i32, i32* %a0, i32 %v5
+  %v6 = getelementptr inbounds i32, ptr %a0, i32 %v5
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
   br i1 undef, label %b2, label %b1
 
 b2:                                               ; preds = %b1
-  store <32 x i32> %a3, <32 x i32>* %v2, align 4
-  store i32 %a1, i32* %v4, align 4
-  store i32 %a2, i32* %v6, align 4
+  store <32 x i32> %a3, ptr %v1, align 4
+  store i32 %a1, ptr %v4, align 4
+  store i32 %a2, ptr %v6, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/vector-align-store-mask.ll b/llvm/test/CodeGen/Hexagon/autohvx/vector-align-store-mask.ll
index d63366fc1ca9a..4124cdf7412eb 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/vector-align-store-mask.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/vector-align-store-mask.ll
@@ -14,19 +14,17 @@ target triple = "hexagon"
 ; CHECK: [[QREG:q[0-3]+]] = vand([[VREG2]],r{{[0-9]+}})
 ; CHECK: if ([[QREG]]) vmem({{.*}}) = v{{[0-9]+}}
 
-define dllexport void @f0(i32* %a0) local_unnamed_addr #0 {
+define dllexport void @f0(ptr %a0) local_unnamed_addr #0 {
 b0:
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
   %v0 = or i32 -1, 40
-  %v1 = getelementptr inbounds i32, i32* %a0, i32 %v0
-  %v2 = bitcast i32* %v1 to <8 x i32>*
-  store <8 x i32> undef, <8 x i32>* %v2, align 32
+  %v1 = getelementptr inbounds i32, ptr %a0, i32 %v0
+  store <8 x i32> undef, ptr %v1, align 32
   %v3 = or i32 0, 48
-  %v4 = getelementptr inbounds i32, i32* %a0, i32 %v3
-  %v5 = bitcast i32* %v4 to <8 x i32>*
-  store <8 x i32> undef, <8 x i32>* %v5, align 64
+  %v4 = getelementptr inbounds i32, ptr %a0, i32 %v3
+  store <8 x i32> undef, ptr %v4, align 64
   br i1 undef, label %b2, label %b1
 
 b2:                                               ; preds = %b1

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/vector-align-store.ll b/llvm/test/CodeGen/Hexagon/autohvx/vector-align-store.ll
index ddf35fb406087..6a79f3dd152bc 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/vector-align-store.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/vector-align-store.ll
@@ -6,16 +6,14 @@
 ; CHECK: vmem({{.*}}) =
 ; CHECK-NOT: vmem
 
-define void @f0(i16* %a0, i32 %a11, <64 x i16> %a22, <64 x i16> %a3) #0 {
+define void @f0(ptr %a0, i32 %a11, <64 x i16> %a22, <64 x i16> %a3) #0 {
 b0:
   %v0 = add i32 %a11, 64
-  %v1 = getelementptr i16, i16* %a0, i32 %v0
-  %v2 = bitcast i16* %v1 to <64 x i16>*
-  store <64 x i16> %a22, <64 x i16>* %v2, align 2
+  %v1 = getelementptr i16, ptr %a0, i32 %v0
+  store <64 x i16> %a22, ptr %v1, align 2
   %v33 = add i32 %a11, 128
-  %v44 = getelementptr i16, i16* %a0, i32 %v33
-  %v5 = bitcast i16* %v44 to <64 x i16>*
-  store <64 x i16> %a3, <64 x i16>* %v5, align 2
+  %v44 = getelementptr i16, ptr %a0, i32 %v33
+  store <64 x i16> %a3, ptr %v44, align 2
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/vector-align-use-in-
diff erent-block.ll b/llvm/test/CodeGen/Hexagon/autohvx/vector-align-use-in-
diff erent-block.ll
index 74405c0f141d4..1f6ac0d1ff3f6 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/vector-align-use-in-
diff erent-block.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/vector-align-use-in-
diff erent-block.ll
@@ -17,13 +17,11 @@ b0:
 
 b1:                                               ; preds = %b0
   %v0 = mul nsw i32 -4, %a0
-  %v1 = getelementptr inbounds i8, i8* null, i32 %v0
-  %v2 = getelementptr inbounds i8, i8* %v1, i32 -64
-  %v3 = bitcast i8* %v2 to <16 x i32>*
-  %v4 = load <16 x i32>, <16 x i32>* %v3, align 64
-  %v5 = getelementptr inbounds i8, i8* %v1, i32 64
-  %v6 = bitcast i8* %v5 to <16 x i32>*
-  %v7 = load <16 x i32>, <16 x i32>* %v6, align 64
+  %v1 = getelementptr inbounds i8, ptr null, i32 %v0
+  %v2 = getelementptr inbounds i8, ptr %v1, i32 -64
+  %v4 = load <16 x i32>, ptr %v2, align 64
+  %v5 = getelementptr inbounds i8, ptr %v1, i32 64
+  %v7 = load <16 x i32>, ptr %v5, align 64
   br label %b2
 
 b2:                                               ; preds = %b2, %b1

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/vector-load-store-basic.ll b/llvm/test/CodeGen/Hexagon/autohvx/vector-load-store-basic.ll
index 25eccd3d2b648..0626bf46ac541 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/vector-load-store-basic.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/vector-load-store-basic.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -march=hexagon < %s | FileCheck %s
 
-define void @f0(<128 x i8>* %a0, <128 x i8>* %a1) #0 {
+define void @f0(ptr %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f0:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -9,14 +9,14 @@ define void @f0(<128 x i8>* %a0, <128 x i8>* %a1) #0 {
 ; CHECK-NEXT:     v0.cur = vmem(r0+#1)
 ; CHECK-NEXT:     vmem(r1+#2) = v0
 ; CHECK-NEXT:    }
-  %v0 = getelementptr <128 x i8>, <128 x i8>* %a0, i32 1
-  %v1 = load <128 x i8>, <128 x i8>* %v0, align 128
-  %v2 = getelementptr <128 x i8>, <128 x i8>* %a1, i32 2
-  store <128 x i8> %v1, <128 x i8>* %v2, align 128
+  %v0 = getelementptr <128 x i8>, ptr %a0, i32 1
+  %v1 = load <128 x i8>, ptr %v0, align 128
+  %v2 = getelementptr <128 x i8>, ptr %a1, i32 2
+  store <128 x i8> %v1, ptr %v2, align 128
   ret void
 }
 
-define void @f1(<64 x i16>* %a0, <64 x i16>* %a1) #0 {
+define void @f1(ptr %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -24,14 +24,14 @@ define void @f1(<64 x i16>* %a0, <64 x i16>* %a1) #0 {
 ; CHECK-NEXT:     v0.cur = vmem(r0+#1)
 ; CHECK-NEXT:     vmem(r1+#2) = v0
 ; CHECK-NEXT:    }
-  %v0 = getelementptr <64 x i16>, <64 x i16>* %a0, i32 1
-  %v1 = load <64 x i16>, <64 x i16>* %v0, align 128
-  %v2 = getelementptr <64 x i16>, <64 x i16>* %a1, i32 2
-  store <64 x i16> %v1, <64 x i16>* %v2, align 128
+  %v0 = getelementptr <64 x i16>, ptr %a0, i32 1
+  %v1 = load <64 x i16>, ptr %v0, align 128
+  %v2 = getelementptr <64 x i16>, ptr %a1, i32 2
+  store <64 x i16> %v1, ptr %v2, align 128
   ret void
 }
 
-define void @f2(<32 x i32>* %a0, <32 x i32>* %a1) #0 {
+define void @f2(ptr %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -39,14 +39,14 @@ define void @f2(<32 x i32>* %a0, <32 x i32>* %a1) #0 {
 ; CHECK-NEXT:     v0.cur = vmem(r0+#1)
 ; CHECK-NEXT:     vmem(r1+#2) = v0
 ; CHECK-NEXT:    }
-  %v0 = getelementptr <32 x i32>, <32 x i32>* %a0, i32 1
-  %v1 = load <32 x i32>, <32 x i32>* %v0, align 128
-  %v2 = getelementptr <32 x i32>, <32 x i32>* %a1, i32 2
-  store <32 x i32> %v1, <32 x i32>* %v2, align 128
+  %v0 = getelementptr <32 x i32>, ptr %a0, i32 1
+  %v1 = load <32 x i32>, ptr %v0, align 128
+  %v2 = getelementptr <32 x i32>, ptr %a1, i32 2
+  store <32 x i32> %v1, ptr %v2, align 128
   ret void
 }
 
-define void @f3(<64 x half>* %a0, <64 x half>* %a1) #0 {
+define void @f3(ptr %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f3:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -54,14 +54,14 @@ define void @f3(<64 x half>* %a0, <64 x half>* %a1) #0 {
 ; CHECK-NEXT:     v0.cur = vmem(r0+#1)
 ; CHECK-NEXT:     vmem(r1+#2) = v0
 ; CHECK-NEXT:    }
-  %v0 = getelementptr <64 x half>, <64 x half>* %a0, i32 1
-  %v1 = load <64 x half>, <64 x half>* %v0, align 128
-  %v2 = getelementptr <64 x half>, <64 x half>* %a1, i32 2
-  store <64 x half> %v1, <64 x half>* %v2, align 128
+  %v0 = getelementptr <64 x half>, ptr %a0, i32 1
+  %v1 = load <64 x half>, ptr %v0, align 128
+  %v2 = getelementptr <64 x half>, ptr %a1, i32 2
+  store <64 x half> %v1, ptr %v2, align 128
   ret void
 }
 
-define void @f4(<32 x float>* %a0, <32 x float>* %a1) #0 {
+define void @f4(ptr %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f4:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -69,14 +69,14 @@ define void @f4(<32 x float>* %a0, <32 x float>* %a1) #0 {
 ; CHECK-NEXT:     v0.cur = vmem(r0+#1)
 ; CHECK-NEXT:     vmem(r1+#2) = v0
 ; CHECK-NEXT:    }
-  %v0 = getelementptr <32 x float>, <32 x float>* %a0, i32 1
-  %v1 = load <32 x float>, <32 x float>* %v0, align 128
-  %v2 = getelementptr <32 x float>, <32 x float>* %a1, i32 2
-  store <32 x float> %v1, <32 x float>* %v2, align 128
+  %v0 = getelementptr <32 x float>, ptr %a0, i32 1
+  %v1 = load <32 x float>, ptr %v0, align 128
+  %v2 = getelementptr <32 x float>, ptr %a1, i32 2
+  store <32 x float> %v1, ptr %v2, align 128
   ret void
 }
 
-define void @f5(<128 x i8>* %a0, <128 x i8>* %a1) #0 {
+define void @f5(ptr %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f5:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -86,14 +86,14 @@ define void @f5(<128 x i8>* %a0, <128 x i8>* %a1) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     vmemu(r1+#2) = v0
 ; CHECK-NEXT:    }
-  %v0 = getelementptr <128 x i8>, <128 x i8>* %a0, i32 1
-  %v1 = load <128 x i8>, <128 x i8>* %v0, align 1
-  %v2 = getelementptr <128 x i8>, <128 x i8>* %a1, i32 2
-  store <128 x i8> %v1, <128 x i8>* %v2, align 1
+  %v0 = getelementptr <128 x i8>, ptr %a0, i32 1
+  %v1 = load <128 x i8>, ptr %v0, align 1
+  %v2 = getelementptr <128 x i8>, ptr %a1, i32 2
+  store <128 x i8> %v1, ptr %v2, align 1
   ret void
 }
 
-define void @f6(<64 x i16>* %a0, <64 x i16>* %a1) #0 {
+define void @f6(ptr %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f6:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -103,14 +103,14 @@ define void @f6(<64 x i16>* %a0, <64 x i16>* %a1) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     vmemu(r1+#2) = v0
 ; CHECK-NEXT:    }
-  %v0 = getelementptr <64 x i16>, <64 x i16>* %a0, i32 1
-  %v1 = load <64 x i16>, <64 x i16>* %v0, align 1
-  %v2 = getelementptr <64 x i16>, <64 x i16>* %a1, i32 2
-  store <64 x i16> %v1, <64 x i16>* %v2, align 1
+  %v0 = getelementptr <64 x i16>, ptr %a0, i32 1
+  %v1 = load <64 x i16>, ptr %v0, align 1
+  %v2 = getelementptr <64 x i16>, ptr %a1, i32 2
+  store <64 x i16> %v1, ptr %v2, align 1
   ret void
 }
 
-define void @f7(<32 x i32>* %a0, <32 x i32>* %a1) #0 {
+define void @f7(ptr %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f7:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -120,14 +120,14 @@ define void @f7(<32 x i32>* %a0, <32 x i32>* %a1) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     vmemu(r1+#2) = v0
 ; CHECK-NEXT:    }
-  %v0 = getelementptr <32 x i32>, <32 x i32>* %a0, i32 1
-  %v1 = load <32 x i32>, <32 x i32>* %v0, align 1
-  %v2 = getelementptr <32 x i32>, <32 x i32>* %a1, i32 2
-  store <32 x i32> %v1, <32 x i32>* %v2, align 1
+  %v0 = getelementptr <32 x i32>, ptr %a0, i32 1
+  %v1 = load <32 x i32>, ptr %v0, align 1
+  %v2 = getelementptr <32 x i32>, ptr %a1, i32 2
+  store <32 x i32> %v1, ptr %v2, align 1
   ret void
 }
 
-define void @f8(<64 x half>* %a0, <64 x half>* %a1) #0 {
+define void @f8(ptr %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -137,14 +137,14 @@ define void @f8(<64 x half>* %a0, <64 x half>* %a1) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     vmemu(r1+#2) = v0
 ; CHECK-NEXT:    }
-  %v0 = getelementptr <64 x half>, <64 x half>* %a0, i32 1
-  %v1 = load <64 x half>, <64 x half>* %v0, align 1
-  %v2 = getelementptr <64 x half>, <64 x half>* %a1, i32 2
-  store <64 x half> %v1, <64 x half>* %v2, align 1
+  %v0 = getelementptr <64 x half>, ptr %a0, i32 1
+  %v1 = load <64 x half>, ptr %v0, align 1
+  %v2 = getelementptr <64 x half>, ptr %a1, i32 2
+  store <64 x half> %v1, ptr %v2, align 1
   ret void
 }
 
-define void @f9(<32 x float>* %a0, <32 x float>* %a1) #0 {
+define void @f9(ptr %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f9:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -154,14 +154,14 @@ define void @f9(<32 x float>* %a0, <32 x float>* %a1) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     vmemu(r1+#2) = v0
 ; CHECK-NEXT:    }
-  %v0 = getelementptr <32 x float>, <32 x float>* %a0, i32 1
-  %v1 = load <32 x float>, <32 x float>* %v0, align 1
-  %v2 = getelementptr <32 x float>, <32 x float>* %a1, i32 2
-  store <32 x float> %v1, <32 x float>* %v2, align 1
+  %v0 = getelementptr <32 x float>, ptr %a0, i32 1
+  %v1 = load <32 x float>, ptr %v0, align 1
+  %v2 = getelementptr <32 x float>, ptr %a1, i32 2
+  store <32 x float> %v1, ptr %v2, align 1
   ret void
 }
 
-define void @f10(<256 x i8>* %a0, <256 x i8>* %a1) #0 {
+define void @f10(ptr %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f10:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -175,14 +175,14 @@ define void @f10(<256 x i8>* %a0, <256 x i8>* %a1) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     vmem(r1+#4) = v0
 ; CHECK-NEXT:    }
-  %v0 = getelementptr <256 x i8>, <256 x i8>* %a0, i32 1
-  %v1 = load <256 x i8>, <256 x i8>* %v0, align 128
-  %v2 = getelementptr <256 x i8>, <256 x i8>* %a1, i32 2
-  store <256 x i8> %v1, <256 x i8>* %v2, align 128
+  %v0 = getelementptr <256 x i8>, ptr %a0, i32 1
+  %v1 = load <256 x i8>, ptr %v0, align 128
+  %v2 = getelementptr <256 x i8>, ptr %a1, i32 2
+  store <256 x i8> %v1, ptr %v2, align 128
   ret void
 }
 
-define void @f11(<128 x i16>* %a0, <128 x i16>* %a1) #0 {
+define void @f11(ptr %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f11:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -196,14 +196,14 @@ define void @f11(<128 x i16>* %a0, <128 x i16>* %a1) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     vmem(r1+#4) = v0
 ; CHECK-NEXT:    }
-  %v0 = getelementptr <128 x i16>, <128 x i16>* %a0, i32 1
-  %v1 = load <128 x i16>, <128 x i16>* %v0, align 128
-  %v2 = getelementptr <128 x i16>, <128 x i16>* %a1, i32 2
-  store <128 x i16> %v1, <128 x i16>* %v2, align 128
+  %v0 = getelementptr <128 x i16>, ptr %a0, i32 1
+  %v1 = load <128 x i16>, ptr %v0, align 128
+  %v2 = getelementptr <128 x i16>, ptr %a1, i32 2
+  store <128 x i16> %v1, ptr %v2, align 128
   ret void
 }
 
-define void @f12(<64 x i32>* %a0, <64 x i32>* %a1) #0 {
+define void @f12(ptr %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f12:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -217,14 +217,14 @@ define void @f12(<64 x i32>* %a0, <64 x i32>* %a1) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     vmem(r1+#4) = v0
 ; CHECK-NEXT:    }
-  %v0 = getelementptr <64 x i32>, <64 x i32>* %a0, i32 1
-  %v1 = load <64 x i32>, <64 x i32>* %v0, align 128
-  %v2 = getelementptr <64 x i32>, <64 x i32>* %a1, i32 2
-  store <64 x i32> %v1, <64 x i32>* %v2, align 128
+  %v0 = getelementptr <64 x i32>, ptr %a0, i32 1
+  %v1 = load <64 x i32>, ptr %v0, align 128
+  %v2 = getelementptr <64 x i32>, ptr %a1, i32 2
+  store <64 x i32> %v1, ptr %v2, align 128
   ret void
 }
 
-define void @f13(<128 x half>* %a0, <128 x half>* %a1) #0 {
+define void @f13(ptr %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f13:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -238,14 +238,14 @@ define void @f13(<128 x half>* %a0, <128 x half>* %a1) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     vmem(r1+#4) = v0
 ; CHECK-NEXT:    }
-  %v0 = getelementptr <128 x half>, <128 x half>* %a0, i32 1
-  %v1 = load <128 x half>, <128 x half>* %v0, align 128
-  %v2 = getelementptr <128 x half>, <128 x half>* %a1, i32 2
-  store <128 x half> %v1, <128 x half>* %v2, align 128
+  %v0 = getelementptr <128 x half>, ptr %a0, i32 1
+  %v1 = load <128 x half>, ptr %v0, align 128
+  %v2 = getelementptr <128 x half>, ptr %a1, i32 2
+  store <128 x half> %v1, ptr %v2, align 128
   ret void
 }
 
-define void @f14(<64 x float>* %a0, <64 x float>* %a1) #0 {
+define void @f14(ptr %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f14:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -259,14 +259,14 @@ define void @f14(<64 x float>* %a0, <64 x float>* %a1) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     vmem(r1+#4) = v0
 ; CHECK-NEXT:    }
-  %v0 = getelementptr <64 x float>, <64 x float>* %a0, i32 1
-  %v1 = load <64 x float>, <64 x float>* %v0, align 128
-  %v2 = getelementptr <64 x float>, <64 x float>* %a1, i32 2
-  store <64 x float> %v1, <64 x float>* %v2, align 128
+  %v0 = getelementptr <64 x float>, ptr %a0, i32 1
+  %v1 = load <64 x float>, ptr %v0, align 128
+  %v2 = getelementptr <64 x float>, ptr %a1, i32 2
+  store <64 x float> %v1, ptr %v2, align 128
   ret void
 }
 
-define void @f15(<256 x i8>* %a0, <256 x i8>* %a1) #0 {
+define void @f15(ptr %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f15:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -282,14 +282,14 @@ define void @f15(<256 x i8>* %a0, <256 x i8>* %a1) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     vmemu(r1+#4) = v1
 ; CHECK-NEXT:    }
-  %v0 = getelementptr <256 x i8>, <256 x i8>* %a0, i32 1
-  %v1 = load <256 x i8>, <256 x i8>* %v0, align 1
-  %v2 = getelementptr <256 x i8>, <256 x i8>* %a1, i32 2
-  store <256 x i8> %v1, <256 x i8>* %v2, align 1
+  %v0 = getelementptr <256 x i8>, ptr %a0, i32 1
+  %v1 = load <256 x i8>, ptr %v0, align 1
+  %v2 = getelementptr <256 x i8>, ptr %a1, i32 2
+  store <256 x i8> %v1, ptr %v2, align 1
   ret void
 }
 
-define void @f16(<128 x i16>* %a0, <128 x i16>* %a1) #0 {
+define void @f16(ptr %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -305,14 +305,14 @@ define void @f16(<128 x i16>* %a0, <128 x i16>* %a1) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     vmemu(r1+#4) = v1
 ; CHECK-NEXT:    }
-  %v0 = getelementptr <128 x i16>, <128 x i16>* %a0, i32 1
-  %v1 = load <128 x i16>, <128 x i16>* %v0, align 1
-  %v2 = getelementptr <128 x i16>, <128 x i16>* %a1, i32 2
-  store <128 x i16> %v1, <128 x i16>* %v2, align 1
+  %v0 = getelementptr <128 x i16>, ptr %a0, i32 1
+  %v1 = load <128 x i16>, ptr %v0, align 1
+  %v2 = getelementptr <128 x i16>, ptr %a1, i32 2
+  store <128 x i16> %v1, ptr %v2, align 1
   ret void
 }
 
-define void @f17(<64 x i32>* %a0, <64 x i32>* %a1) #0 {
+define void @f17(ptr %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f17:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -328,14 +328,14 @@ define void @f17(<64 x i32>* %a0, <64 x i32>* %a1) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     vmemu(r1+#4) = v1
 ; CHECK-NEXT:    }
-  %v0 = getelementptr <64 x i32>, <64 x i32>* %a0, i32 1
-  %v1 = load <64 x i32>, <64 x i32>* %v0, align 1
-  %v2 = getelementptr <64 x i32>, <64 x i32>* %a1, i32 2
-  store <64 x i32> %v1, <64 x i32>* %v2, align 1
+  %v0 = getelementptr <64 x i32>, ptr %a0, i32 1
+  %v1 = load <64 x i32>, ptr %v0, align 1
+  %v2 = getelementptr <64 x i32>, ptr %a1, i32 2
+  store <64 x i32> %v1, ptr %v2, align 1
   ret void
 }
 
-define void @f18(<128 x half>* %a0, <128 x half>* %a1) #0 {
+define void @f18(ptr %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f18:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -351,14 +351,14 @@ define void @f18(<128 x half>* %a0, <128 x half>* %a1) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     vmemu(r1+#4) = v1
 ; CHECK-NEXT:    }
-  %v0 = getelementptr <128 x half>, <128 x half>* %a0, i32 1
-  %v1 = load <128 x half>, <128 x half>* %v0, align 1
-  %v2 = getelementptr <128 x half>, <128 x half>* %a1, i32 2
-  store <128 x half> %v1, <128 x half>* %v2, align 1
+  %v0 = getelementptr <128 x half>, ptr %a0, i32 1
+  %v1 = load <128 x half>, ptr %v0, align 1
+  %v2 = getelementptr <128 x half>, ptr %a1, i32 2
+  store <128 x half> %v1, ptr %v2, align 1
   ret void
 }
 
-define void @f19(<64 x float>* %a0, <64 x float>* %a1) #0 {
+define void @f19(ptr %a0, ptr %a1) #0 {
 ; CHECK-LABEL: f19:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -374,10 +374,10 @@ define void @f19(<64 x float>* %a0, <64 x float>* %a1) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     vmemu(r1+#4) = v1
 ; CHECK-NEXT:    }
-  %v0 = getelementptr <64 x float>, <64 x float>* %a0, i32 1
-  %v1 = load <64 x float>, <64 x float>* %v0, align 1
-  %v2 = getelementptr <64 x float>, <64 x float>* %a1, i32 2
-  store <64 x float> %v1, <64 x float>* %v2, align 1
+  %v0 = getelementptr <64 x float>, ptr %a0, i32 1
+  %v1 = load <64 x float>, ptr %v0, align 1
+  %v2 = getelementptr <64 x float>, ptr %a1, i32 2
+  store <64 x float> %v1, ptr %v2, align 1
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/widen-ext.ll b/llvm/test/CodeGen/Hexagon/autohvx/widen-ext.ll
index eb4f115220820..dd39cc9e0fd5a 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/widen-ext.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/widen-ext.ll
@@ -7,10 +7,10 @@
 ; CHECK: v[[V1:[0-9]+]]:[[V2:[0-9]+]].h = vunpack(v[[V0]].b)
 ; CHECK: q[[Q0:[0-3]]] = vsetq(r[[R0]])
 ; CHECK: if (q[[Q0]]) vmem(r1+#0) = v[[V2]]
-define void @f0(<32 x i8>* %a0, <32 x i16>* %a1) #0 {
-  %v0 = load <32 x i8>, <32 x i8>* %a0, align 128
+define void @f0(ptr %a0, ptr %a1) #0 {
+  %v0 = load <32 x i8>, ptr %a0, align 128
   %v1 = sext <32 x i8> %v0 to <32 x i16>
-  store <32 x i16> %v1, <32 x i16>* %a1, align 128
+  store <32 x i16> %v1, ptr %a1, align 128
   ret void
 }
 
@@ -20,10 +20,10 @@ define void @f0(<32 x i8>* %a0, <32 x i16>* %a1) #0 {
 ; CHECK: v[[V1:[0-9]+]]:[[V2:[0-9]+]].h = vunpack(v[[V0]].b)
 ; CHECK: v[[V3:[0-9]+]]:[[V4:[0-9]+]].w = vunpack(v[[V2]].h)
 ; CHECK: vmem(r1+#0) = v[[V4]]
-define void @f1(<32 x i8>* %a0, <32 x i32>* %a1) #0 {
-  %v0 = load <32 x i8>, <32 x i8>* %a0, align 128
+define void @f1(ptr %a0, ptr %a1) #0 {
+  %v0 = load <32 x i8>, ptr %a0, align 128
   %v1 = sext <32 x i8> %v0 to <32 x i32>
-  store <32 x i32> %v1, <32 x i32>* %a1, align 128
+  store <32 x i32> %v1, ptr %a1, align 128
   ret void
 }
 
@@ -32,10 +32,10 @@ define void @f1(<32 x i8>* %a0, <32 x i32>* %a1) #0 {
 ; CHECK: v[[V0:[0-9]+]] = vmem(r0+#0)
 ; CHECK: v[[V1:[0-9]+]]:[[V2:[0-9]+]].h = vunpack(v[[V0]].b)
 ; CHECK: vmem(r1+#0) = v[[V2]]
-define void @f2(<64 x i8>* %a0, <64 x i16>* %a1) #0 {
-  %v0 = load <64 x i8>, <64 x i8>* %a0, align 128
+define void @f2(ptr %a0, ptr %a1) #0 {
+  %v0 = load <64 x i8>, ptr %a0, align 128
   %v1 = sext <64 x i8> %v0 to <64 x i16>
-  store <64 x i16> %v1, <64 x i16>* %a1, align 128
+  store <64 x i16> %v1, ptr %a1, align 128
   ret void
 }
 
@@ -46,10 +46,10 @@ define void @f2(<64 x i8>* %a0, <64 x i16>* %a1) #0 {
 ; CHECK:     v[[V3:[0-9]+]]:[[V4:[0-9]+]].w = vunpack(v[[V2]].h)
 ; CHECK-DAG: vmem(r1+#0) = v[[V4]]
 ; CHECK-DAG: vmem(r1+#1) = v[[V3]]
-define void @f3(<64 x i8>* %a0, <64 x i32>* %a1) #0 {
-  %v0 = load <64 x i8>, <64 x i8>* %a0, align 128
+define void @f3(ptr %a0, ptr %a1) #0 {
+  %v0 = load <64 x i8>, ptr %a0, align 128
   %v1 = sext <64 x i8> %v0 to <64 x i32>
-  store <64 x i32> %v1, <64 x i32>* %a1, align 128
+  store <64 x i32> %v1, ptr %a1, align 128
   ret void
 }
 
@@ -60,10 +60,10 @@ define void @f3(<64 x i8>* %a0, <64 x i32>* %a1) #0 {
 ; CHECK: v[[V1:[0-9]+]]:[[V2:[0-9]+]].w = vunpack(v[[V0]].h)
 ; CHECK: q[[Q0:[0-3]]] = vsetq(r[[R0]])
 ; CHECK: if (q[[Q0]]) vmem(r1+#0) = v[[V2]]
-define void @f4(<16 x i16>* %a0, <16 x i32>* %a1) #0 {
-  %v0 = load <16 x i16>, <16 x i16>* %a0, align 128
+define void @f4(ptr %a0, ptr %a1) #0 {
+  %v0 = load <16 x i16>, ptr %a0, align 128
   %v1 = sext <16 x i16> %v0 to <16 x i32>
-  store <16 x i32> %v1, <16 x i32>* %a1, align 128
+  store <16 x i32> %v1, ptr %a1, align 128
   ret void
 }
 
@@ -72,10 +72,10 @@ define void @f4(<16 x i16>* %a0, <16 x i32>* %a1) #0 {
 ; CHECK: v[[V0:[0-9]+]] = vmem(r0+#0)
 ; CHECK: v[[V1:[0-9]+]]:[[V2:[0-9]+]].w = vunpack(v[[V0]].h)
 ; CHECK: vmem(r1+#0) = v[[V2]]
-define void @f5(<32 x i16>* %a0, <32 x i32>* %a1) #0 {
-  %v0 = load <32 x i16>, <32 x i16>* %a0, align 128
+define void @f5(ptr %a0, ptr %a1) #0 {
+  %v0 = load <32 x i16>, ptr %a0, align 128
   %v1 = sext <32 x i16> %v0 to <32 x i32>
-  store <32 x i32> %v1, <32 x i32>* %a1, align 128
+  store <32 x i32> %v1, ptr %a1, align 128
   ret void
 }
 
@@ -88,10 +88,10 @@ define void @f5(<32 x i16>* %a0, <32 x i32>* %a1) #0 {
 ; CHECK:     v[[V1:[0-9]+]]:[[V2:[0-9]+]].h = vunpack(v[[V0]].b)
 ; CHECK:     v[[V3:[0-9]+]]:[[V4:[0-9]+]].w = vunpack(v[[V2]].h)
 ; CHECK:     if (q[[Q0]]) vmem(r1+#0) = v[[V4]]
-define void @f6(<8 x i8>* %a0, <8 x i32>* %a1) #0 {
-  %v0 = load <8 x i8>, <8 x i8>* %a0, align 128
+define void @f6(ptr %a0, ptr %a1) #0 {
+  %v0 = load <8 x i8>, ptr %a0, align 128
   %v1 = sext <8 x i8> %v0 to <8 x i32>
-  store <8 x i32> %v1, <8 x i32>* %a1, align 128
+  store <8 x i32> %v1, ptr %a1, align 128
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/widen-setcc.ll b/llvm/test/CodeGen/Hexagon/autohvx/widen-setcc.ll
index 3475e86feb0c8..2a14ce4687601 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/widen-setcc.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/widen-setcc.ll
@@ -7,14 +7,13 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define dllexport void @f0(i16* %a0, <16 x i16> %a1) local_unnamed_addr #0 {
+define dllexport void @f0(ptr %a0, <16 x i16> %a1) local_unnamed_addr #0 {
 b0:
-  %v0 = getelementptr i16, i16* %a0, i32 undef
-  %v1 = bitcast i16* %v0 to <16 x i16>*
-  %v2 = load <16 x i16>, <16 x i16>* undef, align 2
+  %v0 = getelementptr i16, ptr %a0, i32 undef
+  %v2 = load <16 x i16>, ptr undef, align 2
   %v3 = icmp sgt <16 x i16> zeroinitializer, %v2
   %v4 = select <16 x i1> %v3, <16 x i16> %a1, <16 x i16> %v2
-  store <16 x i16> %v4, <16 x i16>* %v1, align 2
+  store <16 x i16> %v4, ptr %v0, align 2
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/autohvx/widen-trunc.ll b/llvm/test/CodeGen/Hexagon/autohvx/widen-trunc.ll
index 758e468c9c192..7f9a6078a26f8 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/widen-trunc.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/widen-trunc.ll
@@ -9,10 +9,10 @@
 ; CHECK: v[[V1:[0-9]+]].b = vpacke({{.*}},v[[V0]].h)
 ; CHECK: q[[Q0:[0-3]]] = vsetq(r[[R0]])
 ; CHECK: if (q[[Q0]]) vmem(r1+#0) = v[[V1]]
-define void @f0(<32 x i16>* %a0, <32 x i8>* %a1) #0 {
-  %v0 = load <32 x i16>, <32 x i16>* %a0, align 128
+define void @f0(ptr %a0, ptr %a1) #0 {
+  %v0 = load <32 x i16>, ptr %a0, align 128
   %v1 = trunc <32 x i16> %v0 to <32 x i8>
-  store <32 x i8> %v1, <32 x i8>* %a1, align 128
+  store <32 x i8> %v1, ptr %a1, align 128
   ret void
 }
 
@@ -23,10 +23,10 @@ define void @f0(<32 x i16>* %a0, <32 x i8>* %a1) #0 {
 ; CHECK: v[[V1:[0-9]+]].b = vdeale({{.*}},v[[V0]].b)
 ; CHECK: q[[Q0:[0-3]]] = vsetq(r[[R0]])
 ; CHECK: if (q[[Q0]]) vmem(r1+#0) = v[[V1]]
-define void @f1(<32 x i32>* %a0, <32 x i8>* %a1) #0 {
-  %v0 = load <32 x i32>, <32 x i32>* %a0, align 128
+define void @f1(ptr %a0, ptr %a1) #0 {
+  %v0 = load <32 x i32>, ptr %a0, align 128
   %v1 = trunc <32 x i32> %v0 to <32 x i8>
-  store <32 x i8> %v1, <32 x i8>* %a1, align 128
+  store <32 x i8> %v1, ptr %a1, align 128
   ret void
 }
 
@@ -37,10 +37,10 @@ define void @f1(<32 x i32>* %a0, <32 x i8>* %a1) #0 {
 ; CHECK: v[[V1:[0-9]+]].b = vpacke({{.*}},v[[V0]].h)
 ; CHECK: q[[Q0:[0-3]]] = vsetq(r[[R0]])
 ; CHECK: if (q[[Q0]]) vmem(r1+#0) = v[[V1]]
-define void @f2(<64 x i16>* %a0, <64 x i8>* %a1) #0 {
-  %v0 = load <64 x i16>, <64 x i16>* %a0, align 128
+define void @f2(ptr %a0, ptr %a1) #0 {
+  %v0 = load <64 x i16>, ptr %a0, align 128
   %v1 = trunc <64 x i16> %v0 to <64 x i8>
-  store <64 x i8> %v1, <64 x i8>* %a1, align 128
+  store <64 x i8> %v1, ptr %a1, align 128
   ret void
 }
 
@@ -52,10 +52,10 @@ define void @f2(<64 x i16>* %a0, <64 x i8>* %a1) #0 {
 ; CHECK: v[[V2:[0-9]+]].h = vpacke(v[[V1]].w,v[[V0]].w)
 ; CHECK: v[[V3:[0-9]+]].b = vpacke({{.*}},v[[V2]].h)
 ; CHECK: if (q[[Q0]]) vmem(r1+#0) = v[[V3]]
-define void @f3(<64 x i32>* %a0, <64 x i8>* %a1) #0 {
-  %v0 = load <64 x i32>, <64 x i32>* %a0, align 128
+define void @f3(ptr %a0, ptr %a1) #0 {
+  %v0 = load <64 x i32>, ptr %a0, align 128
   %v1 = trunc <64 x i32> %v0 to <64 x i8>
-  store <64 x i8> %v1, <64 x i8>* %a1, align 128
+  store <64 x i8> %v1, ptr %a1, align 128
   ret void
 }
 
@@ -66,10 +66,10 @@ define void @f3(<64 x i32>* %a0, <64 x i8>* %a1) #0 {
 ; CHECK: v[[V1:[0-9]+]].h = vpacke({{.*}},v[[V0]].w)
 ; CHECK: q[[Q0:[0-3]]] = vsetq(r[[R0]])
 ; CHECK: if (q[[Q0]]) vmem(r1+#0) = v[[V1]]
-define void @f4(<16 x i32>* %a0, <16 x i16>* %a1) #0 {
-  %v0 = load <16 x i32>, <16 x i32>* %a0, align 128
+define void @f4(ptr %a0, ptr %a1) #0 {
+  %v0 = load <16 x i32>, ptr %a0, align 128
   %v1 = trunc <16 x i32> %v0 to <16 x i16>
-  store <16 x i16> %v1, <16 x i16>* %a1, align 128
+  store <16 x i16> %v1, ptr %a1, align 128
   ret void
 }
 
@@ -80,10 +80,10 @@ define void @f4(<16 x i32>* %a0, <16 x i16>* %a1) #0 {
 ; CHECK: v[[V1:[0-9]+]].h = vpacke({{.*}},v[[V0]].w)
 ; CHECK: q[[Q0:[0-3]]] = vsetq(r[[R0]])
 ; CHECK: if (q[[Q0]]) vmem(r1+#0) = v[[V1]]
-define void @f5(<32 x i32>* %a0, <32 x i16>* %a1) #0 {
-  %v0 = load <32 x i32>, <32 x i32>* %a0, align 128
+define void @f5(ptr %a0, ptr %a1) #0 {
+  %v0 = load <32 x i32>, ptr %a0, align 128
   %v1 = trunc <32 x i32> %v0 to <32 x i16>
-  store <32 x i16> %v1, <32 x i16>* %a1, align 128
+  store <32 x i16> %v1, ptr %a1, align 128
   ret void
 }
 
@@ -95,10 +95,10 @@ define void @f5(<32 x i32>* %a0, <32 x i16>* %a1) #0 {
 ; CHECK-DAG: r[[R1:[0-9]+]] = memw(r[[R0]]+#0)
 ; CHECK-DAG: r[[R2:[0-9]+]] = memw(r[[R0]]+#4)
 ; CHECK:     memd(r1+#0) = r[[R2]]:[[R1]]
-define void @f6(<8 x i32>* %a0, <8 x i8>* %a1) #0 {
-  %v0 = load <8 x i32>, <8 x i32>* %a0, align 128
+define void @f6(ptr %a0, ptr %a1) #0 {
+  %v0 = load <8 x i32>, ptr %a0, align 128
   %v1 = trunc <8 x i32> %v0 to <8 x i8>
-  store <8 x i8> %v1, <8 x i8>* %a1, align 128
+  store <8 x i8> %v1, ptr %a1, align 128
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/avoidVectorLowering.ll b/llvm/test/CodeGen/Hexagon/avoidVectorLowering.ll
index 523f6e7524fd6..3a63ea7f51b87 100644
--- a/llvm/test/CodeGen/Hexagon/avoidVectorLowering.ll
+++ b/llvm/test/CodeGen/Hexagon/avoidVectorLowering.ll
@@ -8,12 +8,12 @@ target triple = "hexagon-unknown--elf"
 ; Function Attrs: nounwind
 define i32 @f0() #0 {
 b0:
-  call void @llvm.memset.p0i8.i32(i8* align 8 bitcast ([32 x i16]* @g0 to i8*), i8 0, i32 64, i1 false)
+  call void @llvm.memset.p0.i32(ptr align 8 @g0, i8 0, i32 64, i1 false)
   ret i32 0
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1) #1
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1) #1
 
 attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length64b" }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/barrier-flag.ll b/llvm/test/CodeGen/Hexagon/barrier-flag.ll
index 7518faa37c1da..d4e48c8bb6141 100644
--- a/llvm/test/CodeGen/Hexagon/barrier-flag.ll
+++ b/llvm/test/CodeGen/Hexagon/barrier-flag.ll
@@ -11,11 +11,11 @@ entry:
 }
 
 ; Function Attrs: nounwind optsize
-define void @conv3x3(i8* nocapture readonly %inp, i8* nocapture readonly %mask, i32 %shift, i8* nocapture %outp, i32 %width) #1 {
+define void @conv3x3(ptr nocapture readonly %inp, ptr nocapture readonly %mask, i32 %shift, ptr nocapture %outp, i32 %width) #1 {
 entry:
   %cmp381 = icmp sgt i32 %width, 0
-  %arrayidx16.gep = getelementptr i8, i8* %mask, i32 4
-  %arrayidx19.gep = getelementptr i8, i8* %mask, i32 8
+  %arrayidx16.gep = getelementptr i8, ptr %mask, i32 4
+  %arrayidx19.gep = getelementptr i8, ptr %mask, i32 8
   br label %for.body
 
 for.body:                                         ; preds = %for.inc48, %entry
@@ -26,35 +26,35 @@ for.body:                                         ; preds = %for.inc48, %entry
 
 for.cond5.preheader.lr.ph:                        ; preds = %for.body
   %add.ptr.sum = add i32 %arrayidx.sum, %width
-  %add.ptr1 = getelementptr inbounds i8, i8* %inp, i32 %add.ptr.sum
-  %add.ptr = getelementptr inbounds i8, i8* %inp, i32 %arrayidx.sum
-  %arrayidx = getelementptr inbounds i8, i8* %inp, i32 %mul
-  %arrayidx44.gep = getelementptr i8, i8* %outp, i32 %mul
+  %add.ptr1 = getelementptr inbounds i8, ptr %inp, i32 %add.ptr.sum
+  %add.ptr = getelementptr inbounds i8, ptr %inp, i32 %arrayidx.sum
+  %arrayidx = getelementptr inbounds i8, ptr %inp, i32 %mul
+  %arrayidx44.gep = getelementptr i8, ptr %outp, i32 %mul
   br label %for.cond5.preheader
 
 for.cond5.preheader:                              ; preds = %if.end40, %for.cond5.preheader.lr.ph
-  %arrayidx44.phi = phi i8* [ %arrayidx44.gep, %for.cond5.preheader.lr.ph ], [ %arrayidx44.inc, %if.end40 ]
+  %arrayidx44.phi = phi ptr [ %arrayidx44.gep, %for.cond5.preheader.lr.ph ], [ %arrayidx44.inc, %if.end40 ]
   %j.085 = phi i32 [ 0, %for.cond5.preheader.lr.ph ], [ %inc46, %if.end40 ]
-  %IN1.084 = phi i8* [ %arrayidx, %for.cond5.preheader.lr.ph ], [ %incdec.ptr, %if.end40 ]
-  %IN2.083 = phi i8* [ %add.ptr, %for.cond5.preheader.lr.ph ], [ %incdec.ptr33, %if.end40 ]
-  %IN3.082 = phi i8* [ %add.ptr1, %for.cond5.preheader.lr.ph ], [ %incdec.ptr34, %if.end40 ]
+  %IN1.084 = phi ptr [ %arrayidx, %for.cond5.preheader.lr.ph ], [ %incdec.ptr, %if.end40 ]
+  %IN2.083 = phi ptr [ %add.ptr, %for.cond5.preheader.lr.ph ], [ %incdec.ptr33, %if.end40 ]
+  %IN3.082 = phi ptr [ %add.ptr1, %for.cond5.preheader.lr.ph ], [ %incdec.ptr34, %if.end40 ]
   br label %for.body7
 
 for.body7:                                        ; preds = %for.body7, %for.cond5.preheader
-  %arrayidx8.phi = phi i8* [ %IN1.084, %for.cond5.preheader ], [ %arrayidx8.inc, %for.body7 ]
-  %arrayidx9.phi = phi i8* [ %IN2.083, %for.cond5.preheader ], [ %arrayidx9.inc, %for.body7 ]
-  %arrayidx11.phi = phi i8* [ %IN3.082, %for.cond5.preheader ], [ %arrayidx11.inc, %for.body7 ]
-  %arrayidx13.phi = phi i8* [ %mask, %for.cond5.preheader ], [ %arrayidx13.inc, %for.body7 ]
-  %arrayidx16.phi = phi i8* [ %arrayidx16.gep, %for.cond5.preheader ], [ %arrayidx16.inc, %for.body7 ]
-  %arrayidx19.phi = phi i8* [ %arrayidx19.gep, %for.cond5.preheader ], [ %arrayidx19.inc, %for.body7 ]
+  %arrayidx8.phi = phi ptr [ %IN1.084, %for.cond5.preheader ], [ %arrayidx8.inc, %for.body7 ]
+  %arrayidx9.phi = phi ptr [ %IN2.083, %for.cond5.preheader ], [ %arrayidx9.inc, %for.body7 ]
+  %arrayidx11.phi = phi ptr [ %IN3.082, %for.cond5.preheader ], [ %arrayidx11.inc, %for.body7 ]
+  %arrayidx13.phi = phi ptr [ %mask, %for.cond5.preheader ], [ %arrayidx13.inc, %for.body7 ]
+  %arrayidx16.phi = phi ptr [ %arrayidx16.gep, %for.cond5.preheader ], [ %arrayidx16.inc, %for.body7 ]
+  %arrayidx19.phi = phi ptr [ %arrayidx19.gep, %for.cond5.preheader ], [ %arrayidx19.inc, %for.body7 ]
   %k.080 = phi i32 [ 0, %for.cond5.preheader ], [ %inc, %for.body7 ]
   %sum.079 = phi i32 [ 0, %for.cond5.preheader ], [ %add32, %for.body7 ]
-  %0 = load i8, i8* %arrayidx8.phi, align 1, !tbaa !1
-  %1 = load i8, i8* %arrayidx9.phi, align 1, !tbaa !1
-  %2 = load i8, i8* %arrayidx11.phi, align 1, !tbaa !1
-  %3 = load i8, i8* %arrayidx13.phi, align 1, !tbaa !1
-  %4 = load i8, i8* %arrayidx16.phi, align 1, !tbaa !1
-  %5 = load i8, i8* %arrayidx19.phi, align 1, !tbaa !1
+  %0 = load i8, ptr %arrayidx8.phi, align 1, !tbaa !1
+  %1 = load i8, ptr %arrayidx9.phi, align 1, !tbaa !1
+  %2 = load i8, ptr %arrayidx11.phi, align 1, !tbaa !1
+  %3 = load i8, ptr %arrayidx13.phi, align 1, !tbaa !1
+  %4 = load i8, ptr %arrayidx16.phi, align 1, !tbaa !1
+  %5 = load i8, ptr %arrayidx19.phi, align 1, !tbaa !1
   %conv21 = zext i8 %0 to i32
   %conv22 = sext i8 %3 to i32
   %mul23 = mul nsw i32 %conv22, %conv21
@@ -69,18 +69,18 @@ for.body7:                                        ; preds = %for.body7, %for.con
   %add32 = add i32 %add31, %mul29
   %inc = add nsw i32 %k.080, 1
   %exitcond = icmp eq i32 %inc, 3
-  %arrayidx8.inc = getelementptr i8, i8* %arrayidx8.phi, i32 1
-  %arrayidx9.inc = getelementptr i8, i8* %arrayidx9.phi, i32 1
-  %arrayidx11.inc = getelementptr i8, i8* %arrayidx11.phi, i32 1
-  %arrayidx13.inc = getelementptr i8, i8* %arrayidx13.phi, i32 1
-  %arrayidx16.inc = getelementptr i8, i8* %arrayidx16.phi, i32 1
-  %arrayidx19.inc = getelementptr i8, i8* %arrayidx19.phi, i32 1
+  %arrayidx8.inc = getelementptr i8, ptr %arrayidx8.phi, i32 1
+  %arrayidx9.inc = getelementptr i8, ptr %arrayidx9.phi, i32 1
+  %arrayidx11.inc = getelementptr i8, ptr %arrayidx11.phi, i32 1
+  %arrayidx13.inc = getelementptr i8, ptr %arrayidx13.phi, i32 1
+  %arrayidx16.inc = getelementptr i8, ptr %arrayidx16.phi, i32 1
+  %arrayidx19.inc = getelementptr i8, ptr %arrayidx19.phi, i32 1
   br i1 %exitcond, label %for.end, label %for.body7
 
 for.end:                                          ; preds = %for.body7
-  %incdec.ptr = getelementptr inbounds i8, i8* %IN1.084, i32 1
-  %incdec.ptr33 = getelementptr inbounds i8, i8* %IN2.083, i32 1
-  %incdec.ptr34 = getelementptr inbounds i8, i8* %IN3.082, i32 1
+  %incdec.ptr = getelementptr inbounds i8, ptr %IN1.084, i32 1
+  %incdec.ptr33 = getelementptr inbounds i8, ptr %IN2.083, i32 1
+  %incdec.ptr34 = getelementptr inbounds i8, ptr %IN3.082, i32 1
   %shr = ashr i32 %add32, %shift
   %cmp35 = icmp slt i32 %shr, 0
   br i1 %cmp35, label %if.end40, label %if.end
@@ -95,10 +95,10 @@ if.then39:                                        ; preds = %if.end
 if.end40:                                         ; preds = %for.end, %if.then39, %if.end
   %sum.2 = phi i32 [ 255, %if.then39 ], [ %shr, %if.end ], [ 0, %for.end ]
   %conv41 = trunc i32 %sum.2 to i8
-  store i8 %conv41, i8* %arrayidx44.phi, align 1, !tbaa !1
+  store i8 %conv41, ptr %arrayidx44.phi, align 1, !tbaa !1
   %inc46 = add nsw i32 %j.085, 1
   %exitcond87 = icmp eq i32 %inc46, %width
-  %arrayidx44.inc = getelementptr i8, i8* %arrayidx44.phi, i32 1
+  %arrayidx44.inc = getelementptr i8, ptr %arrayidx44.phi, i32 1
   br i1 %exitcond87, label %for.inc48.loopexit, label %for.cond5.preheader
 
 for.inc48.loopexit:                               ; preds = %if.end40

diff  --git a/llvm/test/CodeGen/Hexagon/base-offset-addr.ll b/llvm/test/CodeGen/Hexagon/base-offset-addr.ll
index 6d379a087eaae..3104448a675f6 100644
--- a/llvm/test/CodeGen/Hexagon/base-offset-addr.ll
+++ b/llvm/test/CodeGen/Hexagon/base-offset-addr.ll
@@ -3,12 +3,12 @@
 
 ; Make sure the base is a register and not an address.
 
-define fastcc void @Get_lsp_pol(i32* nocapture %f) #0 {
+define fastcc void @Get_lsp_pol(ptr nocapture %f) #0 {
 entry:
   %f5 = alloca i32, align 4
-  %arrayidx103 = getelementptr inbounds i32, i32* %f, i32 4
-  store i32 0, i32* %arrayidx103, align 4
-  %f5.0.load185 = load volatile i32, i32* %f5, align 4
+  %arrayidx103 = getelementptr inbounds i32, ptr %f, i32 4
+  store i32 0, ptr %arrayidx103, align 4
+  %f5.0.load185 = load volatile i32, ptr %f5, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/base-offset-post.ll b/llvm/test/CodeGen/Hexagon/base-offset-post.ll
index 5af384c53ed9b..388d66728bcd3 100644
--- a/llvm/test/CodeGen/Hexagon/base-offset-post.ll
+++ b/llvm/test/CodeGen/Hexagon/base-offset-post.ll
@@ -14,12 +14,12 @@ entry:
 for.cond64.preheader.i:
   %i.1984.i = phi i32 [ 0, %entry ], [ %inc166.i.1, %for.cond64.preheader.i ]
   %inc166.i = add nsw i32 %i.1984.i, 1
-  %arrayidx71.i1422.1 = getelementptr inbounds %struct.A, %struct.A* undef, i32 0, i32 7, i32 %inc166.i
+  %arrayidx71.i1422.1 = getelementptr inbounds %struct.A, ptr undef, i32 0, i32 7, i32 %inc166.i
   %storemerge800.i.1 = select i1 undef, i32 1310, i32 undef
   %sub156.i.1 = sub nsw i32 0, %storemerge800.i.1
   %sub156.storemerge800.i.1 = select i1 undef, i32 %storemerge800.i.1, i32 %sub156.i.1
-  store i32 %sub156.storemerge800.i.1, i32* %arrayidx71.i1422.1, align 4
-  store i32 0, i32* undef, align 4
+  store i32 %sub156.storemerge800.i.1, ptr %arrayidx71.i1422.1, align 4
+  store i32 0, ptr undef, align 4
   %inc166.i.1 = add nsw i32 %i.1984.i, 2
   br label %for.cond64.preheader.i
 

diff  --git a/llvm/test/CodeGen/Hexagon/base-offset-stv4.ll b/llvm/test/CodeGen/Hexagon/base-offset-stv4.ll
index 0332271dd47b5..9b6305e7fe94d 100644
--- a/llvm/test/CodeGen/Hexagon/base-offset-stv4.ll
+++ b/llvm/test/CodeGen/Hexagon/base-offset-stv4.ll
@@ -7,16 +7,16 @@ b0:
   br i1 undef, label %b1, label %b4
 
 b1:                                               ; preds = %b0
-  %v0 = load i16*, i16** undef, align 4
+  %v0 = load ptr, ptr undef, align 4
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ 13, %b1 ], [ %v5, %b2 ]
-  %v2 = getelementptr inbounds i16, i16* %v0, i32 %v1
+  %v2 = getelementptr inbounds i16, ptr %v0, i32 %v1
   %v3 = add nsw i32 0, %v1
-  %v4 = getelementptr inbounds i16, i16* %v0, i32 %v3
-  store i16 0, i16* %v4, align 2
-  store i16 0, i16* %v2, align 2
+  %v4 = getelementptr inbounds i16, ptr %v0, i32 %v3
+  store i16 0, ptr %v4, align 2
+  store i16 0, ptr %v2, align 2
   %v5 = add i32 %v1, 1
   %v6 = icmp eq i32 %v5, 26
   br i1 %v6, label %b3, label %b2

diff  --git a/llvm/test/CodeGen/Hexagon/bit-bitsplit-at.ll b/llvm/test/CodeGen/Hexagon/bit-bitsplit-at.ll
index cb1c8fdce9e24..6d2375c2be464 100644
--- a/llvm/test/CodeGen/Hexagon/bit-bitsplit-at.ll
+++ b/llvm/test/CodeGen/Hexagon/bit-bitsplit-at.ll
@@ -7,7 +7,7 @@
 
 target triple = "hexagon"
 
-define fastcc i32 @fred(i32 %a0, i8* %a1, i1 %a2, i1 %a3) #0 {
+define fastcc i32 @fred(i32 %a0, ptr %a1, i1 %a2, i1 %a3) #0 {
 b1:
   %v2 = lshr i32 %a0, 16
   %v3 = trunc i32 %v2 to i8
@@ -22,7 +22,7 @@ b6:                                               ; preds = %b1
   br label %b9
 
 b8:                                               ; preds = %b4
-  store i8 %v3, i8* %a1, align 2
+  store i8 %v3, ptr %a1, align 2
   ret i32 1
 
 b9:                                               ; preds = %b6, %b4

diff  --git a/llvm/test/CodeGen/Hexagon/bit-bitsplit-regclass.ll b/llvm/test/CodeGen/Hexagon/bit-bitsplit-regclass.ll
index 82eb87fec3eaf..d4c7b741b2b6a 100644
--- a/llvm/test/CodeGen/Hexagon/bit-bitsplit-regclass.ll
+++ b/llvm/test/CodeGen/Hexagon/bit-bitsplit-regclass.ll
@@ -28,7 +28,7 @@ b3:                                               ; preds = %b2, %b1
   %v3 = trunc i64 %v2 to i32
   %v4 = tail call i32 @llvm.hexagon.C2.mux(i32 %v3, i32 undef, i32 undef)
   %v5 = trunc i32 %v4 to i8
-  store i8 %v5, i8* undef, align 1
+  store i8 %v5, ptr undef, align 1
   %v6 = lshr i64 %v1, 1
   br label %b1
 }

diff  --git a/llvm/test/CodeGen/Hexagon/bit-bitsplit-src.ll b/llvm/test/CodeGen/Hexagon/bit-bitsplit-src.ll
index edac4cb34b6e8..6fbcffd372810 100644
--- a/llvm/test/CodeGen/Hexagon/bit-bitsplit-src.ll
+++ b/llvm/test/CodeGen/Hexagon/bit-bitsplit-src.ll
@@ -12,7 +12,7 @@ target triple = "hexagon"
 
 define void @fred() local_unnamed_addr #0 {
 b0:
-  %v1 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @g0, i32 0, i32 0), align 8
+  %v1 = load i64, ptr @g0, align 8
   %v2 = trunc i64 %v1 to i32
   %v3 = lshr i64 %v1, 16
   %v4 = trunc i64 %v3 to i32
@@ -24,12 +24,12 @@ b0:
   %v10 = and i32 %v4, 65535
   %v11 = add nuw nsw i32 %v10, %v9
   %v12 = zext i32 %v11 to i64
-  tail call void (i8*, ...) @printf(i8* getelementptr inbounds ([29 x i8], [29 x i8]* @g1, i32 0, i32 0), i64 %v8) #0
-  tail call void (i8*, ...) @printf(i8* getelementptr inbounds ([29 x i8], [29 x i8]* @g2, i32 0, i32 0), i64 %v12) #0
+  tail call void (ptr, ...) @printf(ptr @g1, i64 %v8) #0
+  tail call void (ptr, ...) @printf(ptr @g2, i64 %v12) #0
   ret void
 }
 
 ; Function Attrs: nounwind
-declare void @printf(i8* nocapture readonly, ...) local_unnamed_addr #0
+declare void @printf(ptr nocapture readonly, ...) local_unnamed_addr #0
 
 attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="-hvx,-long-calls" }

diff  --git a/llvm/test/CodeGen/Hexagon/bit-bitsplit.ll b/llvm/test/CodeGen/Hexagon/bit-bitsplit.ll
index 52ae69af994b6..0123d00ebc716 100644
--- a/llvm/test/CodeGen/Hexagon/bit-bitsplit.ll
+++ b/llvm/test/CodeGen/Hexagon/bit-bitsplit.ll
@@ -3,12 +3,12 @@
 
 target triple = "hexagon"
 
-define i32 @fred(i32 %a, i32* nocapture readonly %b) local_unnamed_addr #0 {
+define i32 @fred(i32 %a, ptr nocapture readonly %b) local_unnamed_addr #0 {
 entry:
   %and = and i32 %a, 31
   %shr = lshr i32 %a, 5
-  %arrayidx = getelementptr inbounds i32, i32* %b, i32 %shr
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %b, i32 %shr
+  %0 = load i32, ptr %arrayidx, align 4
   %shr1 = lshr i32 %0, %and
   %and2 = and i32 %shr1, 1
   ret i32 %and2

diff  --git a/llvm/test/CodeGen/Hexagon/bit-extract-off.ll b/llvm/test/CodeGen/Hexagon/bit-extract-off.ll
index 032fb806e3d77..fb5d3a4db118c 100644
--- a/llvm/test/CodeGen/Hexagon/bit-extract-off.ll
+++ b/llvm/test/CodeGen/Hexagon/bit-extract-off.ll
@@ -9,13 +9,13 @@ target triple = "hexagon"
 
 @g0 = global double zeroinitializer, align 8
 
-define hidden i32 @fred([101 x double]* %a0, i32 %a1, i32* %a2, i32* %a3) #0 {
+define hidden i32 @fred(ptr %a0, i32 %a1, ptr %a2, ptr %a3) #0 {
 b4:
   br label %b5
 
 b5:                                               ; preds = %b5, %b4
   %v6 = call double @fabs(double undef) #1
-  store double %v6, double* @g0, align 8
+  store double %v6, ptr @g0, align 8
   br label %b5
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/bit-gen-rseq.ll b/llvm/test/CodeGen/Hexagon/bit-gen-rseq.ll
index 08d4b7877159c..2857c05db764f 100644
--- a/llvm/test/CodeGen/Hexagon/bit-gen-rseq.ll
+++ b/llvm/test/CodeGen/Hexagon/bit-gen-rseq.ll
@@ -6,9 +6,9 @@
 
 target triple = "hexagon"
 
-define i32 @fred(i32* nocapture readonly %p, i32 %n) #0 {
+define i32 @fred(ptr nocapture readonly %p, i32 %n) #0 {
 entry:
-  %t.sroa.0.048 = load i32, i32* %p, align 4
+  %t.sroa.0.048 = load i32, ptr %p, align 4
   %cmp49 = icmp ugt i32 %n, 1
   br i1 %cmp49, label %for.body, label %for.end
 
@@ -20,9 +20,9 @@ for.body:                                         ; preds = %entry, %for.body
   %t.sroa.0.0.insert.insert = or i64 %t.sroa.0.0.insert.ext, %t.sroa.11.051
   %0 = tail call i64 @llvm.hexagon.A2.addp(i64 %t.sroa.0.0.insert.insert, i64 %t.sroa.0.0.insert.insert)
   %t.sroa.11.0.extract.shift = and i64 %0, -4294967296
-  %arrayidx4 = getelementptr inbounds i32, i32* %p, i32 %i.050
+  %arrayidx4 = getelementptr inbounds i32, ptr %p, i32 %i.050
   %inc = add nuw i32 %i.050, 1
-  %t.sroa.0.0 = load i32, i32* %arrayidx4, align 4
+  %t.sroa.0.0 = load i32, ptr %arrayidx4, align 4
   %exitcond = icmp eq i32 %inc, %n
   br i1 %exitcond, label %for.end, label %for.body
 

diff  --git a/llvm/test/CodeGen/Hexagon/bit-has.ll b/llvm/test/CodeGen/Hexagon/bit-has.ll
index f0b2ae1539202..41f234b326c9d 100644
--- a/llvm/test/CodeGen/Hexagon/bit-has.ll
+++ b/llvm/test/CodeGen/Hexagon/bit-has.ll
@@ -8,7 +8,7 @@ target triple = "hexagon"
 
 define void @fred() local_unnamed_addr #0 {
 b0:
-  %v1 = load i32, i32* undef, align 4
+  %v1 = load i32, ptr undef, align 4
   %v2 = tail call i32 @llvm.hexagon.A2.sath(i32 undef)
   %v3 = and i32 %v1, 603979776
   %v4 = trunc i32 %v3 to i30
@@ -21,9 +21,9 @@ b5:                                               ; preds = %b0
   unreachable
 
 b6:                                               ; preds = %b0
-  %v7 = load i32, i32* undef, align 4
+  %v7 = load i32, ptr undef, align 4
   %v8 = sub nsw i32 65536, %v7
-  %v9 = load i32, i32* undef, align 4
+  %v9 = load i32, ptr undef, align 4
   %v10 = mul nsw i32 %v9, %v9
   %v11 = zext i32 %v10 to i64
   %v12 = mul nsw i32 %v2, %v8
@@ -31,10 +31,10 @@ b6:                                               ; preds = %b0
   %v14 = mul nsw i64 %v13, %v11
   %v15 = trunc i64 %v14 to i32
   %v16 = and i32 %v15, 2147483647
-  store i32 %v16, i32* undef, align 4
+  store i32 %v16, ptr undef, align 4
   %v17 = lshr i64 %v14, 31
   %v18 = trunc i64 %v17 to i32
-  store i32 %v18, i32* undef, align 4
+  store i32 %v18, ptr undef, align 4
   br label %b19
 
 b19:                                              ; preds = %b6
@@ -50,11 +50,11 @@ b22:                                              ; preds = %b0
   unreachable
 
 b23:                                              ; preds = %b21
-  %v24 = load i32, i32* undef, align 4
+  %v24 = load i32, ptr undef, align 4
   %v25 = shl i32 %v24, 1
   %v26 = and i32 %v25, 65534
   %v27 = or i32 %v26, 0
-  store i32 %v27, i32* undef, align 4
+  store i32 %v27, ptr undef, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/bit-loop-rc-mismatch.ll b/llvm/test/CodeGen/Hexagon/bit-loop-rc-mismatch.ll
index e2f044b8b60a3..351091095651b 100644
--- a/llvm/test/CodeGen/Hexagon/bit-loop-rc-mismatch.ll
+++ b/llvm/test/CodeGen/Hexagon/bit-loop-rc-mismatch.ll
@@ -4,25 +4,25 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define weak_odr hidden i32 @fred(i32* %this, i32* nocapture readonly dereferenceable(4) %__k) #0 align 2 {
+define weak_odr hidden i32 @fred(ptr %this, ptr nocapture readonly dereferenceable(4) %__k) #0 align 2 {
 entry:
-  %call = tail call i64 @danny(i32* %this, i32* nonnull dereferenceable(4) %__k) #2
+  %call = tail call i64 @danny(ptr %this, ptr nonnull dereferenceable(4) %__k) #2
   %__p.sroa.0.0.extract.trunc = trunc i64 %call to i32
   br i1 undef, label %for.end, label %for.body
 
 for.body:                                         ; preds = %for.body, %entry
   %__p.sroa.0.018 = phi i32 [ %call8, %for.body ], [ %__p.sroa.0.0.extract.trunc, %entry ]
-  %call8 = tail call i32 @sammy(i32* %this, i32 %__p.sroa.0.018) #2
-  %0 = inttoptr i32 %call8 to i32*
-  %lnot.i = icmp eq i32* %0, undef
+  %call8 = tail call i32 @sammy(ptr %this, i32 %__p.sroa.0.018) #2
+  %0 = inttoptr i32 %call8 to ptr
+  %lnot.i = icmp eq ptr %0, undef
   br i1 %lnot.i, label %for.end, label %for.body
 
 for.end:                                          ; preds = %for.body, %entry
   ret i32 0
 }
 
-declare hidden i64 @danny(i32*, i32* nocapture readonly dereferenceable(4)) #1 align 2
-declare hidden i32 @sammy(i32* nocapture, i32) #0 align 2
+declare hidden i64 @danny(ptr, ptr nocapture readonly dereferenceable(4)) #1 align 2
+declare hidden i32 @sammy(ptr nocapture, i32) #0 align 2
 
 attributes #0 = { nounwind optsize "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length64b" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { nounwind optsize readonly "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length64b" "unsafe-fp-math"="false" "use-soft-float"="false" }

diff  --git a/llvm/test/CodeGen/Hexagon/bit-loop.ll b/llvm/test/CodeGen/Hexagon/bit-loop.ll
index 021890d627c60..453790e427b5b 100644
--- a/llvm/test/CodeGen/Hexagon/bit-loop.ll
+++ b/llvm/test/CodeGen/Hexagon/bit-loop.ll
@@ -7,14 +7,14 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @foo(i64* nocapture readonly %r64, i16 zeroext %n, i16 zeroext %s, i64* nocapture %p64) #0 {
+define void @foo(ptr nocapture readonly %r64, i16 zeroext %n, i16 zeroext %s, ptr nocapture %p64) #0 {
 entry:
   %conv = zext i16 %n to i32
   %cmp = icmp eq i16 %n, 0
   br i1 %cmp, label %for.end, label %for.body.preheader
 
 for.body.preheader:                               ; preds = %entry
-  %0 = load i64, i64* %r64, align 8, !tbaa !1
+  %0 = load i64, ptr %r64, align 8, !tbaa !1
   %v.sroa.0.0.extract.trunc = trunc i64 %0 to i16
   %v.sroa.4.0.extract.shift = lshr i64 %0, 16
   %v.sroa.4.0.extract.trunc = trunc i64 %v.sroa.4.0.extract.shift to i16
@@ -22,47 +22,46 @@ for.body.preheader:                               ; preds = %entry
   %v.sroa.5.0.extract.trunc = trunc i64 %v.sroa.5.0.extract.shift to i16
   %v.sroa.6.0.extract.shift = lshr i64 %0, 48
   %v.sroa.6.0.extract.trunc = trunc i64 %v.sroa.6.0.extract.shift to i16
-  %1 = bitcast i64* %p64 to i16*
   %conv2 = zext i16 %s to i32
-  %add.ptr = getelementptr inbounds i16, i16* %1, i32 %conv2
+  %add.ptr = getelementptr inbounds i16, ptr %p64, i32 %conv2
   %add.ptr.sum = add nuw nsw i32 %conv2, 1
-  %add.ptr3 = getelementptr inbounds i16, i16* %1, i32 %add.ptr.sum
+  %add.ptr3 = getelementptr inbounds i16, ptr %p64, i32 %add.ptr.sum
   %add.ptr.sum50 = add nuw nsw i32 %conv2, 2
-  %add.ptr4 = getelementptr inbounds i16, i16* %1, i32 %add.ptr.sum50
+  %add.ptr4 = getelementptr inbounds i16, ptr %p64, i32 %add.ptr.sum50
   %add.ptr.sum51 = add nuw nsw i32 %conv2, 3
-  %add.ptr5 = getelementptr inbounds i16, i16* %1, i32 %add.ptr.sum51
+  %add.ptr5 = getelementptr inbounds i16, ptr %p64, i32 %add.ptr.sum51
   br label %for.body
 
 for.body:                                         ; preds = %for.body.preheader, %for.body
-  %add.ptr11.phi = phi i16* [ %add.ptr11.inc, %for.body ], [ %add.ptr, %for.body.preheader ]
-  %add.ptr16.phi = phi i16* [ %add.ptr16.inc, %for.body ], [ %add.ptr3, %for.body.preheader ]
-  %add.ptr21.phi = phi i16* [ %add.ptr21.inc, %for.body ], [ %add.ptr4, %for.body.preheader ]
-  %add.ptr26.phi = phi i16* [ %add.ptr26.inc, %for.body ], [ %add.ptr5, %for.body.preheader ]
+  %add.ptr11.phi = phi ptr [ %add.ptr11.inc, %for.body ], [ %add.ptr, %for.body.preheader ]
+  %add.ptr16.phi = phi ptr [ %add.ptr16.inc, %for.body ], [ %add.ptr3, %for.body.preheader ]
+  %add.ptr21.phi = phi ptr [ %add.ptr21.inc, %for.body ], [ %add.ptr4, %for.body.preheader ]
+  %add.ptr26.phi = phi ptr [ %add.ptr26.inc, %for.body ], [ %add.ptr5, %for.body.preheader ]
   %i.058.pmt = phi i32 [ %inc.pmt, %for.body ], [ 0, %for.body.preheader ]
   %v.sroa.0.157 = phi i16 [ %v.sroa.0.0.extract.trunc34, %for.body ], [ %v.sroa.0.0.extract.trunc, %for.body.preheader ]
   %v.sroa.4.156 = phi i16 [ %v.sroa.4.0.extract.trunc36, %for.body ], [ %v.sroa.4.0.extract.trunc, %for.body.preheader ]
   %v.sroa.5.155 = phi i16 [ %v.sroa.5.0.extract.trunc38, %for.body ], [ %v.sroa.5.0.extract.trunc, %for.body.preheader ]
   %v.sroa.6.154 = phi i16 [ %v.sroa.6.0.extract.trunc40, %for.body ], [ %v.sroa.6.0.extract.trunc, %for.body.preheader ]
-  %q64.153.pn = phi i64* [ %q64.153, %for.body ], [ %r64, %for.body.preheader ]
-  %q64.153 = getelementptr inbounds i64, i64* %q64.153.pn, i32 1
-  store i16 %v.sroa.0.157, i16* %add.ptr11.phi, align 2, !tbaa !5
-  store i16 %v.sroa.4.156, i16* %add.ptr16.phi, align 2, !tbaa !5
-  store i16 %v.sroa.5.155, i16* %add.ptr21.phi, align 2, !tbaa !5
-  store i16 %v.sroa.6.154, i16* %add.ptr26.phi, align 2, !tbaa !5
-  %2 = load i64, i64* %q64.153, align 8, !tbaa !1
-  %v.sroa.0.0.extract.trunc34 = trunc i64 %2 to i16
-  %v.sroa.4.0.extract.shift35 = lshr i64 %2, 16
+  %q64.153.pn = phi ptr [ %q64.153, %for.body ], [ %r64, %for.body.preheader ]
+  %q64.153 = getelementptr inbounds i64, ptr %q64.153.pn, i32 1
+  store i16 %v.sroa.0.157, ptr %add.ptr11.phi, align 2, !tbaa !5
+  store i16 %v.sroa.4.156, ptr %add.ptr16.phi, align 2, !tbaa !5
+  store i16 %v.sroa.5.155, ptr %add.ptr21.phi, align 2, !tbaa !5
+  store i16 %v.sroa.6.154, ptr %add.ptr26.phi, align 2, !tbaa !5
+  %1 = load i64, ptr %q64.153, align 8, !tbaa !1
+  %v.sroa.0.0.extract.trunc34 = trunc i64 %1 to i16
+  %v.sroa.4.0.extract.shift35 = lshr i64 %1, 16
   %v.sroa.4.0.extract.trunc36 = trunc i64 %v.sroa.4.0.extract.shift35 to i16
-  %v.sroa.5.0.extract.shift37 = lshr i64 %2, 32
+  %v.sroa.5.0.extract.shift37 = lshr i64 %1, 32
   %v.sroa.5.0.extract.trunc38 = trunc i64 %v.sroa.5.0.extract.shift37 to i16
-  %v.sroa.6.0.extract.shift39 = lshr i64 %2, 48
+  %v.sroa.6.0.extract.shift39 = lshr i64 %1, 48
   %v.sroa.6.0.extract.trunc40 = trunc i64 %v.sroa.6.0.extract.shift39 to i16
   %inc.pmt = add i32 %i.058.pmt, 1
   %cmp8 = icmp slt i32 %inc.pmt, %conv
-  %add.ptr11.inc = getelementptr i16, i16* %add.ptr11.phi, i32 4
-  %add.ptr16.inc = getelementptr i16, i16* %add.ptr16.phi, i32 4
-  %add.ptr21.inc = getelementptr i16, i16* %add.ptr21.phi, i32 4
-  %add.ptr26.inc = getelementptr i16, i16* %add.ptr26.phi, i32 4
+  %add.ptr11.inc = getelementptr i16, ptr %add.ptr11.phi, i32 4
+  %add.ptr16.inc = getelementptr i16, ptr %add.ptr16.phi, i32 4
+  %add.ptr21.inc = getelementptr i16, ptr %add.ptr21.phi, i32 4
+  %add.ptr26.inc = getelementptr i16, ptr %add.ptr26.phi, i32 4
   br i1 %cmp8, label %for.body, label %for.end
 
 for.end:                                          ; preds = %for.body, %entry

diff  --git a/llvm/test/CodeGen/Hexagon/bit-phi.ll b/llvm/test/CodeGen/Hexagon/bit-phi.ll
index 7abfba079bb07..39e13a2e2d948 100644
--- a/llvm/test/CodeGen/Hexagon/bit-phi.ll
+++ b/llvm/test/CodeGen/Hexagon/bit-phi.ll
@@ -5,12 +5,12 @@
 target datalayout = "e-m:e-p:32:32-i1:32-i64:64-a:0-v32:32-n16:32"
 target triple = "hexagon-unknown--elf"
 
-%struct.item = type { i32, i8*, i8*, i32, i8, i8, i16, i32, i8, i16, i32 }
+%struct.item = type { i32, ptr, ptr, i32, i8, i8, i16, i32, i8, i16, i32 }
 
-declare %struct.item* @foo(%struct.item*, i8*, i32) #1
+declare ptr @foo(ptr, ptr, i32) #1
 
 ; Function Attrs: nounwind
-define i32 @bar(%struct.item** %ptr, i8* %buf, i32 %c, i8* %d, i32 %e) #1 {
+define i32 @bar(ptr %ptr, ptr %buf, i32 %c, ptr %d, i32 %e) #1 {
 entry:
   br i1 undef, label %return, label %if.end
 
@@ -34,14 +34,14 @@ while.body20.if.end38_crit_edge:                  ; preds = %while.body20
 
 if.then32:                                        ; preds = %while.body20
   %conv33 = and i32 %cond, 65535
-  %.pre = load %struct.item*, %struct.item** %ptr, align 4, !tbaa !1
+  %.pre = load ptr, ptr %ptr, align 4, !tbaa !1
   br label %if.end38
 
 if.end38:                                         ; preds = %if.then32, %while.body20.if.end38_crit_edge
   %conv39.pre-phi = phi i32 [ %conv39.pre, %while.body20.if.end38_crit_edge ], [ %conv33, %if.then32 ]
-  %0 = phi %struct.item* [ undef, %while.body20.if.end38_crit_edge ], [ %.pre, %if.then32 ]
+  %0 = phi ptr [ undef, %while.body20.if.end38_crit_edge ], [ %.pre, %if.then32 ]
   %add = add i32 %conv39.pre-phi, 0
-  %call52 = tail call %struct.item* @foo(%struct.item* %0, i8* %d, i32 %e) #1
+  %call52 = tail call ptr @foo(ptr %0, ptr %d, i32 %e) #1
   br i1 undef, label %while.body20, label %return
 
 return:                                           ; preds = %if.end38, %while.cond13.preheader, %entry

diff  --git a/llvm/test/CodeGen/Hexagon/bit-rie.ll b/llvm/test/CodeGen/Hexagon/bit-rie.ll
index a090a668d9f3a..9b670b2e6207b 100644
--- a/llvm/test/CodeGen/Hexagon/bit-rie.ll
+++ b/llvm/test/CodeGen/Hexagon/bit-rie.ll
@@ -8,7 +8,7 @@ target triple = "hexagon"
 @g0 = external constant [146 x i16], align 8
 @g1 = external constant [0 x i16], align 2
 
-define void @fred(i32* nocapture readonly %p0, i16 signext %p1, i16* nocapture %p2, i16 signext %p3, i16 signext %p4, i16 signext %p5) #0 {
+define void @fred(ptr nocapture readonly %p0, i16 signext %p1, ptr nocapture %p2, i16 signext %p3, i16 signext %p4, i16 signext %p5) #0 {
 entry:
   %conv = sext i16 %p1 to i32
   %0 = tail call i32 @llvm.hexagon.S2.asl.r.r.sat(i32 %conv, i32 1)
@@ -19,22 +19,22 @@ entry:
   br i1 %cmp144, label %for.body, label %for.end
 
 for.body:                                         ; preds = %entry, %for.body
-  %arrayidx.phi = phi i32* [ %arrayidx.inc, %for.body ], [ %p0, %entry ]
+  %arrayidx.phi = phi ptr [ %arrayidx.inc, %for.body ], [ %p0, %entry ]
   %i.0146.apmt = phi i32 [ %inc.apmt, %for.body ], [ 0, %entry ]
   %L_temp1.0145 = phi i32 [ %5, %for.body ], [ 1, %entry ]
-  %3 = load i32, i32* %arrayidx.phi, align 4, !tbaa !1
+  %3 = load i32, ptr %arrayidx.phi, align 4, !tbaa !1
   %4 = tail call i32 @llvm.hexagon.A2.abssat(i32 %3)
   %5 = tail call i32 @llvm.hexagon.A2.max(i32 %L_temp1.0145, i32 %4)
   %inc.apmt = add nuw nsw i32 %i.0146.apmt, 1
   %exitcond151 = icmp eq i32 %inc.apmt, %conv3
-  %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1
+  %arrayidx.inc = getelementptr i32, ptr %arrayidx.phi, i32 1
   br i1 %exitcond151, label %for.end, label %for.body, !llvm.loop !5
 
 for.end:                                          ; preds = %for.body, %entry
   %L_temp1.0.lcssa = phi i32 [ 1, %entry ], [ %5, %for.body ]
   %6 = tail call i32 @llvm.hexagon.S2.clbnorm(i32 %L_temp1.0.lcssa)
-  %arrayidx6 = getelementptr inbounds [146 x i16], [146 x i16]* @g0, i32 0, i32 %conv3
-  %7 = load i16, i16* %arrayidx6, align 2, !tbaa !7
+  %arrayidx6 = getelementptr inbounds [146 x i16], ptr @g0, i32 0, i32 %conv3
+  %7 = load i16, ptr %arrayidx6, align 2, !tbaa !7
   %conv7 = sext i16 %7 to i32
   %8 = tail call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %6, i32 %conv7)
   br i1 %cmp144, label %for.body14.lr.ph, label %for.end29
@@ -45,10 +45,10 @@ for.body14.lr.ph:                                 ; preds = %for.end
   br label %for.body14
 
 for.body14:                                       ; preds = %for.body14, %for.body14.lr.ph
-  %arrayidx16.phi = phi i32* [ %p0, %for.body14.lr.ph ], [ %arrayidx16.inc, %for.body14 ]
+  %arrayidx16.phi = phi ptr [ %p0, %for.body14.lr.ph ], [ %arrayidx16.inc, %for.body14 ]
   %i.1143.apmt = phi i32 [ 0, %for.body14.lr.ph ], [ %inc28.apmt, %for.body14 ]
   %L_temp.0142 = phi i32 [ 0, %for.body14.lr.ph ], [ %12, %for.body14 ]
-  %9 = load i32, i32* %arrayidx16.phi, align 4, !tbaa !1
+  %9 = load i32, ptr %arrayidx16.phi, align 4, !tbaa !1
   %10 = tail call i32 @llvm.hexagon.S2.asl.r.r.sat(i32 %9, i32 %conv17)
   %11 = tail call i32 @llvm.hexagon.A2.asrh(i32 %10)
   %sext133 = shl i32 %11, 16
@@ -56,7 +56,7 @@ for.body14:                                       ; preds = %for.body14, %for.bo
   %12 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s0(i32 %L_temp.0142, i32 %conv23, i32 %conv23)
   %inc28.apmt = add nuw nsw i32 %i.1143.apmt, 1
   %exitcond = icmp eq i32 %inc28.apmt, %conv3
-  %arrayidx16.inc = getelementptr i32, i32* %arrayidx16.phi, i32 1
+  %arrayidx16.inc = getelementptr i32, ptr %arrayidx16.phi, i32 1
   br i1 %exitcond, label %for.end29, label %for.body14
 
 for.end29:                                        ; preds = %for.body14, %for.end
@@ -66,8 +66,8 @@ for.end29:                                        ; preds = %for.body14, %for.en
   br i1 %cmp31, label %if.then, label %if.end
 
 if.then:                                          ; preds = %for.end29
-  %arrayidx34 = getelementptr inbounds [0 x i16], [0 x i16]* @g1, i32 0, i32 %conv3
-  %14 = load i16, i16* %arrayidx34, align 2, !tbaa !7
+  %arrayidx34 = getelementptr inbounds [0 x i16], ptr @g1, i32 0, i32 %conv3
+  %14 = load i16, ptr %arrayidx34, align 2, !tbaa !7
   %cmp.i = icmp eq i32 %L_temp.0.lcssa, -2147483648
   %cmp1.i = icmp eq i16 %14, -32768
   %or.cond.i = and i1 %cmp.i, %cmp1.i
@@ -167,7 +167,7 @@ if.end80:                                         ; preds = %if.end74, %if.then7
   %conv82 = trunc i32 %37 to i16
   %cmp.i134 = icmp sgt i16 %var_out.0.i136, %conv82
   %var_out.0.i = select i1 %cmp.i134, i16 %conv82, i16 %var_out.0.i136
-  store i16 %var_out.0.i, i16* %p2, align 2, !tbaa !7
+  store i16 %var_out.0.i, ptr %p2, align 2, !tbaa !7
   ret void
 }
 
@@ -187,8 +187,8 @@ declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32) #2
 declare i32 @llvm.hexagon.S2.clbnorm(i32) #2
 declare i32 @llvm.hexagon.S2.lsr.r.r(i32, i32) #2
 declare i64 @llvm.hexagon.M2.mpyd.ll.s1(i32, i32) #2
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
 
 attributes #0 = { norecurse nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,,+hvx-length64b" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/bit-skip-byval.ll b/llvm/test/CodeGen/Hexagon/bit-skip-byval.ll
index 139726626c3d8..796d36ad7f414 100644
--- a/llvm/test/CodeGen/Hexagon/bit-skip-byval.ll
+++ b/llvm/test/CodeGen/Hexagon/bit-skip-byval.ll
@@ -5,7 +5,7 @@
 
 %struct.t0 = type { i32 }
 
-define i32 @foo(%struct.t0* byval(%struct.t0) align 8 %s, i8 zeroext %t, i8 %u) #0 {
+define i32 @foo(ptr byval(%struct.t0) align 8 %s, i8 zeroext %t, i8 %u) #0 {
   %a = zext i8 %u to i32
   ret i32 %a
 }

diff  --git a/llvm/test/CodeGen/Hexagon/bit-visit-flowq.ll b/llvm/test/CodeGen/Hexagon/bit-visit-flowq.ll
index f0786da3bed83..62ffb1ed1b534 100644
--- a/llvm/test/CodeGen/Hexagon/bit-visit-flowq.ll
+++ b/llvm/test/CodeGen/Hexagon/bit-visit-flowq.ll
@@ -31,7 +31,7 @@ if.then44:                                        ; preds = %for.cond
   br label %if.end51
 
 if.end51:                                         ; preds = %if.then44, %for.cond
-  %.b433 = load i1, i1* @debug, align 4
+  %.b433 = load i1, ptr @debug, align 4
   %or.cond290 = and i1 %or.cond288, %.b433
   br i1 %or.cond290, label %if.then55, label %if.end63
 

diff  --git a/llvm/test/CodeGen/Hexagon/bitcast-i128-to-v128i1.ll b/llvm/test/CodeGen/Hexagon/bitcast-i128-to-v128i1.ll
index dfe73895b298f..323bac933fb65 100644
--- a/llvm/test/CodeGen/Hexagon/bitcast-i128-to-v128i1.ll
+++ b/llvm/test/CodeGen/Hexagon/bitcast-i128-to-v128i1.ll
@@ -4,12 +4,12 @@
 declare <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32>, <32 x i32>, i32)
 
 ; Function Attrs: argmemonly nofree nosync nounwind willreturn writeonly
-declare void @llvm.masked.store.v128i8.p0v128i8(<128 x i8>, <128 x i8>*, i32 immarg, <128 x i1>)
+declare void @llvm.masked.store.v128i8.p0(<128 x i8>, ptr, i32 immarg, <128 x i1>)
 
 define void @foo2() {
   %1 = call <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32> undef, <32 x i32> undef, i32 0)
   %2 = bitcast <64 x i32> %1 to <2048 x i1>
   %3 = shufflevector <2048 x i1> %2, <2048 x i1> undef, <128 x i32> <i32 384, i32 385, i32 386, i32 387, i32 388, i32 389, i32 390, i32 391, i32 392, i32 393, i32 394, i32 395, i32 396, i32 397, i32 398, i32 399, i32 400, i32 401, i32 402, i32 403, i32 404, i32 405, i32 406, i32 407, i32 408, i32 409, i32 410, i32 411, i32 412, i32 413, i32 414, i32 415, i32 416, i32 417, i32 418, i32 419, i32 420, i32 421, i32 422, i32 423, i32 424, i32 425, i32 426, i32 427, i32 428, i32 429, i32 430, i32 431, i32 432, i32 433, i32 434, i32 435, i32 436, i32 437, i32 438, i32 439, i32 440, i32 441, i32 442, i32 443, i32 444, i32 445, i32 446, i32 447, i32 448, i32 449, i32 450, i32 451, i32 452, i32 453, i32 454, i32 455, i32 456, i32 457, i32 458, i32 459, i32 460, i32 461, i32 462, i32 463, i32 464, i32 465, i32 466, i32 467, i32 468, i32 469, i32 470, i32 471, i32 472, i32 473, i32 474, i32 475, i32 476, i32 477, i32 478, i32 479, i32 480, i32 481, i32 482, i32 483, i32 484, i32 485, i32 486, i32 487, i32 488, i32 489, i32 490, i32 491, i32 492, i32 493, i32 494, i32 495, i32 496, i32 497, i32 498, i32 499, i32 500, i32 501, i32 502, i32 503, i32 504, i32 505, i32 506, i32 507, i32 508, i32 509, i32 510, i32 511>
-  call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> undef, <128 x i8>* nonnull undef, i32 1, <128 x i1> %3)
+  call void @llvm.masked.store.v128i8.p0(<128 x i8> undef, ptr nonnull undef, i32 1, <128 x i1> %3)
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/bitconvert-vector.ll b/llvm/test/CodeGen/Hexagon/bitconvert-vector.ll
index 21846c90f5b02..dee7ae8c38969 100644
--- a/llvm/test/CodeGen/Hexagon/bitconvert-vector.ll
+++ b/llvm/test/CodeGen/Hexagon/bitconvert-vector.ll
@@ -11,14 +11,14 @@ declare <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32>) #0
 declare <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32>, <32 x i32>, i32) #0
 declare <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32>, <32 x i32>, i32) #0
 
-define void @fred(<64 x i16>* %a0, <32 x i32>* %a1) #1 {
+define void @fred(ptr %a0, ptr %a1) #1 {
 entry:
   %t0 = bitcast <64 x i16> zeroinitializer to <32 x i32>
   %t1 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> %t0, <32 x i32> undef, i32 2)
   %t2 = tail call <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32> undef, <32 x i32> %t1, i32 -2)
   %t3 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %t2)
-  store <64 x i16> zeroinitializer, <64 x i16>* %a0, align 128
-  store <32 x i32> %t3, <32 x i32>* %a1, align 128
+  store <64 x i16> zeroinitializer, ptr %a0, align 128
+  store <32 x i32> %t3, ptr %a1, align 128
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/bkfir.ll b/llvm/test/CodeGen/Hexagon/bkfir.ll
index 0e03ab352c839..fc8bcb5424b2c 100644
--- a/llvm/test/CodeGen/Hexagon/bkfir.ll
+++ b/llvm/test/CodeGen/Hexagon/bkfir.ll
@@ -5,27 +5,25 @@
 target triple = "hexagon-unknown--elf"
 
 ; Function Attrs: nounwind optsize
-define void @f0(i16* nocapture readonly %a0, i16* nocapture readonly %a1, i16* nocapture %a2, i32 %a3, i32 %a4, i32 %a5) #0 {
+define void @f0(ptr nocapture readonly %a0, ptr nocapture readonly %a1, ptr nocapture %a2, i32 %a3, i32 %a4, i32 %a5) #0 {
 b0:
-  %v0 = bitcast i16* %a0 to i64*
-  %v1 = bitcast i16* %a1 to i64*
   %v2 = icmp sgt i32 %a5, 0
   br i1 %v2, label %b1, label %b6
 
 b1:                                               ; preds = %b0
   %v3 = icmp sgt i32 %a4, 0
-  %v4 = getelementptr i16, i16* %a2, i32 5
-  %v5 = getelementptr i16, i16* %a2, i32 6
-  %v6 = getelementptr i16, i16* %a2, i32 7
+  %v4 = getelementptr i16, ptr %a2, i32 5
+  %v5 = getelementptr i16, ptr %a2, i32 6
+  %v6 = getelementptr i16, ptr %a2, i32 7
   br label %b2
 
 b2:                                               ; preds = %b5, %b1
-  %v7 = phi i16* [ %a2, %b1 ], [ %v12, %b5 ]
-  %v8 = phi i16* [ %v4, %b1 ], [ %v59, %b5 ]
-  %v9 = phi i16* [ %v5, %b1 ], [ %v60, %b5 ]
-  %v10 = phi i16* [ %v6, %b1 ], [ %v61, %b5 ]
+  %v7 = phi ptr [ %a2, %b1 ], [ %v12, %b5 ]
+  %v8 = phi ptr [ %v4, %b1 ], [ %v59, %b5 ]
+  %v9 = phi ptr [ %v5, %b1 ], [ %v60, %b5 ]
+  %v10 = phi ptr [ %v6, %b1 ], [ %v61, %b5 ]
   %v11 = phi i32 [ 0, %b1 ], [ %v57, %b5 ]
-  %v12 = getelementptr i16, i16* %v7, i32 4
+  %v12 = getelementptr i16, ptr %v7, i32 4
   br i1 %v3, label %b3, label %b5
 
 b3:                                               ; preds = %b3, %b2
@@ -36,14 +34,14 @@ b3:                                               ; preds = %b3, %b2
   %v17 = phi i32 [ %v38, %b3 ], [ 0, %b2 ]
   %v18 = add nsw i32 %v13, %v11
   %v19 = sdiv i32 %v18, 4
-  %v20 = getelementptr inbounds i64, i64* %v0, i32 %v19
-  %v21 = load i64, i64* %v20, align 8
+  %v20 = getelementptr inbounds i64, ptr %a0, i32 %v19
+  %v21 = load i64, ptr %v20, align 8
   %v22 = add nsw i32 %v19, 1
-  %v23 = getelementptr inbounds i64, i64* %v0, i32 %v22
-  %v24 = load i64, i64* %v23, align 8
+  %v23 = getelementptr inbounds i64, ptr %a0, i32 %v22
+  %v24 = load i64, ptr %v23, align 8
   %v25 = sdiv i32 %v13, 4
-  %v26 = getelementptr inbounds i64, i64* %v1, i32 %v25
-  %v27 = load i64, i64* %v26, align 8
+  %v26 = getelementptr inbounds i64, ptr %a1, i32 %v25
+  %v27 = load i64, ptr %v26, align 8
   %v28 = sext i32 %v14 to i64
   %v29 = tail call i64 @llvm.hexagon.M2.vrmac.s0(i64 %v28, i64 %v21, i64 %v27)
   %v30 = trunc i64 %v29 to i32
@@ -80,14 +78,14 @@ b5:                                               ; preds = %b4, %b2
   %v55 = phi i16 [ %v52, %b4 ], [ 0, %b2 ]
   %v56 = phi i16 [ %v50, %b4 ], [ 0, %b2 ]
   %v57 = add nsw i32 %v11, 4
-  store i16 %v53, i16* %v12, align 8
-  store i16 %v54, i16* %v8, align 8
-  store i16 %v56, i16* %v9, align 8
-  store i16 %v55, i16* %v10, align 8
+  store i16 %v53, ptr %v12, align 8
+  store i16 %v54, ptr %v8, align 8
+  store i16 %v56, ptr %v9, align 8
+  store i16 %v55, ptr %v10, align 8
   %v58 = icmp slt i32 %v57, %a5
-  %v59 = getelementptr i16, i16* %v8, i32 4
-  %v60 = getelementptr i16, i16* %v9, i32 4
-  %v61 = getelementptr i16, i16* %v10, i32 4
+  %v59 = getelementptr i16, ptr %v8, i32 4
+  %v60 = getelementptr i16, ptr %v9, i32 4
+  %v61 = getelementptr i16, ptr %v10, i32 4
   br i1 %v58, label %b2, label %b6
 
 b6:                                               ; preds = %b5, %b0

diff  --git a/llvm/test/CodeGen/Hexagon/block-addr.ll b/llvm/test/CodeGen/Hexagon/block-addr.ll
index 87d46d90dbd53..cbe824643aca2 100644
--- a/llvm/test/CodeGen/Hexagon/block-addr.ll
+++ b/llvm/test/CodeGen/Hexagon/block-addr.ll
@@ -9,7 +9,7 @@ entry:
   br label %while.body
 
 while.body:
-  %ret.0.load17 = load volatile i32, i32* %ret, align 4
+  %ret.0.load17 = load volatile i32, ptr %ret, align 4
   switch i32 %ret.0.load17, label %label6 [
     i32 0, label %label0
     i32 1, label %label1
@@ -20,43 +20,43 @@ while.body:
   ]
 
 label0:
-  %ret.0.load18 = load volatile i32, i32* %ret, align 4
+  %ret.0.load18 = load volatile i32, ptr %ret, align 4
   %inc = add nsw i32 %ret.0.load18, 1
-  store volatile i32 %inc, i32* %ret, align 4
+  store volatile i32 %inc, ptr %ret, align 4
   br label %while.body
 
 label1:
-  %ret.0.load19 = load volatile i32, i32* %ret, align 4
+  %ret.0.load19 = load volatile i32, ptr %ret, align 4
   %inc2 = add nsw i32 %ret.0.load19, 1
-  store volatile i32 %inc2, i32* %ret, align 4
+  store volatile i32 %inc2, ptr %ret, align 4
   br label %while.body
 
 label2:
-  %ret.0.load20 = load volatile i32, i32* %ret, align 4
+  %ret.0.load20 = load volatile i32, ptr %ret, align 4
   %inc4 = add nsw i32 %ret.0.load20, 1
-  store volatile i32 %inc4, i32* %ret, align 4
+  store volatile i32 %inc4, ptr %ret, align 4
   br label %while.body
 
 label3:
-  %ret.0.load21 = load volatile i32, i32* %ret, align 4
+  %ret.0.load21 = load volatile i32, ptr %ret, align 4
   %inc6 = add nsw i32 %ret.0.load21, 1
-  store volatile i32 %inc6, i32* %ret, align 4
+  store volatile i32 %inc6, ptr %ret, align 4
   br label %while.body
 
 label4:
-  %ret.0.load22 = load volatile i32, i32* %ret, align 4
+  %ret.0.load22 = load volatile i32, ptr %ret, align 4
   %inc8 = add nsw i32 %ret.0.load22, 1
-  store volatile i32 %inc8, i32* %ret, align 4
+  store volatile i32 %inc8, ptr %ret, align 4
   br label %while.body
 
 label5:
-  %ret.0.load23 = load volatile i32, i32* %ret, align 4
+  %ret.0.load23 = load volatile i32, ptr %ret, align 4
   %inc10 = add nsw i32 %ret.0.load23, 1
-  store volatile i32 %inc10, i32* %ret, align 4
+  store volatile i32 %inc10, ptr %ret, align 4
   br label %while.body
 
 label6:
-  store volatile i32 0, i32* %ret, align 4
+  store volatile i32 0, ptr %ret, align 4
   br label %while.body
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/block-address.ll b/llvm/test/CodeGen/Hexagon/block-address.ll
index f7ced9eae19d9..62efaa3406c99 100644
--- a/llvm/test/CodeGen/Hexagon/block-address.ll
+++ b/llvm/test/CodeGen/Hexagon/block-address.ll
@@ -2,21 +2,21 @@
 ; RUN: llc -march=hexagon -hexagon-small-data-threshold=0 < %s
 ; REQUIRES: asserts
 
-@g0 = external global i8*
+@g0 = external global ptr
 
 ; Function Attrs: nounwind
 define i32 @f0(i32 %a0, i32 %a1) #0 {
 b0:
-  %v0 = load i8*, i8** @g0, align 4, !tbaa !0
+  %v0 = load ptr, ptr @g0, align 4, !tbaa !0
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
-  %v1 = phi i8* [ %v0, %b0 ], [ %v5, %b1 ]
+  %v1 = phi ptr [ %v0, %b0 ], [ %v5, %b1 ]
   %v2 = phi i32 [ %a0, %b0 ], [ %v3, %b1 ]
   %v3 = add nsw i32 %v2, 10
-  %v4 = tail call i32 @f1(i8* %v1, i8* blockaddress(@f0, %b1), i8* blockaddress(@f0, %b2)) #0
-  %v5 = load i8*, i8** @g0, align 4, !tbaa !0
-  indirectbr i8* %v5, [label %b1, label %b2]
+  %v4 = tail call i32 @f1(ptr %v1, ptr blockaddress(@f0, %b1), ptr blockaddress(@f0, %b2)) #0
+  %v5 = load ptr, ptr @g0, align 4, !tbaa !0
+  indirectbr ptr %v5, [label %b1, label %b2]
 
 b2:                                               ; preds = %b1
   %v6 = add nsw i32 %v2, 19
@@ -26,7 +26,7 @@ b2:                                               ; preds = %b1
   ret i32 %v9
 }
 
-declare i32 @f1(i8*, i8*, i8*)
+declare i32 @f1(ptr, ptr, ptr)
 
 attributes #0 = { nounwind }
 

diff  --git a/llvm/test/CodeGen/Hexagon/block-ranges-nodef.ll b/llvm/test/CodeGen/Hexagon/block-ranges-nodef.ll
index aaa3652433273..d38a74f7e67ca 100644
--- a/llvm/test/CodeGen/Hexagon/block-ranges-nodef.ll
+++ b/llvm/test/CodeGen/Hexagon/block-ranges-nodef.ll
@@ -10,7 +10,7 @@ entry:
   %cmp17 = icmp ne i64 %c, 0
   %conv19 = zext i1 %cmp17 to i64
   %or = or i64 %conv19, %b
-  store i64 %or, i64* undef, align 8
+  store i64 %or, ptr undef, align 8
   br i1 undef, label %if.then44, label %if.end96
 
 if.then44:                                        ; preds = %entry

diff  --git a/llvm/test/CodeGen/Hexagon/blockaddr-fpic.ll b/llvm/test/CodeGen/Hexagon/blockaddr-fpic.ll
index 4f0ede5a59566..aeff33c9c824b 100644
--- a/llvm/test/CodeGen/Hexagon/blockaddr-fpic.ll
+++ b/llvm/test/CodeGen/Hexagon/blockaddr-fpic.ll
@@ -4,7 +4,7 @@
 
 target triple = "hexagon"
 
-%s.0 = type { [7 x i8*], [7 x i8*], [12 x i8*], [12 x i8*], [2 x i8*], i8*, i8*, i8*, i8* }
+%s.0 = type { [7 x ptr], [7 x ptr], [12 x ptr], [12 x ptr], [2 x ptr], ptr, ptr, ptr, ptr }
 %s.1 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32 }
 
 @g0 = private unnamed_addr constant [4 x i8] c"Sun\00", align 1
@@ -50,40 +50,40 @@ target triple = "hexagon"
 @g40 = private unnamed_addr constant [9 x i8] c"%m/%d/%y\00", align 1
 @g41 = private unnamed_addr constant [9 x i8] c"%H:%M:%S\00", align 1
 @g42 = private unnamed_addr constant [12 x i8] c"%I:%M:%S %p\00", align 1
-@g43 = constant %s.0 { [7 x i8*] [i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g1, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g2, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g3, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g4, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g5, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g6, i32 0, i32 0)], [7 x i8*] [i8* getelementptr inbounds ([7 x i8], [7 x i8]* @g7, i32 0, i32 0), i8* getelementptr inbounds ([7 x i8], [7 x i8]* @g8, i32 0, i32 0), i8* getelementptr inbounds ([8 x i8], [8 x i8]* @g9, i32 0, i32 0), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @g10, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @g11, i32 0, i32 0), i8* getelementptr inbounds ([7 x i8], [7 x i8]* @g12, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @g13, i32 0, i32 0)], [12 x i8*] [i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g14, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g15, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g16, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g17, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g18, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g19, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g20, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g21, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g22, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g23, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g24, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g25, i32 0, i32 0)], [12 x i8*] [i8* getelementptr inbounds ([8 x i8], [8 x i8]* @g26, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @g27, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @g28, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @g29, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g18, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @g30, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @g31, i32 0, i32 0), i8* getelementptr inbounds ([7 x i8], [7 x i8]* @g32, i32 0, i32 0), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @g33, i32 0, i32 0), i8* getelementptr inbounds ([8 x i8], [8 x i8]* @g34, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @g35, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @g36, i32 0, i32 0)], [2 x i8*] [i8* getelementptr inbounds ([3 x i8], [3 x i8]* @g37, i32 0, i32 0), i8* getelementptr inbounds ([3 x i8], [3 x i8]* @g38, i32 0, i32 0)], i8* getelementptr inbounds ([21 x i8], [21 x i8]* @g39, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @g40, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @g41, i32 0, i32 0), i8* getelementptr inbounds ([12 x i8], [12 x i8]* @g42, i32 0, i32 0) }, align 4
-@g44 = global %s.0* @g43, align 4
+@g43 = constant %s.0 { [7 x ptr] [ptr @g0, ptr @g1, ptr @g2, ptr @g3, ptr @g4, ptr @g5, ptr @g6], [7 x ptr] [ptr @g7, ptr @g8, ptr @g9, ptr @g10, ptr @g11, ptr @g12, ptr @g13], [12 x ptr] [ptr @g14, ptr @g15, ptr @g16, ptr @g17, ptr @g18, ptr @g19, ptr @g20, ptr @g21, ptr @g22, ptr @g23, ptr @g24, ptr @g25], [12 x ptr] [ptr @g26, ptr @g27, ptr @g28, ptr @g29, ptr @g18, ptr @g30, ptr @g31, ptr @g32, ptr @g33, ptr @g34, ptr @g35, ptr @g36], [2 x ptr] [ptr @g37, ptr @g38], ptr @g39, ptr @g40, ptr @g41, ptr @g42 }, align 4
+@g44 = global ptr @g43, align 4
 @g45 = private unnamed_addr constant [6 x i8] c"%H:%M\00", align 1
 
 ; Function Attrs: nounwind readonly
-define i8* @f0(i8* readonly %a0, i8* nocapture readonly %a1, %s.1* readonly %a2) #0 {
+define ptr @f0(ptr readonly %a0, ptr nocapture readonly %a1, ptr readonly %a2) #0 {
 b0:
-  %v0 = icmp eq i8* %a0, null
+  %v0 = icmp eq ptr %a0, null
   br i1 %v0, label %b15, label %b1
 
 b1:                                               ; preds = %b0
-  %v1 = load %s.0*, %s.0** @g44, align 4, !tbaa !0
-  %v2 = getelementptr inbounds %s.0, %s.0* %v1, i32 0, i32 5
-  %v3 = getelementptr inbounds %s.0, %s.0* %v1, i32 0, i32 6
+  %v1 = load ptr, ptr @g44, align 4, !tbaa !0
+  %v2 = getelementptr inbounds %s.0, ptr %v1, i32 0, i32 5
+  %v3 = getelementptr inbounds %s.0, ptr %v1, i32 0, i32 6
   br label %b2
 
 b2:                                               ; preds = %b14, %b6, %b1
   %v4 = phi i32 [ undef, %b1 ], [ %v31, %b14 ], [ 0, %b6 ]
-  %v5 = phi i8* [ %a0, %b1 ], [ %v30, %b14 ], [ %v18, %b6 ]
-  %v6 = phi i8* [ %a1, %b1 ], [ %v13, %b14 ], [ %v13, %b6 ]
-  %v7 = load i8, i8* %v6, align 1, !tbaa !4
+  %v5 = phi ptr [ %a0, %b1 ], [ %v30, %b14 ], [ %v18, %b6 ]
+  %v6 = phi ptr [ %a1, %b1 ], [ %v13, %b14 ], [ %v13, %b6 ]
+  %v7 = load i8, ptr %v6, align 1, !tbaa !4
   %v8 = icmp eq i8 %v7, 0
   br i1 %v8, label %b15, label %b3
 
 b3:                                               ; preds = %b2
-  %v9 = getelementptr inbounds i8, i8* %v6, i32 1
+  %v9 = getelementptr inbounds i8, ptr %v6, i32 1
   br label %b4
 
 b4:                                               ; preds = %b7, %b3
-  %v10 = phi i8* [ %v6, %b3 ], [ %v11, %b7 ]
-  %v11 = phi i8* [ %v9, %b3 ], [ %v13, %b7 ]
+  %v10 = phi ptr [ %v6, %b3 ], [ %v11, %b7 ]
+  %v11 = phi ptr [ %v9, %b3 ], [ %v13, %b7 ]
   %v12 = phi i32 [ %v4, %b3 ], [ %v21, %b7 ]
-  %v13 = getelementptr inbounds i8, i8* %v10, i32 2
-  %v14 = load i8, i8* %v11, align 1, !tbaa !4
+  %v13 = getelementptr inbounds i8, ptr %v10, i32 2
+  %v14 = load i8, ptr %v11, align 1, !tbaa !4
   %v15 = zext i8 %v14 to i32
   switch i32 %v15, label %b15 [
     i32 37, label %b5
@@ -96,22 +96,22 @@ b4:                                               ; preds = %b7, %b3
   ]
 
 b5:                                               ; preds = %b4
-  %v16 = load i8, i8* %v5, align 1, !tbaa !4
+  %v16 = load i8, ptr %v5, align 1, !tbaa !4
   %v17 = icmp eq i8 %v14, %v16
   br i1 %v17, label %b6, label %b15
 
 b6:                                               ; preds = %b5
-  %v18 = getelementptr inbounds i8, i8* %v5, i32 1
+  %v18 = getelementptr inbounds i8, ptr %v5, i32 1
   %v19 = icmp eq i32 %v12, 0
   br i1 %v19, label %b2, label %b15
 
 b7:                                               ; preds = %b10, %b9, %b8, %b4
-  %v20 = phi i8* [ blockaddress(@f0, %b4), %b8 ], [ blockaddress(@f0, %b11), %b9 ], [ blockaddress(@f0, %b11), %b10 ], [ blockaddress(@f0, %b4), %b4 ]
+  %v20 = phi ptr [ blockaddress(@f0, %b4), %b8 ], [ blockaddress(@f0, %b11), %b9 ], [ blockaddress(@f0, %b11), %b10 ], [ blockaddress(@f0, %b4), %b4 ]
   %v21 = phi i32 [ 2, %b8 ], [ 1, %b9 ], [ 1, %b10 ], [ 1, %b4 ]
-  %v22 = phi i8* [ getelementptr inbounds ([9 x i8], [9 x i8]* @g40, i32 0, i32 0), %b8 ], [ getelementptr inbounds ([9 x i8], [9 x i8]* @g40, i32 0, i32 0), %b9 ], [ getelementptr inbounds ([6 x i8], [6 x i8]* @g45, i32 0, i32 0), %b10 ], [ getelementptr inbounds ([9 x i8], [9 x i8]* @g40, i32 0, i32 0), %b4 ]
+  %v22 = phi ptr [ @g40, %b8 ], [ @g40, %b9 ], [ @g45, %b10 ], [ @g40, %b4 ]
   %v23 = icmp eq i32 %v12, 0
-  %v24 = select i1 %v23, i8* %v20, i8* blockaddress(@f0, %b15)
-  indirectbr i8* %v24, [label %b4, label %b11, label %b15]
+  %v24 = select i1 %v23, ptr %v20, ptr blockaddress(@f0, %b15)
+  indirectbr ptr %v24, [label %b4, label %b11, label %b15]
 
 b8:                                               ; preds = %b4
   br label %b7
@@ -123,28 +123,28 @@ b10:                                              ; preds = %b4
   br label %b7
 
 b11:                                              ; preds = %b7
-  %v25 = tail call i8* @f0(i8* %v5, i8* %v22, %s.1* %a2) #1
+  %v25 = tail call ptr @f0(ptr %v5, ptr %v22, ptr %a2) #1
   br label %b14
 
 b12:                                              ; preds = %b4
   br label %b13
 
 b13:                                              ; preds = %b12, %b4
-  %v26 = phi i8** [ %v3, %b12 ], [ %v2, %b4 ]
-  %v27 = load i8*, i8** %v26, align 4
-  %v28 = tail call i8* @f0(i8* %v5, i8* %v27, %s.1* %a2) #1
+  %v26 = phi ptr [ %v3, %b12 ], [ %v2, %b4 ]
+  %v27 = load ptr, ptr %v26, align 4
+  %v28 = tail call ptr @f0(ptr %v5, ptr %v27, ptr %a2) #1
   %v29 = icmp ugt i32 %v12, 1
   br i1 %v29, label %b15, label %b14
 
 b14:                                              ; preds = %b13, %b11
-  %v30 = phi i8* [ %v28, %b13 ], [ %v25, %b11 ]
+  %v30 = phi ptr [ %v28, %b13 ], [ %v25, %b11 ]
   %v31 = phi i32 [ %v12, %b13 ], [ 0, %b11 ]
-  %v32 = icmp eq i8* %v30, null
+  %v32 = icmp eq ptr %v30, null
   br i1 %v32, label %b15, label %b2
 
 b15:                                              ; preds = %b14, %b13, %b7, %b6, %b5, %b4, %b2, %b0
-  %v33 = phi i8* [ null, %b0 ], [ null, %b4 ], [ null, %b7 ], [ null, %b13 ], [ null, %b14 ], [ %v5, %b2 ], [ null, %b5 ], [ null, %b6 ]
-  ret i8* %v33
+  %v33 = phi ptr [ null, %b0 ], [ null, %b4 ], [ null, %b7 ], [ null, %b13 ], [ null, %b14 ], [ %v5, %b2 ], [ null, %b5 ], [ null, %b6 ]
+  ret ptr %v33
 }
 
 attributes #0 = { nounwind readonly }

diff  --git a/llvm/test/CodeGen/Hexagon/branch-non-mbb.ll b/llvm/test/CodeGen/Hexagon/branch-non-mbb.ll
index e86ca2ed4023c..a0782695a76a1 100644
--- a/llvm/test/CodeGen/Hexagon/branch-non-mbb.ll
+++ b/llvm/test/CodeGen/Hexagon/branch-non-mbb.ll
@@ -18,8 +18,8 @@ declare void @bar(i32, i32) #2
 define void @fred(i8 signext %a, i8 signext %b) #1 {
 entry:
   %i = sext i8 %a to i32
-  %t = getelementptr inbounds [3 x %struct.t1], [3 x %struct.t1]* @var, i32 0, i32 %i, i32 3, i32 0
-  %0 = load i8, i8* %t, align 8
+  %t = getelementptr inbounds [3 x %struct.t1], ptr @var, i32 0, i32 %i, i32 3, i32 0
+  %0 = load i8, ptr %t, align 8
   switch i8 %0, label %if.end14 [
     i8 1, label %if.then
     i8 0, label %do.body
@@ -27,8 +27,8 @@ entry:
 
 if.then:                                          ; preds = %entry
   %j = sext i8 %b to i32
-  %u = getelementptr inbounds [3 x %struct.t1], [3 x %struct.t1]* @var, i32 0, i32 %i, i32 3, i32 1, i32 %j
-  store i8 1, i8* %u, align 1
+  %u = getelementptr inbounds [3 x %struct.t1], ptr @var, i32 0, i32 %i, i32 3, i32 1, i32 %j
+  store i8 1, ptr %u, align 1
   tail call void @foo() #0
   br label %if.end14
 

diff  --git a/llvm/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll b/llvm/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll
index db56e0a2fafe5..27959b1ec9b50 100644
--- a/llvm/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll
+++ b/llvm/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll
@@ -12,17 +12,17 @@ b0:
   br i1 undef, label %b1, label %b2
 
 b1:                                               ; preds = %b0
-  %t0 = load i8*, i8** undef, align 4
+  %t0 = load ptr, ptr undef, align 4
   br label %b2
 
 b2:                                               ; preds = %b1, %b0
-  %t1 = phi i8* [ %t0, %b1 ], [ undef, %b0 ]
-  %t2 = getelementptr inbounds i8, i8* %t1, i32 %p0
-  tail call void @llvm.memmove.p0i8.p0i8.i32(i8* undef, i8* %t2, i32 undef, i1 false) #1
+  %t1 = phi ptr [ %t0, %b1 ], [ undef, %b0 ]
+  %t2 = getelementptr inbounds i8, ptr %t1, i32 %p0
+  tail call void @llvm.memmove.p0.p0.i32(ptr undef, ptr %t2, i32 undef, i1 false) #1
   ret void
 }
 
-declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0
+declare void @llvm.memmove.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32, i1) #0
 
 attributes #0 = { argmemonly nounwind }
 attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/brcond-setne.ll b/llvm/test/CodeGen/Hexagon/brcond-setne.ll
index d5ff8098aadc2..61393b7064421 100644
--- a/llvm/test/CodeGen/Hexagon/brcond-setne.ll
+++ b/llvm/test/CodeGen/Hexagon/brcond-setne.ll
@@ -4,10 +4,10 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define zeroext i8 @f0(i8** nocapture %a0, i32* nocapture %a1) #0 {
+define zeroext i8 @f0(ptr nocapture %a0, ptr nocapture %a1) #0 {
 b0:
-  %v0 = load i8*, i8** %a0, align 4, !tbaa !0
-  %v1 = load i8, i8* %v0, align 1, !tbaa !4
+  %v0 = load ptr, ptr %a0, align 4, !tbaa !0
+  %v1 = load i8, ptr %v0, align 1, !tbaa !4
   %v2 = icmp eq i8 %v1, 0
   br i1 %v2, label %b11, label %b1
 
@@ -17,14 +17,14 @@ b1:                                               ; preds = %b0
 b2:                                               ; preds = %b9, %b1
   %v3 = phi i8 [ %v20, %b9 ], [ %v1, %b1 ]
   %v4 = phi i8 [ %v17, %b9 ], [ 0, %b1 ]
-  %v5 = phi i8* [ %v18, %b9 ], [ %v0, %b1 ]
+  %v5 = phi ptr [ %v18, %b9 ], [ %v0, %b1 ]
   %v6 = icmp eq i8 %v3, 44
   br i1 %v6, label %b3, label %b4
 
 b3:                                               ; preds = %b2
-  %v7 = phi i8* [ %v5, %b2 ]
+  %v7 = phi ptr [ %v5, %b2 ]
   %v8 = phi i8 [ %v4, %b2 ]
-  %v9 = getelementptr inbounds i8, i8* %v7, i32 1
+  %v9 = getelementptr inbounds i8, ptr %v7, i32 1
   br label %b11
 
 b4:                                               ; preds = %b2
@@ -45,30 +45,30 @@ b7:                                               ; preds = %b6
 
 b8:                                               ; preds = %b7, %b6, %b5
   %v14 = phi i8 [ 2, %b7 ], [ 0, %b6 ], [ 4, %b5 ]
-  %v15 = load i32, i32* %a1, align 4, !tbaa !5
+  %v15 = load i32, ptr %a1, align 4, !tbaa !5
   %v16 = add i32 %v15, 1
-  store i32 %v16, i32* %a1, align 4, !tbaa !5
+  store i32 %v16, ptr %a1, align 4, !tbaa !5
   br label %b9
 
 b9:                                               ; preds = %b8, %b4
   %v17 = phi i8 [ %v14, %b8 ], [ %v4, %b4 ]
-  %v18 = getelementptr inbounds i8, i8* %v5, i32 1
-  %v19 = getelementptr i8, i8* %v5, i32 1
-  %v20 = load i8, i8* %v19, align 1, !tbaa !4
+  %v18 = getelementptr inbounds i8, ptr %v5, i32 1
+  %v19 = getelementptr i8, ptr %v5, i32 1
+  %v20 = load i8, ptr %v19, align 1, !tbaa !4
   %v21 = icmp ne i8 %v20, 0
   %v22 = icmp ne i8 %v17, 1
   %v23 = and i1 %v21, %v22
   br i1 %v23, label %b2, label %b10
 
 b10:                                              ; preds = %b9
-  %v24 = phi i8* [ %v18, %b9 ]
+  %v24 = phi ptr [ %v18, %b9 ]
   %v25 = phi i8 [ %v17, %b9 ]
   br label %b11
 
 b11:                                              ; preds = %b10, %b3, %b0
   %v26 = phi i8 [ %v8, %b3 ], [ 0, %b0 ], [ %v25, %b10 ]
-  %v27 = phi i8* [ %v9, %b3 ], [ %v0, %b0 ], [ %v24, %b10 ]
-  store i8* %v27, i8** %a0, align 4, !tbaa !0
+  %v27 = phi ptr [ %v9, %b3 ], [ %v0, %b0 ], [ %v24, %b10 ]
+  store ptr %v27, ptr %a0, align 4, !tbaa !0
   ret i8 %v26
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/brev_ld.ll b/llvm/test/CodeGen/Hexagon/brev_ld.ll
index 0da839e52463d..8cc1d0bc858e3 100644
--- a/llvm/test/CodeGen/Hexagon/brev_ld.ll
+++ b/llvm/test/CodeGen/Hexagon/brev_ld.ll
@@ -19,74 +19,66 @@ target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i
 target triple = "hexagon-unknown--elf"
 
 ; CHECK: @call_brev_ldd
-define i64* @call_brev_ldd(i64* %ptr, i64 %dst, i32 %mod) local_unnamed_addr #0 {
+define ptr @call_brev_ldd(ptr %ptr, i64 %dst, i32 %mod) local_unnamed_addr #0 {
 entry:
-  %0 = bitcast i64* %ptr to i8*
 ; CHECK: = memd(r{{[0-9]*}}++m{{[0-1]}}:brev)
-  %1 = tail call { i64, i8* } @llvm.hexagon.L2.loadrd.pbr(i8* %0, i32 %mod)
-  %2 = extractvalue { i64, i8* } %1, 1
-  %3 = bitcast i8* %2 to i64*
-  ret i64* %3
+  %0 = tail call { i64, ptr } @llvm.hexagon.L2.loadrd.pbr(ptr %ptr, i32 %mod)
+  %1 = extractvalue { i64, ptr } %0, 1
+  ret ptr %1
 }
 
 ; CHECK: @call_brev_ldw
-define i32* @call_brev_ldw(i32* %ptr, i32 %dst, i32 %mod) local_unnamed_addr #0 {
+define ptr @call_brev_ldw(ptr %ptr, i32 %dst, i32 %mod) local_unnamed_addr #0 {
 entry:
-  %0 = bitcast i32* %ptr to i8*
 ; CHECK: = memw(r{{[0-9]*}}++m{{[0-1]}}:brev)
-  %1 = tail call { i32, i8* } @llvm.hexagon.L2.loadri.pbr(i8* %0, i32 %mod)
-  %2 = extractvalue { i32, i8* } %1, 1
-  %3 = bitcast i8* %2 to i32*
-  ret i32* %3
+  %0 = tail call { i32, ptr } @llvm.hexagon.L2.loadri.pbr(ptr %ptr, i32 %mod)
+  %1 = extractvalue { i32, ptr } %0, 1
+  ret ptr %1
 }
 
 ; CHECK: @call_brev_ldh
-define i16* @call_brev_ldh(i16* %ptr, i16 signext %dst, i32 %mod) local_unnamed_addr #0 {
+define ptr @call_brev_ldh(ptr %ptr, i16 signext %dst, i32 %mod) local_unnamed_addr #0 {
 entry:
-  %0 = bitcast i16* %ptr to i8*
 ; CHECK: = memh(r{{[0-9]*}}++m{{[0-1]}}:brev)
-  %1 = tail call { i32, i8* } @llvm.hexagon.L2.loadrh.pbr(i8* %0, i32 %mod)
-  %2 = extractvalue { i32, i8* } %1, 1
-  %3 = bitcast i8* %2 to i16*
-  ret i16* %3
+  %0 = tail call { i32, ptr } @llvm.hexagon.L2.loadrh.pbr(ptr %ptr, i32 %mod)
+  %1 = extractvalue { i32, ptr } %0, 1
+  ret ptr %1
 }
 
 ; CHECK: @call_brev_lduh
-define i16* @call_brev_lduh(i16* %ptr, i16 zeroext %dst, i32 %mod) local_unnamed_addr #0 {
+define ptr @call_brev_lduh(ptr %ptr, i16 zeroext %dst, i32 %mod) local_unnamed_addr #0 {
 entry:
-  %0 = bitcast i16* %ptr to i8*
 ; CHECK: = memuh(r{{[0-9]*}}++m{{[0-1]}}:brev)
-  %1 = tail call { i32, i8* } @llvm.hexagon.L2.loadruh.pbr(i8* %0, i32 %mod)
-  %2 = extractvalue { i32, i8* } %1, 1
-  %3 = bitcast i8* %2 to i16*
-  ret i16* %3
+  %0 = tail call { i32, ptr } @llvm.hexagon.L2.loadruh.pbr(ptr %ptr, i32 %mod)
+  %1 = extractvalue { i32, ptr } %0, 1
+  ret ptr %1
 }
 
 ; CHECK: @call_brev_ldb
-define i8* @call_brev_ldb(i8* %ptr, i8 signext %dst, i32 %mod) local_unnamed_addr #0 {
+define ptr @call_brev_ldb(ptr %ptr, i8 signext %dst, i32 %mod) local_unnamed_addr #0 {
 entry:
 ; CHECK: = memb(r{{[0-9]*}}++m{{[0-1]}}:brev)
-  %0 = tail call { i32, i8* } @llvm.hexagon.L2.loadrb.pbr(i8* %ptr, i32 %mod)
-  %1 = extractvalue { i32, i8* } %0, 1
-  ret i8* %1
+  %0 = tail call { i32, ptr } @llvm.hexagon.L2.loadrb.pbr(ptr %ptr, i32 %mod)
+  %1 = extractvalue { i32, ptr } %0, 1
+  ret ptr %1
 }
 
 ; Function Attrs: nounwind readonly
 ; CHECK: @call_brev_ldub
-define i8* @call_brev_ldub(i8* %ptr, i8 zeroext %dst, i32 %mod) local_unnamed_addr #0 {
+define ptr @call_brev_ldub(ptr %ptr, i8 zeroext %dst, i32 %mod) local_unnamed_addr #0 {
 entry:
 ; CHECK: = memub(r{{[0-9]*}}++m{{[0-1]}}:brev)
-  %0 = tail call { i32, i8* } @llvm.hexagon.L2.loadrub.pbr(i8* %ptr, i32 %mod)
-  %1 = extractvalue { i32, i8* } %0, 1
-  ret i8* %1
+  %0 = tail call { i32, ptr } @llvm.hexagon.L2.loadrub.pbr(ptr %ptr, i32 %mod)
+  %1 = extractvalue { i32, ptr } %0, 1
+  ret ptr %1
 }
 
-declare { i64, i8* } @llvm.hexagon.L2.loadrd.pbr(i8*, i32) #1
-declare { i32, i8* } @llvm.hexagon.L2.loadri.pbr(i8*, i32) #1
-declare { i32, i8* } @llvm.hexagon.L2.loadrh.pbr(i8*, i32) #1
-declare { i32, i8* } @llvm.hexagon.L2.loadruh.pbr(i8*, i32) #1
-declare { i32, i8* } @llvm.hexagon.L2.loadrb.pbr(i8*, i32) #1
-declare { i32, i8* } @llvm.hexagon.L2.loadrub.pbr(i8*, i32) #1
+declare { i64, ptr } @llvm.hexagon.L2.loadrd.pbr(ptr, i32) #1
+declare { i32, ptr } @llvm.hexagon.L2.loadri.pbr(ptr, i32) #1
+declare { i32, ptr } @llvm.hexagon.L2.loadrh.pbr(ptr, i32) #1
+declare { i32, ptr } @llvm.hexagon.L2.loadruh.pbr(ptr, i32) #1
+declare { i32, ptr } @llvm.hexagon.L2.loadrb.pbr(ptr, i32) #1
+declare { i32, ptr } @llvm.hexagon.L2.loadrub.pbr(ptr, i32) #1
 
 attributes #0 = { nounwind readonly "target-cpu"="hexagonv60" }
 attributes #1 = { nounwind readonly }

diff  --git a/llvm/test/CodeGen/Hexagon/brev_st.ll b/llvm/test/CodeGen/Hexagon/brev_st.ll
index 5f754ccf74ec6..db5d8314b593d 100644
--- a/llvm/test/CodeGen/Hexagon/brev_st.ll
+++ b/llvm/test/CodeGen/Hexagon/brev_st.ll
@@ -17,85 +17,80 @@
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
 target triple = "hexagon"
 
-define i64 @foo(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
+define i64 @foo(i16 zeroext %filtMemLen, ptr %filtMemLR, i16 signext %filtMemIndex) nounwind {
 entry:
   %conv = zext i16 %filtMemLen to i32
   %shr2 = lshr i32 %conv, 1
   %idxprom = sext i16 %filtMemIndex to i32
-  %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
-  %0 = bitcast i16* %arrayidx to i8*
+  %arrayidx = getelementptr inbounds i16, ptr %filtMemLR, i32 %idxprom
   %sub = sub i32 13, %shr2
   %shl = shl i32 1, %sub
 ; CHECK: memd(r{{[0-9]*}}++m{{[0-1]}}:brev)
-  %1 = tail call i8* @llvm.hexagon.S2.storerd.pbr(i8* %0, i64 undef, i32 %shl)
+  %0 = tail call ptr @llvm.hexagon.S2.storerd.pbr(ptr %arrayidx, i64 undef, i32 %shl)
   ret i64 0
 }
 
-declare i8* @llvm.hexagon.S2.storerd.pbr(i8*, i64, i32) nounwind
+declare ptr @llvm.hexagon.S2.storerd.pbr(ptr, i64, i32) nounwind
 
-define i32 @foo1(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
+define i32 @foo1(i16 zeroext %filtMemLen, ptr %filtMemLR, i16 signext %filtMemIndex) nounwind {
 entry:
   %conv = zext i16 %filtMemLen to i32
   %shr1 = lshr i32 %conv, 1
   %idxprom = sext i16 %filtMemIndex to i32
-  %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
-  %0 = bitcast i16* %arrayidx to i8*
+  %arrayidx = getelementptr inbounds i16, ptr %filtMemLR, i32 %idxprom
   %sub = sub i32 14, %shr1
   %shl = shl i32 1, %sub
 ; CHECK: memw(r{{[0-9]*}}++m{{[0-1]}}:brev)
-  %1 = tail call i8* @llvm.hexagon.S2.storeri.pbr(i8* %0, i32 undef, i32 %shl)
+  %0 = tail call ptr @llvm.hexagon.S2.storeri.pbr(ptr %arrayidx, i32 undef, i32 %shl)
   ret i32 0
 }
 
-declare i8* @llvm.hexagon.S2.storeri.pbr(i8*, i32, i32) nounwind
+declare ptr @llvm.hexagon.S2.storeri.pbr(ptr, i32, i32) nounwind
 
-define signext i16 @foo2(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
+define signext i16 @foo2(i16 zeroext %filtMemLen, ptr %filtMemLR, i16 signext %filtMemIndex) nounwind {
 entry:
   %conv = zext i16 %filtMemLen to i32
   %shr2 = lshr i32 %conv, 1
   %idxprom = sext i16 %filtMemIndex to i32
-  %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
-  %0 = bitcast i16* %arrayidx to i8*
+  %arrayidx = getelementptr inbounds i16, ptr %filtMemLR, i32 %idxprom
   %sub = sub i32 15, %shr2
   %shl = shl i32 1, %sub
 ; CHECK: memh(r{{[0-9]*}}++m{{[0-1]}}:brev)
-  %1 = tail call i8* @llvm.hexagon.S2.storerh.pbr(i8* %0, i32 0, i32 %shl)
+  %0 = tail call ptr @llvm.hexagon.S2.storerh.pbr(ptr %arrayidx, i32 0, i32 %shl)
   ret i16 0
 }
 
-declare i8* @llvm.hexagon.S2.storerh.pbr(i8*, i32, i32) nounwind
+declare ptr @llvm.hexagon.S2.storerh.pbr(ptr, i32, i32) nounwind
 
-define signext i16 @foo3(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
+define signext i16 @foo3(i16 zeroext %filtMemLen, ptr %filtMemLR, i16 signext %filtMemIndex) nounwind {
 entry:
   %conv = zext i16 %filtMemLen to i32
   %shr2 = lshr i32 %conv, 1
   %idxprom = sext i16 %filtMemIndex to i32
-  %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
-  %0 = bitcast i16* %arrayidx to i8*
+  %arrayidx = getelementptr inbounds i16, ptr %filtMemLR, i32 %idxprom
   %sub = sub i32 15, %shr2
   %shl = shl i32 1, %sub
 ; CHECK: memh(r{{[0-9]*}}++m{{[0-1]}}:brev) = r{{[0-9]*}}.h
-  %1 = tail call i8* @llvm.hexagon.S2.storerf.pbr(i8* %0, i32 0, i32 %shl)
+  %0 = tail call ptr @llvm.hexagon.S2.storerf.pbr(ptr %arrayidx, i32 0, i32 %shl)
   ret i16 0
 }
 
-declare i8* @llvm.hexagon.S2.storerf.pbr(i8*, i32, i32) nounwind
+declare ptr @llvm.hexagon.S2.storerf.pbr(ptr, i32, i32) nounwind
 
-define zeroext i8 @foo5(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
+define zeroext i8 @foo5(i16 zeroext %filtMemLen, ptr %filtMemLR, i16 signext %filtMemIndex) nounwind {
 entry:
   %conv = zext i16 %filtMemLen to i32
   %shr2 = lshr i32 %conv, 1
   %idxprom = sext i16 %filtMemIndex to i32
-  %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
-  %0 = bitcast i16* %arrayidx to i8*
+  %arrayidx = getelementptr inbounds i16, ptr %filtMemLR, i32 %idxprom
   %sub = sub nsw i32 16, %shr2
   ; CHECK: memb(r{{[0-9]*}}++m{{[0-1]}}:brev)
   %shl = shl i32 1, %sub
-  %1 = tail call i8* @llvm.hexagon.S2.storerb.pbr(i8* %0, i32 0, i32 %shl)
+  %0 = tail call ptr @llvm.hexagon.S2.storerb.pbr(ptr %arrayidx, i32 0, i32 %shl)
   ret i8 0
 }
 
-declare i8* @llvm.hexagon.S2.storerb.pbr(i8*, i32, i32) nounwind
+declare ptr @llvm.hexagon.S2.storerb.pbr(ptr, i32, i32) nounwind
 
 !0 = !{!"omnipotent char", !1}
 !1 = !{!"Simple C/C++ TBAA"}

diff  --git a/llvm/test/CodeGen/Hexagon/bss-local.ll b/llvm/test/CodeGen/Hexagon/bss-local.ll
index f6a5baefb08cc..671e8aeb303b4 100644
--- a/llvm/test/CodeGen/Hexagon/bss-local.ll
+++ b/llvm/test/CodeGen/Hexagon/bss-local.ll
@@ -10,15 +10,15 @@ target triple = "hexagon"
 ; Function Attrs: nounwind
 define i32 @f0(i32 %a0) #0 {
 b0:
-  call void @f1(i32* getelementptr inbounds ([16 x i32], [16 x i32]* @g0, i32 0, i32 0), i32* getelementptr inbounds ([16 x i32], [16 x i32]* @g1, i32 0, i32 0))
-  %v0 = getelementptr inbounds [16 x i32], [16 x i32]* @g0, i32 0, i32 %a0
-  %v1 = load i32, i32* %v0, align 4
-  %v2 = getelementptr inbounds [16 x i32], [16 x i32]* @g1, i32 0, i32 %a0
-  %v3 = load i32, i32* %v2, align 4
+  call void @f1(ptr @g0, ptr @g1)
+  %v0 = getelementptr inbounds [16 x i32], ptr @g0, i32 0, i32 %a0
+  %v1 = load i32, ptr %v0, align 4
+  %v2 = getelementptr inbounds [16 x i32], ptr @g1, i32 0, i32 %a0
+  %v3 = load i32, ptr %v2, align 4
   %v4 = add nsw i32 %v1, %v3
   ret i32 %v4
 }
 
-declare void @f1(i32*, i32*)
+declare void @f1(ptr, ptr)
 
 attributes #0 = { nounwind "target-cpu"="hexagonv55" }

diff  --git a/llvm/test/CodeGen/Hexagon/bug-aa4463-ifconv-vecpred.ll b/llvm/test/CodeGen/Hexagon/bug-aa4463-ifconv-vecpred.ll
index 339cc38873000..3868dc979e250 100644
--- a/llvm/test/CodeGen/Hexagon/bug-aa4463-ifconv-vecpred.ll
+++ b/llvm/test/CodeGen/Hexagon/bug-aa4463-ifconv-vecpred.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -march=hexagon -O2 < %s
 ; REQUIRES: asserts
 
-define inreg <16 x i32> @f0(i32 %a0, <16 x i32>* nocapture %a1) #0 {
+define inreg <16 x i32> @f0(i32 %a0, ptr nocapture %a1) #0 {
 b0:
   %v0 = tail call <64 x i1> @llvm.hexagon.V6.pred.scalar2(i32 %a0)
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.pred.not(<64 x i1> %v0)
@@ -17,12 +17,12 @@ b1:                                               ; preds = %b0
 b2:                                               ; preds = %b1, %b0
   %v6 = phi <64 x i1> [ %v5, %b1 ], [ %v1, %b0 ]
   %v7 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v6, i32 -1)
-  %v8 = getelementptr inbounds <16 x i32>, <16 x i32>* %a1, i32 1
-  %v9 = load <16 x i32>, <16 x i32>* %v8, align 64
-  %v10 = getelementptr inbounds <16 x i32>, <16 x i32>* %a1, i32 2
-  %v11 = load <16 x i32>, <16 x i32>* %v10, align 64
+  %v8 = getelementptr inbounds <16 x i32>, ptr %a1, i32 1
+  %v9 = load <16 x i32>, ptr %v8, align 64
+  %v10 = getelementptr inbounds <16 x i32>, ptr %a1, i32 2
+  %v11 = load <16 x i32>, ptr %v10, align 64
   %v12 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<64 x i1> %v6, <16 x i32> %v9, <16 x i32> %v11)
-  store <16 x i32> %v12, <16 x i32>* %a1, align 64
+  store <16 x i32> %v12, ptr %a1, align 64
   ret <16 x i32> %v7
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/bug-allocframe-size.ll b/llvm/test/CodeGen/Hexagon/bug-allocframe-size.ll
index a9a74349ef054..20070345e6b9d 100644
--- a/llvm/test/CodeGen/Hexagon/bug-allocframe-size.ll
+++ b/llvm/test/CodeGen/Hexagon/bug-allocframe-size.ll
@@ -11,13 +11,13 @@ b0:
   %v0 = alloca float, align 4
   %v1 = alloca i16, align 2
   %v2 = alloca float, align 4
-  store float %a0, float* %v0, align 4, !tbaa !0
-  %v3 = call signext i16 @f1(i16* %v1, float* %v0) #1
+  store float %a0, ptr %v0, align 4, !tbaa !0
+  %v3 = call signext i16 @f1(ptr %v1, ptr %v0) #1
   %v4 = icmp ult i16 %v3, 3
   br i1 %v4, label %b11, label %b1
 
 b1:                                               ; preds = %b0
-  %v5 = load i16, i16* %v1, align 2, !tbaa !4
+  %v5 = load i16, ptr %v1, align 2, !tbaa !4
   %v6 = sext i16 %v5 to i32
   %v7 = srem i32 %v6, 3
   %v8 = icmp eq i32 %v7, 0
@@ -40,25 +40,24 @@ b4:                                               ; preds = %b3
   %v16 = phi i16 [ %v12, %b3 ]
   %v17 = phi i32 [ %v11, %b3 ]
   %v18 = phi i32 [ %v10, %b3 ]
-  store i16 %v16, i16* %v1, align 2, !tbaa !4
+  store i16 %v16, ptr %v1, align 2, !tbaa !4
   %v19 = icmp slt i32 %v18, 1
   br i1 %v19, label %b5, label %b6
 
 b5:                                               ; preds = %b4
-  %v20 = call signext i16 @f2(float* %v0, i32 %v17) #1
+  %v20 = call signext i16 @f2(ptr %v0, i32 %v17) #1
   br label %b6
 
 b6:                                               ; preds = %b5, %b4, %b1
-  %v21 = bitcast float* %v0 to i16*
-  %v22 = getelementptr inbounds i16, i16* %v21, i32 1
-  %v23 = load i16, i16* %v22, align 2, !tbaa !6
+  %v22 = getelementptr inbounds i16, ptr %v0, i32 1
+  %v23 = load i16, ptr %v22, align 2, !tbaa !6
   %v24 = icmp slt i16 %v23, 0
-  %v25 = load float, float* %v0, align 4, !tbaa !0
+  %v25 = load float, ptr %v0, align 4, !tbaa !0
   br i1 %v24, label %b7, label %b8
 
 b7:                                               ; preds = %b6
   %v26 = fsub float -0.000000e+00, %v25
-  store float %v26, float* %v0, align 4, !tbaa !0
+  store float %v26, ptr %v0, align 4, !tbaa !0
   br label %b8
 
 b8:                                               ; preds = %b7, %b6
@@ -70,7 +69,7 @@ b8:                                               ; preds = %b7, %b6
   %v32 = fadd float %v31, 0x3FB43419E0000000
   %v33 = fadd float %v27, 0x3FD1E54B40000000
   %v34 = fdiv float %v32, %v33
-  store float %v34, float* %v2, align 4, !tbaa !0
+  store float %v34, ptr %v2, align 4, !tbaa !0
   %v35 = fmul float %v27, 1.500000e+00
   %v36 = fmul float %v34, %v34
   %v37 = fmul float %v27, 5.000000e-01
@@ -87,22 +86,22 @@ b9:                                               ; preds = %b8
 
 b10:                                              ; preds = %b9, %b8
   %v44 = phi float [ %v43, %b9 ], [ %v42, %b8 ]
-  store float %v44, float* %v2, align 4, !tbaa !0
-  %v45 = load i16, i16* %v1, align 2, !tbaa !4
+  store float %v44, ptr %v2, align 4, !tbaa !0
+  %v45 = load i16, ptr %v1, align 2, !tbaa !4
   %v46 = sext i16 %v45 to i32
   %v47 = sdiv i32 %v46, 3
-  %v48 = call signext i16 @f2(float* %v2, i32 %v47) #1
+  %v48 = call signext i16 @f2(ptr %v2, i32 %v47) #1
   br label %b11
 
 b11:                                              ; preds = %b10, %b0
-  %v49 = phi float* [ %v2, %b10 ], [ %v0, %b0 ]
-  %v50 = load float, float* %v49, align 4
+  %v49 = phi ptr [ %v2, %b10 ], [ %v0, %b0 ]
+  %v50 = load float, ptr %v49, align 4
   ret float %v50
 }
 
-declare signext i16 @f1(i16*, float*) #1
+declare signext i16 @f1(ptr, ptr) #1
 
-declare signext i16 @f2(float*, i32) #1
+declare signext i16 @f2(ptr, i32) #1
 
 attributes #0 = { nounwind "target-cpu"="hexagonv60" }
 attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/bug-hcp-tied-kill.ll b/llvm/test/CodeGen/Hexagon/bug-hcp-tied-kill.ll
index e8346e277bdc8..d2a006af89f9f 100644
--- a/llvm/test/CodeGen/Hexagon/bug-hcp-tied-kill.ll
+++ b/llvm/test/CodeGen/Hexagon/bug-hcp-tied-kill.ll
@@ -17,33 +17,33 @@ target triple = "hexagon"
 @g10 = private unnamed_addr constant [54 x i8] c"%x :  Q6_R_mpyiacc_RR(INT32_MAX,INT32_MAX,INT32_MAX)\0A\00", align 1
 
 ; Function Attrs: nounwind
-declare i32 @f0(i8* nocapture readonly, ...) #0
+declare i32 @f0(ptr nocapture readonly, ...) #0
 
 ; Function Attrs: nounwind
 define i32 @f1() #0 {
 b0:
   %v0 = tail call i32 @llvm.hexagon.M2.maci(i32 2147483647, i32 0, i32 2147483647)
-  %v1 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @g0, i32 0, i32 0), i32 %v0) #2
+  %v1 = tail call i32 (ptr, ...) @f0(ptr @g0, i32 %v0) #2
   %v2 = tail call i32 @llvm.hexagon.M2.maci(i32 -2147483648, i32 1, i32 2147483647)
-  %v3 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @g1, i32 0, i32 0), i32 %v2) #2
+  %v3 = tail call i32 (ptr, ...) @f0(ptr @g1, i32 %v2) #2
   %v4 = tail call i32 @llvm.hexagon.M2.maci(i32 -1, i32 1, i32 2147483647)
-  %v5 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([39 x i8], [39 x i8]* @g2, i32 0, i32 0), i32 %v4) #2
+  %v5 = tail call i32 (ptr, ...) @f0(ptr @g2, i32 %v4) #2
   %v6 = tail call i32 @llvm.hexagon.M2.maci(i32 0, i32 1, i32 2147483647)
-  %v7 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @g3, i32 0, i32 0), i32 %v6) #2
+  %v7 = tail call i32 (ptr, ...) @f0(ptr @g3, i32 %v6) #2
   %v8 = tail call i32 @llvm.hexagon.M2.maci(i32 1, i32 1, i32 2147483647)
-  %v9 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @g4, i32 0, i32 0), i32 %v8) #2
+  %v9 = tail call i32 (ptr, ...) @f0(ptr @g4, i32 %v8) #2
   %v10 = tail call i32 @llvm.hexagon.M2.maci(i32 2147483647, i32 1, i32 2147483647)
-  %v11 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @g5, i32 0, i32 0), i32 %v10) #2
+  %v11 = tail call i32 (ptr, ...) @f0(ptr @g5, i32 %v10) #2
   %v12 = tail call i32 @llvm.hexagon.M2.maci(i32 -2147483648, i32 2147483647, i32 2147483647)
-  %v13 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([54 x i8], [54 x i8]* @g6, i32 0, i32 0), i32 %v12) #2
+  %v13 = tail call i32 (ptr, ...) @f0(ptr @g6, i32 %v12) #2
   %v14 = tail call i32 @llvm.hexagon.M2.maci(i32 -1, i32 2147483647, i32 2147483647)
-  %v15 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @g7, i32 0, i32 0), i32 %v14) #2
+  %v15 = tail call i32 (ptr, ...) @f0(ptr @g7, i32 %v14) #2
   %v16 = tail call i32 @llvm.hexagon.M2.maci(i32 0, i32 2147483647, i32 2147483647)
-  %v17 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @g8, i32 0, i32 0), i32 %v16) #2
+  %v17 = tail call i32 (ptr, ...) @f0(ptr @g8, i32 %v16) #2
   %v18 = tail call i32 @llvm.hexagon.M2.maci(i32 1, i32 2147483647, i32 2147483647)
-  %v19 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @g9, i32 0, i32 0), i32 %v18) #2
+  %v19 = tail call i32 (ptr, ...) @f0(ptr @g9, i32 %v18) #2
   %v20 = tail call i32 @llvm.hexagon.M2.maci(i32 2147483647, i32 2147483647, i32 2147483647)
-  %v21 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([54 x i8], [54 x i8]* @g10, i32 0, i32 0), i32 %v20) #2
+  %v21 = tail call i32 (ptr, ...) @f0(ptr @g10, i32 %v20) #2
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/bug14859-iv-cleanup-lpad.ll b/llvm/test/CodeGen/Hexagon/bug14859-iv-cleanup-lpad.ll
index 7edad88df7ff5..e0417e7340025 100644
--- a/llvm/test/CodeGen/Hexagon/bug14859-iv-cleanup-lpad.ll
+++ b/llvm/test/CodeGen/Hexagon/bug14859-iv-cleanup-lpad.ll
@@ -3,45 +3,40 @@
 
 target triple = "hexagon"
 
-%s.0 = type { i32 (...)**, i32, %s.1 }
-%s.1 = type { %s.2, %s.5*, %s.6*, i32 }
-%s.2 = type { i32 (...)**, i32, i8, i8, i16, i32, i32, %s.3*, %s.4*, i32* }
-%s.3 = type { %s.3*, i32, i32, i8* }
-%s.4 = type { %s.4*, i32, void (i8, %s.2*, i32)* }
-%s.5 = type { i32 (...)**, i8, i32*, i32*, i32**, i32**, i32*, i32*, i32**, i32**, i32*, i32*, i32**, i32**, i32* }
-%s.6 = type { i32 (...)**, %s.1 }
+%s.0 = type { ptr, i32, %s.1 }
+%s.1 = type { %s.2, ptr, ptr, i32 }
+%s.2 = type { ptr, i32, i8, i8, i16, i32, i32, ptr, ptr, ptr }
+%s.3 = type { ptr, i32, i32, ptr }
+%s.4 = type { ptr, i32, ptr }
+%s.5 = type { ptr, i8, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
+%s.6 = type { ptr, %s.1 }
 %s.7 = type { %s.8, i8 }
-%s.8 = type { %s.0* }
+%s.8 = type { ptr }
 
-define %s.0* @f0(%s.0* %a0, i32* nocapture %a1, i32 %a2, i32 signext %a3) align 2 personality i8* bitcast (i32 (...)* @f11 to i8*) {
+define ptr @f0(ptr %a0, ptr nocapture %a1, i32 %a2, i32 signext %a3) align 2 personality ptr @f11 {
 b0:
   %v0 = alloca %s.7, align 4
-  %v1 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 1
-  store i32 0, i32* %v1, align 4, !tbaa !0
-  call void @f2(%s.7* %v0, %s.0* %a0, i1 zeroext true)
-  %v2 = getelementptr inbounds %s.7, %s.7* %v0, i32 0, i32 1
-  %v3 = load i8, i8* %v2, align 4, !tbaa !4, !range !6
+  %v1 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 1
+  store i32 0, ptr %v1, align 4, !tbaa !0
+  call void @f2(ptr %v0, ptr %a0, i1 zeroext true)
+  %v2 = getelementptr inbounds %s.7, ptr %v0, i32 0, i32 1
+  %v3 = load i8, ptr %v2, align 4, !tbaa !4, !range !6
   %v4 = icmp ne i8 %v3, 0
   %v5 = icmp sgt i32 %a2, 0
   %v6 = and i1 %v4, %v5
-  %v7 = bitcast %s.0* %a0 to i8**
   br i1 %v6, label %b2, label %b1
 
 b1:                                               ; preds = %b0
-  %v8 = bitcast %s.0* %a0 to i8*
   br label %b16
 
 b2:                                               ; preds = %b0
-  %v9 = load i8*, i8** %v7, align 4, !tbaa !7
-  %v10 = getelementptr i8, i8* %v9, i32 -12
-  %v11 = bitcast i8* %v10 to i32*
-  %v12 = load i32, i32* %v11, align 4
-  %v13 = bitcast %s.0* %a0 to i8*
+  %v9 = load ptr, ptr %a0, align 4, !tbaa !7
+  %v10 = getelementptr i8, ptr %v9, i32 -12
+  %v12 = load i32, ptr %v10, align 4
   %v14 = add i32 %v12, 32
-  %v15 = getelementptr inbounds i8, i8* %v13, i32 %v14
-  %v16 = bitcast i8* %v15 to %s.5**
-  %v17 = load %s.5*, %s.5** %v16, align 4, !tbaa !9
-  %v18 = invoke signext i32 @f3(%s.5* %v17)
+  %v15 = getelementptr inbounds i8, ptr %a0, i32 %v14
+  %v17 = load ptr, ptr %v15, align 4, !tbaa !9
+  %v18 = invoke signext i32 @f3(ptr %v17)
           to label %b3 unwind label %b7
 
 b3:                                               ; preds = %b2
@@ -50,44 +45,42 @@ b3:                                               ; preds = %b2
 b4:                                               ; preds = %b13, %b3
   %v19 = phi i32 [ %v68, %b13 ], [ %v18, %b3 ]
   %v20 = phi i32 [ %v55, %b13 ], [ %a2, %b3 ]
-  %v21 = phi i32* [ %v59, %b13 ], [ %a1, %b3 ]
+  %v21 = phi ptr [ %v59, %b13 ], [ %a1, %b3 ]
   %v22 = icmp eq i32 %v19, -1
   br i1 %v22, label %b15, label %b10
 
 b5:                                               ; preds = %b16, %b9
-  %v23 = landingpad { i8*, i32 }
+  %v23 = landingpad { ptr, i32 }
           cleanup
-  %v24 = extractvalue { i8*, i32 } %v23, 0
-  %v25 = extractvalue { i8*, i32 } %v23, 1
+  %v24 = extractvalue { ptr, i32 } %v23, 0
+  %v25 = extractvalue { ptr, i32 } %v23, 1
   br label %b18
 
 b6:                                               ; preds = %b13
-  %v26 = landingpad { i8*, i32 }
-          catch i8* null
+  %v26 = landingpad { ptr, i32 }
+          catch ptr null
   br label %b8
 
 b7:                                               ; preds = %b11, %b2
-  %v27 = phi i32* [ %v21, %b11 ], [ %a1, %b2 ]
-  %v28 = landingpad { i8*, i32 }
-          catch i8* null
+  %v27 = phi ptr [ %v21, %b11 ], [ %a1, %b2 ]
+  %v28 = landingpad { ptr, i32 }
+          catch ptr null
   br label %b8
 
 b8:                                               ; preds = %b7, %b6
-  %v29 = phi i32* [ %v59, %b6 ], [ %v27, %b7 ]
-  %v30 = phi { i8*, i32 } [ %v26, %b6 ], [ %v28, %b7 ]
-  %v31 = extractvalue { i8*, i32 } %v30, 0
-  %v32 = call i8* @f9(i8* %v31) #0
-  %v33 = load i8*, i8** %v7, align 4, !tbaa !7
-  %v34 = getelementptr i8, i8* %v33, i32 -12
-  %v35 = bitcast i8* %v34 to i32*
-  %v36 = load i32, i32* %v35, align 4
-  %v37 = getelementptr inbounds i8, i8* %v13, i32 %v36
-  %v38 = bitcast i8* %v37 to %s.1*
+  %v29 = phi ptr [ %v59, %b6 ], [ %v27, %b7 ]
+  %v30 = phi { ptr, i32 } [ %v26, %b6 ], [ %v28, %b7 ]
+  %v31 = extractvalue { ptr, i32 } %v30, 0
+  %v32 = call ptr @f9(ptr %v31) #0
+  %v33 = load ptr, ptr %a0, align 4, !tbaa !7
+  %v34 = getelementptr i8, ptr %v33, i32 -12
+  %v36 = load i32, ptr %v34, align 4
+  %v37 = getelementptr inbounds i8, ptr %a0, i32 %v36
   %v39 = add i32 %v36, 8
-  %v40 = getelementptr inbounds i8, i8* %v13, i32 %v39
-  %v41 = load i8, i8* %v40, align 1, !tbaa !11
+  %v40 = getelementptr inbounds i8, ptr %a0, i32 %v39
+  %v41 = load i8, ptr %v40, align 1, !tbaa !11
   %v42 = or i8 %v41, 4
-  invoke void @f6(%s.1* %v38, i8 zeroext %v42, i1 zeroext true)
+  invoke void @f6(ptr %v37, i8 zeroext %v42, i1 zeroext true)
           to label %b9 unwind label %b14
 
 b9:                                               ; preds = %b8
@@ -99,18 +92,16 @@ b10:                                              ; preds = %b4
   br i1 %v43, label %b11, label %b12
 
 b11:                                              ; preds = %b10
-  %v44 = load i32, i32* %v1, align 4, !tbaa !0
+  %v44 = load i32, ptr %v1, align 4, !tbaa !0
   %v45 = add nsw i32 %v44, 1
-  store i32 %v45, i32* %v1, align 4, !tbaa !0
-  %v46 = load i8*, i8** %v7, align 4, !tbaa !7
-  %v47 = getelementptr i8, i8* %v46, i32 -12
-  %v48 = bitcast i8* %v47 to i32*
-  %v49 = load i32, i32* %v48, align 4
+  store i32 %v45, ptr %v1, align 4, !tbaa !0
+  %v46 = load ptr, ptr %a0, align 4, !tbaa !7
+  %v47 = getelementptr i8, ptr %v46, i32 -12
+  %v49 = load i32, ptr %v47, align 4
   %v50 = add i32 %v49, 32
-  %v51 = getelementptr inbounds i8, i8* %v13, i32 %v50
-  %v52 = bitcast i8* %v51 to %s.5**
-  %v53 = load %s.5*, %s.5** %v52, align 4, !tbaa !9
-  %v54 = invoke signext i32 @f4(%s.5* %v53)
+  %v51 = getelementptr inbounds i8, ptr %a0, i32 %v50
+  %v53 = load ptr, ptr %v51, align 4, !tbaa !9
+  %v54 = invoke signext i32 @f4(ptr %v53)
           to label %b16 unwind label %b7
 
 b12:                                              ; preds = %b10
@@ -119,27 +110,25 @@ b12:                                              ; preds = %b10
   br i1 %v56, label %b15, label %b13
 
 b13:                                              ; preds = %b12
-  %v57 = load i32, i32* %v1, align 4, !tbaa !0
+  %v57 = load i32, ptr %v1, align 4, !tbaa !0
   %v58 = add nsw i32 %v57, 1
-  store i32 %v58, i32* %v1, align 4, !tbaa !0
-  %v59 = getelementptr inbounds i32, i32* %v21, i32 1
-  store i32 %v19, i32* %v21, align 4, !tbaa !13
-  %v60 = load i8*, i8** %v7, align 4, !tbaa !7
-  %v61 = getelementptr i8, i8* %v60, i32 -12
-  %v62 = bitcast i8* %v61 to i32*
-  %v63 = load i32, i32* %v62, align 4
+  store i32 %v58, ptr %v1, align 4, !tbaa !0
+  %v59 = getelementptr inbounds i32, ptr %v21, i32 1
+  store i32 %v19, ptr %v21, align 4, !tbaa !13
+  %v60 = load ptr, ptr %a0, align 4, !tbaa !7
+  %v61 = getelementptr i8, ptr %v60, i32 -12
+  %v63 = load i32, ptr %v61, align 4
   %v64 = add i32 %v63, 32
-  %v65 = getelementptr inbounds i8, i8* %v13, i32 %v64
-  %v66 = bitcast i8* %v65 to %s.5**
-  %v67 = load %s.5*, %s.5** %v66, align 4, !tbaa !9
-  %v68 = invoke signext i32 @f5(%s.5* %v67)
+  %v65 = getelementptr inbounds i8, ptr %a0, i32 %v64
+  %v67 = load ptr, ptr %v65, align 4, !tbaa !9
+  %v68 = invoke signext i32 @f5(ptr %v67)
           to label %b4 unwind label %b6
 
 b14:                                              ; preds = %b8
-  %v69 = landingpad { i8*, i32 }
+  %v69 = landingpad { ptr, i32 }
           cleanup
-  %v70 = extractvalue { i8*, i32 } %v69, 0
-  %v71 = extractvalue { i8*, i32 } %v69, 1
+  %v70 = extractvalue { ptr, i32 } %v69, 0
+  %v71 = extractvalue { ptr, i32 } %v69, 1
   invoke void @f10()
           to label %b18 unwind label %b20
 
@@ -148,64 +137,60 @@ b15:                                              ; preds = %b12, %b4
   br label %b16
 
 b16:                                              ; preds = %b15, %b11, %b9, %b1
-  %v73 = phi i8* [ %v8, %b1 ], [ %v13, %b11 ], [ %v13, %b9 ], [ %v13, %b15 ]
+  %v73 = phi ptr [ %a0, %b1 ], [ %a0, %b11 ], [ %a0, %b9 ], [ %a0, %b15 ]
   %v74 = phi i8 [ 0, %b1 ], [ 0, %b11 ], [ 0, %b9 ], [ %v72, %b15 ]
-  %v75 = phi i32* [ %a1, %b1 ], [ %v21, %b11 ], [ %v29, %b9 ], [ %v21, %b15 ]
-  store i32 0, i32* %v75, align 4, !tbaa !13
-  %v76 = load i8*, i8** %v7, align 4, !tbaa !7
-  %v77 = getelementptr i8, i8* %v76, i32 -12
-  %v78 = bitcast i8* %v77 to i32*
-  %v79 = load i32, i32* %v78, align 4
-  %v80 = getelementptr inbounds i8, i8* %v73, i32 %v79
-  %v81 = bitcast i8* %v80 to %s.1*
-  %v82 = load i32, i32* %v1, align 4, !tbaa !0
+  %v75 = phi ptr [ %a1, %b1 ], [ %v21, %b11 ], [ %v29, %b9 ], [ %v21, %b15 ]
+  store i32 0, ptr %v75, align 4, !tbaa !13
+  %v76 = load ptr, ptr %a0, align 4, !tbaa !7
+  %v77 = getelementptr i8, ptr %v76, i32 -12
+  %v79 = load i32, ptr %v77, align 4
+  %v80 = getelementptr inbounds i8, ptr %v73, i32 %v79
+  %v82 = load i32, ptr %v1, align 4, !tbaa !0
   %v83 = icmp eq i32 %v82, 0
   %v84 = or i8 %v74, 2
   %v85 = select i1 %v83, i8 %v84, i8 %v74
-  invoke void @f7(%s.1* %v81, i8 zeroext %v85, i1 zeroext false)
+  invoke void @f7(ptr %v80, i8 zeroext %v85, i1 zeroext false)
           to label %b17 unwind label %b5
 
 b17:                                              ; preds = %b16
-  %v86 = getelementptr inbounds %s.7, %s.7* %v0, i32 0, i32 0
-  call void @f1(%s.8* %v86)
-  ret %s.0* %a0
+  call void @f1(ptr %v0)
+  ret ptr %a0
 
 b18:                                              ; preds = %b14, %b5
-  %v87 = phi i8* [ %v24, %b5 ], [ %v70, %b14 ]
+  %v87 = phi ptr [ %v24, %b5 ], [ %v70, %b14 ]
   %v88 = phi i32 [ %v25, %b5 ], [ %v71, %b14 ]
-  %v89 = getelementptr inbounds %s.7, %s.7* %v0, i32 0, i32 0
-  invoke void @f1(%s.8* %v89)
+  invoke void @f1(ptr %v0)
           to label %b19 unwind label %b20
 
 b19:                                              ; preds = %b18
-  %v90 = insertvalue { i8*, i32 } undef, i8* %v87, 0
-  %v91 = insertvalue { i8*, i32 } %v90, i32 %v88, 1
-  resume { i8*, i32 } %v91
+  %v90 = insertvalue { ptr, i32 } undef, ptr %v87, 0
+  %v91 = insertvalue { ptr, i32 } %v90, i32 %v88, 1
+  resume { ptr, i32 } %v91
 
 b20:                                              ; preds = %b18, %b14
-  %v92 = landingpad { i8*, i32 }
-          catch i8* null
+  %v92 = landingpad { ptr, i32 }
+          catch ptr null
   call void @f8() #1
   unreachable
 }
 
-declare void @f1(%s.8* nocapture) unnamed_addr align 2
+declare void @f1(ptr nocapture) unnamed_addr align 2
 
-declare void @f2(%s.7* nocapture, %s.0*, i1 zeroext) unnamed_addr align 2
+declare void @f2(ptr nocapture, ptr, i1 zeroext) unnamed_addr align 2
 
-declare signext i32 @f3(%s.5*) align 2
+declare signext i32 @f3(ptr) align 2
 
-declare signext i32 @f4(%s.5*) align 2
+declare signext i32 @f4(ptr) align 2
 
-declare signext i32 @f5(%s.5*) align 2
+declare signext i32 @f5(ptr) align 2
 
-declare void @f6(%s.1*, i8 zeroext, i1 zeroext) align 2
+declare void @f6(ptr, i8 zeroext, i1 zeroext) align 2
 
-declare void @f7(%s.1*, i8 zeroext, i1 zeroext) align 2
+declare void @f7(ptr, i8 zeroext, i1 zeroext) align 2
 
 declare void @f8()
 
-declare i8* @f9(i8*)
+declare ptr @f9(ptr)
 
 declare void @f10()
 

diff  --git a/llvm/test/CodeGen/Hexagon/bug14859-split-const-block-addr.ll b/llvm/test/CodeGen/Hexagon/bug14859-split-const-block-addr.ll
index 032d02b3eb126..0efec7222cea9 100644
--- a/llvm/test/CodeGen/Hexagon/bug14859-split-const-block-addr.ll
+++ b/llvm/test/CodeGen/Hexagon/bug14859-split-const-block-addr.ll
@@ -3,17 +3,17 @@
 
 target triple = "hexagon"
 
-%s.0 = type { %s.1, %s.1* }
-%s.1 = type { i8*, i8*, i8*, i32 }
+%s.0 = type { %s.1, ptr }
+%s.1 = type { ptr, ptr, ptr, i32 }
 
 ; Function Attrs: nounwind
-declare i32 @f0(%s.0* nocapture) #0 align 2
+declare i32 @f0(ptr nocapture) #0 align 2
 
 ; Function Attrs: nounwind
-declare void @f1(%s.0* nocapture) unnamed_addr #0 align 2
+declare void @f1(ptr nocapture) unnamed_addr #0 align 2
 
 ; Function Attrs: inlinehint
-define void @f2(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, %s.0* %a5, i1 (i8, i8)* %a6) #1 {
+define void @f2(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, ptr %a5, ptr %a6) #1 {
 b0:
   %v0 = alloca %s.0, align 4
   %v1 = alloca %s.0, align 4
@@ -21,14 +21,14 @@ b0:
   %v3 = alloca %s.0, align 4
   %v4 = alloca %s.0, align 4
   %v5 = alloca %s.0, align 4
-  %v6 = inttoptr i32 %a0 to i8*
-  %v7 = inttoptr i32 %a1 to i8*
+  %v6 = inttoptr i32 %a0 to ptr
+  %v7 = inttoptr i32 %a1 to ptr
   %v8 = add nsw i32 %a4, %a3
   %v9 = icmp eq i32 %v8, 2
   br i1 %v9, label %b1, label %b2
 
 b1:                                               ; preds = %b0
-  call void @f7(i8* %v7, i8* %v6, i1 (i8, i8)* %a6)
+  call void @f7(ptr %v7, ptr %v6, ptr %a6)
   br label %b43
 
 b2:                                               ; preds = %b0
@@ -36,227 +36,217 @@ b2:                                               ; preds = %b0
   br i1 %v10, label %b18, label %b3
 
 b3:                                               ; preds = %b2
-  %v11 = call i32 @f0(%s.0* %a5)
+  %v11 = call i32 @f0(ptr %a5)
   %v12 = icmp slt i32 %v11, %a3
   br i1 %v12, label %b18, label %b4
 
 b4:                                               ; preds = %b3
-  %v13 = getelementptr inbounds %s.0, %s.0* %a5, i32 0, i32 1
-  %v14 = load %s.1*, %s.1** %v13, align 4, !tbaa !0
-  %v15 = getelementptr inbounds %s.1, %s.1* %v14, i32 0, i32 0
-  %v16 = load i8*, i8** %v15, align 4, !tbaa !0
-  %v17 = getelementptr inbounds %s.1, %s.1* %v14, i32 0, i32 1
-  store i8* %v16, i8** %v17, align 4, !tbaa !0
-  %v18 = bitcast %s.0* %v3 to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 4 %v18, i8 0, i64 16, i1 false)
-  %v19 = load %s.1*, %s.1** %v13, align 4, !tbaa !0
-  %v20 = getelementptr inbounds %s.0, %s.0* %v3, i32 0, i32 1
-  store %s.1* %v19, %s.1** %v20, align 4, !tbaa !0
-  %v21 = bitcast %s.0* %v1 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 -1, i8* %v21)
-  call void @llvm.memset.p0i8.i64(i8* align 4 %v21, i8 0, i64 16, i1 false)
-  %v22 = getelementptr inbounds %s.0, %s.0* %v1, i32 0, i32 1
-  store %s.1* %v19, %s.1** %v22, align 4, !tbaa !0
-  %v23 = icmp eq i8* %v6, %v7
+  %v13 = getelementptr inbounds %s.0, ptr %a5, i32 0, i32 1
+  %v14 = load ptr, ptr %v13, align 4, !tbaa !0
+  %v16 = load ptr, ptr %v14, align 4, !tbaa !0
+  %v17 = getelementptr inbounds %s.1, ptr %v14, i32 0, i32 1
+  store ptr %v16, ptr %v17, align 4, !tbaa !0
+  call void @llvm.memset.p0.i64(ptr align 4 %v3, i8 0, i64 16, i1 false)
+  %v19 = load ptr, ptr %v13, align 4, !tbaa !0
+  %v20 = getelementptr inbounds %s.0, ptr %v3, i32 0, i32 1
+  store ptr %v19, ptr %v20, align 4, !tbaa !0
+  call void @llvm.lifetime.start.p0(i64 -1, ptr %v1)
+  call void @llvm.memset.p0.i64(ptr align 4 %v1, i8 0, i64 16, i1 false)
+  %v22 = getelementptr inbounds %s.0, ptr %v1, i32 0, i32 1
+  store ptr %v19, ptr %v22, align 4, !tbaa !0
+  %v23 = icmp eq ptr %v6, %v7
   br i1 %v23, label %b6, label %b5
 
 b5:                                               ; preds = %b4
-  call void @f8(i8* %v6, %s.0* %v1, i8* %v7)
-  %v24 = load %s.1*, %s.1** %v22, align 4, !tbaa !0
+  call void @f8(ptr %v6, ptr %v1, ptr %v7)
+  %v24 = load ptr, ptr %v22, align 4, !tbaa !0
   br label %b6
 
 b6:                                               ; preds = %b5, %b4
-  %v25 = phi %s.1* [ %v24, %b5 ], [ %v19, %b4 ]
-  %v26 = bitcast %s.0* %v2 to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 4 %v26, i8 0, i64 16, i1 false)
-  %v27 = getelementptr inbounds %s.0, %s.0* %v2, i32 0, i32 1
-  store %s.1* %v25, %s.1** %v27, align 4, !tbaa !0
-  call void @f1(%s.0* %v1) #0
-  call void @llvm.lifetime.end.p0i8(i64 -1, i8* %v21)
-  call void @f1(%s.0* %v2) #0
-  call void @f1(%s.0* %v3) #0
-  %v28 = load %s.1*, %s.1** %v13, align 4, !tbaa !0
-  %v29 = getelementptr inbounds %s.1, %s.1* %v28, i32 0, i32 0
-  %v30 = load i8*, i8** %v29, align 4, !tbaa !0
-  %v31 = getelementptr inbounds %s.1, %s.1* %v28, i32 0, i32 1
-  %v32 = load i8*, i8** %v31, align 4, !tbaa !0
-  %v33 = inttoptr i32 %a2 to i8*
-  %v34 = icmp eq i8* %v30, %v32
+  %v25 = phi ptr [ %v24, %b5 ], [ %v19, %b4 ]
+  call void @llvm.memset.p0.i64(ptr align 4 %v2, i8 0, i64 16, i1 false)
+  %v27 = getelementptr inbounds %s.0, ptr %v2, i32 0, i32 1
+  store ptr %v25, ptr %v27, align 4, !tbaa !0
+  call void @f1(ptr %v1) #0
+  call void @llvm.lifetime.end.p0(i64 -1, ptr %v1)
+  call void @f1(ptr %v2) #0
+  call void @f1(ptr %v3) #0
+  %v28 = load ptr, ptr %v13, align 4, !tbaa !0
+  %v30 = load ptr, ptr %v28, align 4, !tbaa !0
+  %v31 = getelementptr inbounds %s.1, ptr %v28, i32 0, i32 1
+  %v32 = load ptr, ptr %v31, align 4, !tbaa !0
+  %v33 = inttoptr i32 %a2 to ptr
+  %v34 = icmp eq ptr %v30, %v32
   br i1 %v34, label %b15, label %b7
 
 b7:                                               ; preds = %b6
   br label %b8
 
 b8:                                               ; preds = %b12, %b7
-  %v35 = phi i8* [ %v47, %b12 ], [ %v30, %b7 ]
-  %v36 = phi i8* [ %v48, %b12 ], [ %v6, %b7 ]
-  %v37 = phi i8* [ %v46, %b12 ], [ %v7, %b7 ]
-  %v38 = icmp eq i8* %v37, %v33
+  %v35 = phi ptr [ %v47, %b12 ], [ %v30, %b7 ]
+  %v36 = phi ptr [ %v48, %b12 ], [ %v6, %b7 ]
+  %v37 = phi ptr [ %v46, %b12 ], [ %v7, %b7 ]
+  %v38 = icmp eq ptr %v37, %v33
   br i1 %v38, label %b13, label %b9
 
 b9:                                               ; preds = %b8
-  %v39 = load i8, i8* %v37, align 1, !tbaa !4
-  %v40 = load i8, i8* %v35, align 1, !tbaa !4
+  %v39 = load i8, ptr %v37, align 1, !tbaa !4
+  %v40 = load i8, ptr %v35, align 1, !tbaa !4
   %v41 = call zeroext i1 %a6(i8 zeroext %v39, i8 zeroext %v40)
   br i1 %v41, label %b10, label %b11
 
 b10:                                              ; preds = %b9
-  %v42 = load i8, i8* %v37, align 1, !tbaa !4
-  store i8 %v42, i8* %v36, align 1, !tbaa !4
-  %v43 = getelementptr inbounds i8, i8* %v37, i32 1
+  %v42 = load i8, ptr %v37, align 1, !tbaa !4
+  store i8 %v42, ptr %v36, align 1, !tbaa !4
+  %v43 = getelementptr inbounds i8, ptr %v37, i32 1
   br label %b12
 
 b11:                                              ; preds = %b9
-  %v44 = load i8, i8* %v35, align 1, !tbaa !4
-  store i8 %v44, i8* %v36, align 1, !tbaa !4
-  %v45 = getelementptr inbounds i8, i8* %v35, i32 1
+  %v44 = load i8, ptr %v35, align 1, !tbaa !4
+  store i8 %v44, ptr %v36, align 1, !tbaa !4
+  %v45 = getelementptr inbounds i8, ptr %v35, i32 1
   br label %b12
 
 b12:                                              ; preds = %b11, %b10
-  %v46 = phi i8* [ %v43, %b10 ], [ %v37, %b11 ]
-  %v47 = phi i8* [ %v35, %b10 ], [ %v45, %b11 ]
-  %v48 = getelementptr inbounds i8, i8* %v36, i32 1
-  %v49 = icmp eq i8* %v47, %v32
+  %v46 = phi ptr [ %v43, %b10 ], [ %v37, %b11 ]
+  %v47 = phi ptr [ %v35, %b10 ], [ %v45, %b11 ]
+  %v48 = getelementptr inbounds i8, ptr %v36, i32 1
+  %v49 = icmp eq ptr %v47, %v32
   br i1 %v49, label %b14, label %b8
 
 b13:                                              ; preds = %b8
-  call void @f9(i8* %v35, i8* %v36, i8* %v32)
+  call void @f9(ptr %v35, ptr %v36, ptr %v32)
   br label %b43
 
 b14:                                              ; preds = %b12
   br label %b15
 
 b15:                                              ; preds = %b14, %b6
-  %v50 = phi i8* [ %v7, %b6 ], [ %v46, %b14 ]
-  %v51 = phi i8* [ %v6, %b6 ], [ %v48, %b14 ]
-  %v52 = icmp eq i8* %v50, %v33
+  %v50 = phi ptr [ %v7, %b6 ], [ %v46, %b14 ]
+  %v51 = phi ptr [ %v6, %b6 ], [ %v48, %b14 ]
+  %v52 = icmp eq ptr %v50, %v33
   br i1 %v52, label %b43, label %b16
 
 b16:                                              ; preds = %b15
   br label %b17
 
 b17:                                              ; preds = %b17, %b16
-  %v53 = phi i8* [ %v56, %b17 ], [ %v51, %b16 ]
-  %v54 = phi i8* [ %v57, %b17 ], [ %v50, %b16 ]
-  %v55 = load i8, i8* %v54, align 1, !tbaa !4
-  store i8 %v55, i8* %v53, align 1, !tbaa !4
-  %v56 = getelementptr inbounds i8, i8* %v53, i32 1
-  %v57 = getelementptr inbounds i8, i8* %v54, i32 1
-  %v58 = icmp eq i8* %v57, %v33
+  %v53 = phi ptr [ %v56, %b17 ], [ %v51, %b16 ]
+  %v54 = phi ptr [ %v57, %b17 ], [ %v50, %b16 ]
+  %v55 = load i8, ptr %v54, align 1, !tbaa !4
+  store i8 %v55, ptr %v53, align 1, !tbaa !4
+  %v56 = getelementptr inbounds i8, ptr %v53, i32 1
+  %v57 = getelementptr inbounds i8, ptr %v54, i32 1
+  %v58 = icmp eq ptr %v57, %v33
   br i1 %v58, label %b42, label %b17
 
 b18:                                              ; preds = %b3, %b2
-  %v59 = call i32 @f0(%s.0* %a5)
+  %v59 = call i32 @f0(ptr %a5)
   %v60 = icmp slt i32 %v59, %a4
   br i1 %v60, label %b33, label %b19
 
 b19:                                              ; preds = %b18
-  %v61 = getelementptr inbounds %s.0, %s.0* %a5, i32 0, i32 1
-  %v62 = load %s.1*, %s.1** %v61, align 4, !tbaa !0
-  %v63 = getelementptr inbounds %s.1, %s.1* %v62, i32 0, i32 0
-  %v64 = load i8*, i8** %v63, align 4, !tbaa !0
-  %v65 = getelementptr inbounds %s.1, %s.1* %v62, i32 0, i32 1
-  store i8* %v64, i8** %v65, align 4, !tbaa !0
-  %v66 = bitcast %s.0* %v5 to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 4 %v66, i8 0, i64 16, i1 false)
-  %v67 = load %s.1*, %s.1** %v61, align 4, !tbaa !0
-  %v68 = getelementptr inbounds %s.0, %s.0* %v5, i32 0, i32 1
-  store %s.1* %v67, %s.1** %v68, align 4, !tbaa !0
-  %v69 = bitcast %s.0* %v0 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 -1, i8* %v69)
-  call void @llvm.memset.p0i8.i64(i8* align 4 %v69, i8 0, i64 16, i1 false)
-  %v70 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 1
-  store %s.1* %v67, %s.1** %v70, align 4, !tbaa !0
-  %v71 = inttoptr i32 %a2 to i8*
-  %v72 = icmp eq i8* %v7, %v71
+  %v61 = getelementptr inbounds %s.0, ptr %a5, i32 0, i32 1
+  %v62 = load ptr, ptr %v61, align 4, !tbaa !0
+  %v64 = load ptr, ptr %v62, align 4, !tbaa !0
+  %v65 = getelementptr inbounds %s.1, ptr %v62, i32 0, i32 1
+  store ptr %v64, ptr %v65, align 4, !tbaa !0
+  call void @llvm.memset.p0.i64(ptr align 4 %v5, i8 0, i64 16, i1 false)
+  %v67 = load ptr, ptr %v61, align 4, !tbaa !0
+  %v68 = getelementptr inbounds %s.0, ptr %v5, i32 0, i32 1
+  store ptr %v67, ptr %v68, align 4, !tbaa !0
+  call void @llvm.lifetime.start.p0(i64 -1, ptr %v0)
+  call void @llvm.memset.p0.i64(ptr align 4 %v0, i8 0, i64 16, i1 false)
+  %v70 = getelementptr inbounds %s.0, ptr %v0, i32 0, i32 1
+  store ptr %v67, ptr %v70, align 4, !tbaa !0
+  %v71 = inttoptr i32 %a2 to ptr
+  %v72 = icmp eq ptr %v7, %v71
   br i1 %v72, label %b21, label %b20
 
 b20:                                              ; preds = %b19
-  call void @f8(i8* %v7, %s.0* %v0, i8* %v71)
-  %v73 = load %s.1*, %s.1** %v70, align 4, !tbaa !0
+  call void @f8(ptr %v7, ptr %v0, ptr %v71)
+  %v73 = load ptr, ptr %v70, align 4, !tbaa !0
   br label %b21
 
 b21:                                              ; preds = %b20, %b19
-  %v74 = phi %s.1* [ %v73, %b20 ], [ %v67, %b19 ]
-  %v75 = bitcast %s.0* %v4 to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 4 %v75, i8 0, i64 16, i1 false)
-  %v76 = getelementptr inbounds %s.0, %s.0* %v4, i32 0, i32 1
-  store %s.1* %v74, %s.1** %v76, align 4, !tbaa !0
-  call void @f1(%s.0* %v0) #0
-  call void @llvm.lifetime.end.p0i8(i64 -1, i8* %v69)
-  call void @f1(%s.0* %v4) #0
-  call void @f1(%s.0* %v5) #0
-  %v77 = load %s.1*, %s.1** %v61, align 4, !tbaa !0
-  %v78 = getelementptr inbounds %s.1, %s.1* %v77, i32 0, i32 0
-  %v79 = load i8*, i8** %v78, align 4, !tbaa !0
-  %v80 = getelementptr inbounds %s.1, %s.1* %v77, i32 0, i32 1
-  %v81 = load i8*, i8** %v80, align 4, !tbaa !0
-  %v82 = icmp eq i8* %v6, %v7
+  %v74 = phi ptr [ %v73, %b20 ], [ %v67, %b19 ]
+  call void @llvm.memset.p0.i64(ptr align 4 %v4, i8 0, i64 16, i1 false)
+  %v76 = getelementptr inbounds %s.0, ptr %v4, i32 0, i32 1
+  store ptr %v74, ptr %v76, align 4, !tbaa !0
+  call void @f1(ptr %v0) #0
+  call void @llvm.lifetime.end.p0(i64 -1, ptr %v0)
+  call void @f1(ptr %v4) #0
+  call void @f1(ptr %v5) #0
+  %v77 = load ptr, ptr %v61, align 4, !tbaa !0
+  %v79 = load ptr, ptr %v77, align 4, !tbaa !0
+  %v80 = getelementptr inbounds %s.1, ptr %v77, i32 0, i32 1
+  %v81 = load ptr, ptr %v80, align 4, !tbaa !0
+  %v82 = icmp eq ptr %v6, %v7
   br i1 %v82, label %b25, label %b22
 
 b22:                                              ; preds = %b21
   br label %b23
 
 b23:                                              ; preds = %b31, %b22
-  %v83 = phi i8* [ %v100, %b31 ], [ %v81, %b22 ]
-  %v84 = phi i8* [ %v111, %b31 ], [ %v71, %b22 ]
-  %v85 = phi i8* [ %v86, %b31 ], [ %v7, %b22 ]
-  %v86 = getelementptr inbounds i8, i8* %v85, i32 -1
-  %v87 = icmp eq i8* %v83, %v79
+  %v83 = phi ptr [ %v100, %b31 ], [ %v81, %b22 ]
+  %v84 = phi ptr [ %v111, %b31 ], [ %v71, %b22 ]
+  %v85 = phi ptr [ %v86, %b31 ], [ %v7, %b22 ]
+  %v86 = getelementptr inbounds i8, ptr %v85, i32 -1
+  %v87 = icmp eq ptr %v83, %v79
   br i1 %v87, label %b28, label %b24
 
 b24:                                              ; preds = %b23
   br label %b30
 
 b25:                                              ; preds = %b31, %b21
-  %v88 = phi i8* [ %v81, %b21 ], [ %v100, %b31 ]
-  %v89 = phi i8* [ %v71, %b21 ], [ %v111, %b31 ]
-  %v90 = icmp eq i8* %v88, %v79
+  %v88 = phi ptr [ %v81, %b21 ], [ %v100, %b31 ]
+  %v89 = phi ptr [ %v71, %b21 ], [ %v111, %b31 ]
+  %v90 = icmp eq ptr %v88, %v79
   br i1 %v90, label %b43, label %b26
 
 b26:                                              ; preds = %b25
   br label %b27
 
 b27:                                              ; preds = %b27, %b26
-  %v91 = phi i8* [ %v93, %b27 ], [ %v88, %b26 ]
-  %v92 = phi i8* [ %v95, %b27 ], [ %v89, %b26 ]
-  %v93 = getelementptr inbounds i8, i8* %v91, i32 -1
-  %v94 = load i8, i8* %v93, align 1, !tbaa !4
-  %v95 = getelementptr inbounds i8, i8* %v92, i32 -1
-  store i8 %v94, i8* %v95, align 1, !tbaa !4
-  %v96 = icmp eq i8* %v93, %v79
+  %v91 = phi ptr [ %v93, %b27 ], [ %v88, %b26 ]
+  %v92 = phi ptr [ %v95, %b27 ], [ %v89, %b26 ]
+  %v93 = getelementptr inbounds i8, ptr %v91, i32 -1
+  %v94 = load i8, ptr %v93, align 1, !tbaa !4
+  %v95 = getelementptr inbounds i8, ptr %v92, i32 -1
+  store i8 %v94, ptr %v95, align 1, !tbaa !4
+  %v96 = icmp eq ptr %v93, %v79
   br i1 %v96, label %b41, label %b27
 
 b28:                                              ; preds = %b31, %b23
-  %v97 = phi i8* [ %v111, %b31 ], [ %v84, %b23 ]
-  %v98 = icmp eq i8* %v6, %v85
+  %v97 = phi ptr [ %v111, %b31 ], [ %v84, %b23 ]
+  %v98 = icmp eq ptr %v6, %v85
   br i1 %v98, label %b43, label %b29
 
 b29:                                              ; preds = %b28
-  call void @f6(i8* %v97, i8* %v85, i8* %v6)
+  call void @f6(ptr %v97, ptr %v85, ptr %v6)
   br label %b43
 
 b30:                                              ; preds = %b31, %b24
-  %v99 = phi i8* [ %v111, %b31 ], [ %v84, %b24 ]
-  %v100 = phi i8* [ %v101, %b31 ], [ %v83, %b24 ]
-  %v101 = getelementptr inbounds i8, i8* %v100, i32 -1
-  %v102 = load i8, i8* %v101, align 1, !tbaa !4
-  %v103 = load i8, i8* %v86, align 1, !tbaa !4
+  %v99 = phi ptr [ %v111, %b31 ], [ %v84, %b24 ]
+  %v100 = phi ptr [ %v101, %b31 ], [ %v83, %b24 ]
+  %v101 = getelementptr inbounds i8, ptr %v100, i32 -1
+  %v102 = load i8, ptr %v101, align 1, !tbaa !4
+  %v103 = load i8, ptr %v86, align 1, !tbaa !4
   %v104 = call zeroext i1 %a6(i8 zeroext %v102, i8 zeroext %v103)
   br i1 %v104, label %b31, label %b32
 
 b31:                                              ; preds = %b32, %b30
-  %v105 = phi i8* [ %v101, %b32 ], [ %v86, %b30 ]
-  %v106 = phi i8* [ %v101, %b32 ], [ %v6, %b30 ]
-  %v107 = phi i8* [ %v79, %b32 ], [ %v86, %b30 ]
-  %v108 = phi i8* [ blockaddress(@f2, %b30), %b32 ], [ blockaddress(@f2, %b23), %b30 ]
-  %v109 = phi i8* [ blockaddress(@f2, %b28), %b32 ], [ blockaddress(@f2, %b25), %b30 ]
-  %v110 = load i8, i8* %v105, align 1, !tbaa !4
-  %v111 = getelementptr inbounds i8, i8* %v99, i32 -1
-  store i8 %v110, i8* %v111, align 1, !tbaa !4
-  %v112 = icmp eq i8* %v106, %v107
-  %v113 = select i1 %v112, i8* %v109, i8* %v108
-  indirectbr i8* %v113, [label %b25, label %b28, label %b23, label %b30]
+  %v105 = phi ptr [ %v101, %b32 ], [ %v86, %b30 ]
+  %v106 = phi ptr [ %v101, %b32 ], [ %v6, %b30 ]
+  %v107 = phi ptr [ %v79, %b32 ], [ %v86, %b30 ]
+  %v108 = phi ptr [ blockaddress(@f2, %b30), %b32 ], [ blockaddress(@f2, %b23), %b30 ]
+  %v109 = phi ptr [ blockaddress(@f2, %b28), %b32 ], [ blockaddress(@f2, %b25), %b30 ]
+  %v110 = load i8, ptr %v105, align 1, !tbaa !4
+  %v111 = getelementptr inbounds i8, ptr %v99, i32 -1
+  store i8 %v110, ptr %v111, align 1, !tbaa !4
+  %v112 = icmp eq ptr %v106, %v107
+  %v113 = select i1 %v112, ptr %v109, ptr %v108
+  indirectbr ptr %v113, [label %b25, label %b28, label %b23, label %b30]
 
 b32:                                              ; preds = %b30
   br label %b31
@@ -266,50 +256,50 @@ b33:                                              ; preds = %b18
 
 b34:                                              ; preds = %b33
   %v114 = sdiv i32 %a3, 2
-  %v115 = getelementptr inbounds i8, i8* %v6, i32 %v114
+  %v115 = getelementptr inbounds i8, ptr %v6, i32 %v114
   %v116 = sub i32 %a2, %a1
   %v117 = icmp sgt i32 %v116, 0
   br i1 %v117, label %b35, label %b36
 
 b35:                                              ; preds = %b34
-  %v118 = call i8* @f5(i8* %v7, i32 %v116, i8* %v115, i1 (i8, i8)* %a6)
+  %v118 = call ptr @f5(ptr %v7, i32 %v116, ptr %v115, ptr %a6)
   br label %b36
 
 b36:                                              ; preds = %b35, %b34
-  %v119 = phi i8* [ %v7, %b34 ], [ %v118, %b35 ]
-  %v120 = ptrtoint i8* %v119 to i32
+  %v119 = phi ptr [ %v7, %b34 ], [ %v118, %b35 ]
+  %v120 = ptrtoint ptr %v119 to i32
   %v121 = sub i32 %v120, %a1
   br label %b40
 
 b37:                                              ; preds = %b33
   %v122 = sdiv i32 %a4, 2
-  %v123 = getelementptr inbounds i8, i8* %v7, i32 %v122
+  %v123 = getelementptr inbounds i8, ptr %v7, i32 %v122
   %v124 = sub i32 %a1, %a0
   %v125 = icmp sgt i32 %v124, 0
   br i1 %v125, label %b38, label %b39
 
 b38:                                              ; preds = %b37
-  %v126 = call i8* @f4(i8* %v6, i32 %v124, i8* %v123, i1 (i8, i8)* %a6)
+  %v126 = call ptr @f4(ptr %v6, i32 %v124, ptr %v123, ptr %a6)
   br label %b39
 
 b39:                                              ; preds = %b38, %b37
-  %v127 = phi i8* [ %v6, %b37 ], [ %v126, %b38 ]
-  %v128 = ptrtoint i8* %v127 to i32
+  %v127 = phi ptr [ %v6, %b37 ], [ %v126, %b38 ]
+  %v128 = ptrtoint ptr %v127 to i32
   %v129 = sub i32 %v128, %a0
   br label %b40
 
 b40:                                              ; preds = %b39, %b36
-  %v130 = phi i8* [ %v127, %b39 ], [ %v115, %b36 ]
-  %v131 = phi i8* [ %v123, %b39 ], [ %v119, %b36 ]
+  %v130 = phi ptr [ %v127, %b39 ], [ %v115, %b36 ]
+  %v131 = phi ptr [ %v123, %b39 ], [ %v119, %b36 ]
   %v132 = phi i32 [ %v129, %b39 ], [ %v114, %b36 ]
   %v133 = phi i32 [ %v122, %b39 ], [ %v121, %b36 ]
   %v134 = sub nsw i32 %a3, %v132
-  %v135 = ptrtoint i8* %v130 to i32
-  %v136 = ptrtoint i8* %v131 to i32
-  %v137 = call i32 @f3(i32 %v135, i32 %a1, i32 %v136, i32 %v134, i32 %v133, %s.0* %a5)
-  call void @f2(i32 %a0, i32 %v135, i32 %v137, i32 %v132, i32 %v133, %s.0* %a5, i1 (i8, i8)* %a6)
+  %v135 = ptrtoint ptr %v130 to i32
+  %v136 = ptrtoint ptr %v131 to i32
+  %v137 = call i32 @f3(i32 %v135, i32 %a1, i32 %v136, i32 %v134, i32 %v133, ptr %a5)
+  call void @f2(i32 %a0, i32 %v135, i32 %v137, i32 %v132, i32 %v133, ptr %a5, ptr %a6)
   %v138 = sub nsw i32 %a4, %v133
-  call void @f2(i32 %v137, i32 %v136, i32 %a2, i32 %v134, i32 %v138, %s.0* %a5, i1 (i8, i8)* %a6)
+  call void @f2(i32 %v137, i32 %v136, i32 %a2, i32 %v134, i32 %v138, ptr %a5, ptr %a6)
   br label %b43
 
 b41:                                              ; preds = %b27
@@ -323,34 +313,34 @@ b43:                                              ; preds = %b42, %b41, %b40, %b
 }
 
 ; Function Attrs: inlinehint
-declare i32 @f3(i32, i32, i32, i32, i32, %s.0* nocapture) #1
+declare i32 @f3(i32, i32, i32, i32, i32, ptr nocapture) #1
 
 ; Function Attrs: inlinehint
-declare i8* @f4(i8*, i32, i8*, i1 (i8, i8)*) #1
+declare ptr @f4(ptr, i32, ptr, ptr) #1
 
 ; Function Attrs: inlinehint
-declare i8* @f5(i8*, i32, i8*, i1 (i8, i8)*) #1
+declare ptr @f5(ptr, i32, ptr, ptr) #1
 
 ; Function Attrs: inlinehint
-declare void @f6(i8*, i8*, i8*) #1
+declare void @f6(ptr, ptr, ptr) #1
 
 ; Function Attrs: inlinehint
-declare void @f7(i8*, i8*, i1 (i8, i8)*) #1
+declare void @f7(ptr, ptr, ptr) #1
 
 ; Function Attrs: inlinehint
-declare void @f8(i8*, %s.0*, i8*) #1
+declare void @f8(ptr, ptr, ptr) #1
 
 ; Function Attrs: inlinehint
-declare void @f9(i8*, i8*, i8*) #1
+declare void @f9(ptr, ptr, ptr) #1
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #2
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #2
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #2
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #2
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #2
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1) #2
 
 attributes #0 = { nounwind }
 attributes #1 = { inlinehint }

diff --git a/llvm/test/CodeGen/Hexagon/bug15515-shuffle.ll b/llvm/test/CodeGen/Hexagon/bug15515-shuffle.ll
index 1c1a54720ec91..5b3894a5b6afa 100644
--- a/llvm/test/CodeGen/Hexagon/bug15515-shuffle.ll
+++ b/llvm/test/CodeGen/Hexagon/bug15515-shuffle.ll
@@ -8,34 +8,34 @@ target triple = "hexagon"
 
 @g0 = global i8 -1, align 1
 @g1 = common global [15 x i8] zeroinitializer, align 8
-@g2 = common global [15 x i8*] zeroinitializer, align 8
+@g2 = common global [15 x ptr] zeroinitializer, align 8
 
 ; Function Attrs: nounwind
 define void @f0() #0 {
 b0:
   %v0 = alloca i32, align 4
-  store i32 0, i32* %v0, align 4
-  store i32 0, i32* %v0, align 4
+  store i32 0, ptr %v0, align 4
+  store i32 0, ptr %v0, align 4
   br label %b1
 
 b1:                                               ; preds = %b3, %b0
-  %v1 = load i32, i32* %v0, align 4
+  %v1 = load i32, ptr %v0, align 4
   %v2 = icmp slt i32 %v1, 15
   br i1 %v2, label %b2, label %b4
 
 b2:                                               ; preds = %b1
-  %v3 = load i32, i32* %v0, align 4
-  %v4 = getelementptr inbounds [15 x i8], [15 x i8]* @g1, i32 0, i32 %v3
-  store i8 0, i8* %v4, align 1
-  %v5 = load i32, i32* %v0, align 4
-  %v6 = getelementptr inbounds [15 x i8*], [15 x i8*]* @g2, i32 0, i32 %v5
-  store i8* @g0, i8** %v6, align 4
+  %v3 = load i32, ptr %v0, align 4
+  %v4 = getelementptr inbounds [15 x i8], ptr @g1, i32 0, i32 %v3
+  store i8 0, ptr %v4, align 1
+  %v5 = load i32, ptr %v0, align 4
+  %v6 = getelementptr inbounds [15 x ptr], ptr @g2, i32 0, i32 %v5
+  store ptr @g0, ptr %v6, align 4
   br label %b3
 
 b3:                                               ; preds = %b2
-  %v7 = load i32, i32* %v0, align 4
+  %v7 = load i32, ptr %v0, align 4
   %v8 = add nsw i32 %v7, 1
-  store i32 %v8, i32* %v0, align 4
+  store i32 %v8, ptr %v0, align 4
   br label %b1
 
 b4:                                               ; preds = %b1

diff --git a/llvm/test/CodeGen/Hexagon/bug17276.ll b/llvm/test/CodeGen/Hexagon/bug17276.ll
index 4e189870e7bc2..6997364822916 100644
--- a/llvm/test/CodeGen/Hexagon/bug17276.ll
+++ b/llvm/test/CodeGen/Hexagon/bug17276.ll
@@ -5,23 +5,23 @@
 
 target triple = "hexagon-unknown--elf"
 
-%s.0 = type { i8*, i8* }
-%s.1 = type { i8, [2 x %s.2*] }
+%s.0 = type { ptr, ptr }
+%s.1 = type { i8, [2 x ptr] }
 %s.2 = type { i32, i32 }
 
 @g0 = internal constant %s.0 zeroinitializer, align 4
 
 ; Function Attrs: minsize nounwind
-define i32 @f0(%s.1* %a0) #0 {
+define i32 @f0(ptr %a0) #0 {
 b0:
-  %v0 = tail call i32 @f1(%s.1* %a0, i32 0)
+  %v0 = tail call i32 @f1(ptr %a0, i32 0)
   ret i32 %v0
 }
 
 ; Function Attrs: minsize nounwind
-define internal i32 @f1(%s.1* %a0, i32 %a1) #0 {
+define internal i32 @f1(ptr %a0, i32 %a1) #0 {
 b0:
-  %v0 = icmp eq %s.1* %a0, null
+  %v0 = icmp eq ptr %a0, null
   br i1 %v0, label %b4, label %b1
 
 b1:                                               ; preds = %b0
@@ -29,16 +29,16 @@ b1:                                               ; preds = %b0
   br i1 %v1, label %b3, label %b2
 
 b2:                                               ; preds = %b1
-  tail call void @f2(%s.0* null) #3
+  tail call void @f2(ptr null) #3
   unreachable
 
 b3:                                               ; preds = %b1
-  tail call void @f2(%s.0* @g0) #3
+  tail call void @f2(ptr @g0) #3
   unreachable
 
 b4:                                               ; preds = %b0
-  %v2 = load %s.2*, %s.2** inttoptr (i32 4 to %s.2**), align 4, !tbaa !0
-  %v3 = icmp eq %s.2* %v2, null
+  %v2 = load ptr, ptr inttoptr (i32 4 to ptr), align 4, !tbaa !0
+  %v3 = icmp eq ptr %v2, null
   br i1 %v3, label %b5, label %b6
 
 b5:                                               ; preds = %b4
@@ -46,7 +46,7 @@ b5:                                               ; preds = %b4
   br label %b10
 
 b6:                                               ; preds = %b4
-  %v4 = tail call zeroext i8 @f4(%s.1* null) #4
+  %v4 = tail call zeroext i8 @f4(ptr null) #4
   %v5 = icmp eq i8 %v4, 0
   br i1 %v5, label %b7, label %b8
 
@@ -55,12 +55,11 @@ b7:                                               ; preds = %b6
   br label %b9
 
 b8:                                               ; preds = %b6
-  %v6 = load %s.2*, %s.2** inttoptr (i32 4 to %s.2**), align 4, !tbaa !0
+  %v6 = load ptr, ptr inttoptr (i32 4 to ptr), align 4, !tbaa !0
   %v7 = icmp eq i32 %a1, 1
-  %v8 = getelementptr inbounds %s.2, %s.2* %v6, i32 0, i32 1
-  %v9 = getelementptr inbounds %s.2, %s.2* %v6, i32 0, i32 0
-  %v10 = select i1 %v7, i32* %v8, i32* %v9
-  %v11 = tail call i32 @f5(i32* %v10) #4
+  %v8 = getelementptr inbounds %s.2, ptr %v6, i32 0, i32 1
+  %v10 = select i1 %v7, ptr %v8, ptr %v6
+  %v11 = tail call i32 @f5(ptr %v10) #4
   br label %b9
 
 b9:                                               ; preds = %b8, %b7
@@ -74,21 +73,21 @@ b10:                                              ; preds = %b9, %b5
 }
 
 ; Function Attrs: noreturn optsize
-declare void @f2(%s.0*) #1
+declare void @f2(ptr) #1
 
 ; Function Attrs: optsize
 declare void @f3(i32) #2
 
 ; Function Attrs: optsize
-declare zeroext i8 @f4(%s.1*) #2
+declare zeroext i8 @f4(ptr) #2
 
 ; Function Attrs: optsize
-declare i32 @f5(i32*) #2
+declare i32 @f5(ptr) #2
 
 ; Function Attrs: minsize nounwind
-define i32 @f6(%s.1* %a0) #0 {
+define i32 @f6(ptr %a0) #0 {
 b0:
-  %v0 = tail call i32 @f1(%s.1* %a0, i32 1)
+  %v0 = tail call i32 @f1(ptr %a0, i32 1)
   ret i32 %v0
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/bug17386.ll b/llvm/test/CodeGen/Hexagon/bug17386.ll
index 416770cb03c7b..c36b825a93f77 100644
--- a/llvm/test/CodeGen/Hexagon/bug17386.ll
+++ b/llvm/test/CodeGen/Hexagon/bug17386.ll
@@ -4,7 +4,7 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @f0(i8* %a0, ...) #0 {
+define void @f0(ptr %a0, ...) #0 {
 b0:
   br i1 undef, label %b1, label %b2
 
@@ -27,7 +27,7 @@ b6:                                               ; preds = %b12, %b5
   br i1 undef, label %b9, label %b7
 
 b7:                                               ; preds = %b6
-  %v0 = load i8, i8* undef, align 1, !tbaa !0
+  %v0 = load i8, ptr undef, align 1, !tbaa !0
   %v1 = zext i8 %v0 to i32
   br i1 undef, label %b9, label %b8
 
@@ -63,9 +63,9 @@ b16:                                              ; preds = %b15, %b14
   br label %b17
 
 b17:                                              ; preds = %b18, %b16
-  %v6 = phi i8* [ undef, %b16 ], [ %v7, %b18 ]
-  %v7 = getelementptr inbounds i8, i8* %v6, i32 1
-  %v8 = load i8, i8* %v7, align 1, !tbaa !0
+  %v6 = phi ptr [ undef, %b16 ], [ %v7, %b18 ]
+  %v7 = getelementptr inbounds i8, ptr %v6, i32 1
+  %v8 = load i8, ptr %v7, align 1, !tbaa !0
   br label %b18
 
 b18:                                              ; preds = %b19, %b17

diff --git a/llvm/test/CodeGen/Hexagon/bug18008.ll b/llvm/test/CodeGen/Hexagon/bug18008.ll
index 7d52efd63699e..6be045cdfdf93 100644
--- a/llvm/test/CodeGen/Hexagon/bug18008.ll
+++ b/llvm/test/CodeGen/Hexagon/bug18008.ll
@@ -10,7 +10,7 @@ target triple = "hexagon"
 define i32 @f0() #0 {
 b0:
   %v0 = call <32 x i32> @llvm.hexagon.V6.vd0.128B()
-  store <32 x i32> %v0, <32 x i32>* @g0, align 128
+  store <32 x i32> %v0, ptr @g0, align 128
   ret i32 0
 }
 ; CHECK: { v{{[0-9]}} = vxor(v{{[0-9]}},v{{[0-9]}})
@@ -22,13 +22,13 @@ b0:
   %v1 = alloca i8, align 1
   %v2 = tail call i64 @llvm.hexagon.S2.asr.i.p.rnd.goodsyntax(i64 5, i32 0)
   %v3 = trunc i64 %v2 to i8
-  store volatile i8 %v3, i8* %v0, align 1
+  store volatile i8 %v3, ptr %v0, align 1
   %v4 = tail call i64 @llvm.hexagon.S2.asr.i.p.rnd.goodsyntax(i64 4, i32 4)
   %v5 = trunc i64 %v4 to i8
-  store volatile i8 %v5, i8* %v1, align 1
-  %v6 = load volatile i8, i8* %v0, align 1
+  store volatile i8 %v5, ptr %v1, align 1
+  %v6 = load volatile i8, ptr %v0, align 1
   %v7 = zext i8 %v6 to i32
-  %v8 = load volatile i8, i8* %v1, align 1
+  %v8 = load volatile i8, ptr %v1, align 1
   %v9 = zext i8 %v8 to i32
   %v10 = add nuw nsw i32 %v9, %v7
   ret i32 %v10
@@ -46,13 +46,13 @@ b0:
   %v1 = alloca i8, align 1
   %v2 = tail call i64 @llvm.hexagon.S5.vasrhrnd.goodsyntax(i64 6, i32 0)
   %v3 = trunc i64 %v2 to i8
-  store volatile i8 %v3, i8* %v0, align 1
+  store volatile i8 %v3, ptr %v0, align 1
   %v4 = tail call i64 @llvm.hexagon.S5.vasrhrnd.goodsyntax(i64 4, i32 4)
   %v5 = trunc i64 %v4 to i8
-  store volatile i8 %v5, i8* %v0, align 1
-  %v6 = load volatile i8, i8* %v0, align 1
+  store volatile i8 %v5, ptr %v0, align 1
+  %v6 = load volatile i8, ptr %v0, align 1
   %v7 = zext i8 %v6 to i32
-  %v8 = load volatile i8, i8* %v1, align 1
+  %v8 = load volatile i8, ptr %v1, align 1
   %v9 = zext i8 %v8 to i32
   %v10 = add nuw nsw i32 %v9, %v7
   ret i32 %v10
@@ -70,13 +70,13 @@ b0:
   %v1 = alloca i8, align 1
   %v2 = tail call i32 @llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax(i64 0, i32 0)
   %v3 = trunc i32 %v2 to i8
-  store volatile i8 %v3, i8* %v0, align 1
+  store volatile i8 %v3, ptr %v0, align 1
   %v4 = tail call i32 @llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax(i64 4, i32 4)
   %v5 = trunc i32 %v4 to i8
-  store volatile i8 %v5, i8* %v1, align 1
-  %v6 = load volatile i8, i8* %v0, align 1
+  store volatile i8 %v5, ptr %v1, align 1
+  %v6 = load volatile i8, ptr %v0, align 1
   %v7 = zext i8 %v6 to i32
-  %v8 = load volatile i8, i8* %v1, align 1
+  %v8 = load volatile i8, ptr %v1, align 1
   %v9 = zext i8 %v8 to i32
   %v10 = add nuw nsw i32 %v9, %v7
   ret i32 %v10

diff --git a/llvm/test/CodeGen/Hexagon/bug18491-optsize.ll b/llvm/test/CodeGen/Hexagon/bug18491-optsize.ll
index 43fc099b72ae5..ecbf81fa5d068 100644
--- a/llvm/test/CodeGen/Hexagon/bug18491-optsize.ll
+++ b/llvm/test/CodeGen/Hexagon/bug18491-optsize.ll
@@ -13,22 +13,22 @@ target triple = "hexagon"
 ; Function Attrs: nounwind optsize
 define void @f0(i32 %a0) #0 {
 b0:
-  store i32 1, i32* @g0, align 4
+  store i32 1, ptr @g0, align 4
   ret void
 }
 
 ; Function Attrs: nounwind optsize
 define void @f1(i32 %a0) #0 {
 b0:
-  store i32 1, i32* @g0, align 4
-  store i32 2, i32* @g1, align 4
-  store i32 3, i32* @g2, align 4
-  store i32 4, i32* @g3, align 4
+  store i32 1, ptr @g0, align 4
+  store i32 2, ptr @g1, align 4
+  store i32 3, ptr @g2, align 4
+  store i32 4, ptr @g3, align 4
   ret void
 }
 
 ; Function Attrs: nounwind optsize readnone
-define i32 @f2(i32 %a0, i8** nocapture readnone %a1) #1 {
+define i32 @f2(i32 %a0, ptr nocapture readnone %a1) #1 {
 b0:
   ret i32 %a0
 }

diff --git a/llvm/test/CodeGen/Hexagon/bug19076.ll b/llvm/test/CodeGen/Hexagon/bug19076.ll
index 8ab82063b1654..2e9103e493f4d 100644
--- a/llvm/test/CodeGen/Hexagon/bug19076.ll
+++ b/llvm/test/CodeGen/Hexagon/bug19076.ll
@@ -1,92 +1,89 @@
 ; REQUIRES: asserts
 ; RUN: llc -march=hexagon -stats -o /dev/null < %s
 
-%s.0 = type { %s.1*, %s.2*, %s.17*, i32, i32, i32, i32, i8*, i8*, i8* }
+%s.0 = type { ptr, ptr, ptr, i32, i32, i32, i32, ptr, ptr, ptr }
 %s.1 = type opaque
-%s.2 = type { %s.3, %s.4*, i8* }
+%s.2 = type { %s.3, ptr, ptr }
 %s.3 = type { i32, i32 }
-%s.4 = type { %s.4*, %s.4*, %s.4*, %s.4*, i32, i32, i32, %s.3*, i32, [1 x %s.5]*, [1 x %s.5]*, i8, i8, i8, i8*, i32, %s.4*, %s.8*, i8, i8, i8, i32*, i32, i32*, i32, i8*, i8, %s.9, [32 x i8**], [7 x i8*], i32, i8*, i32, %s.4*, i32, i32, %s.11, %s.13, i8, i8, i8, %s.14*, %s.15*, %s.15*, i32, [12 x i8] }
+%s.4 = type { ptr, ptr, ptr, ptr, i32, i32, i32, ptr, i32, ptr, ptr, i8, i8, i8, ptr, i32, ptr, ptr, i8, i8, i8, ptr, i32, ptr, i32, ptr, i8, %s.9, [32 x ptr], [7 x ptr], i32, ptr, i32, ptr, i32, i32, %s.11, %s.13, i8, i8, i8, ptr, ptr, ptr, i32, [12 x i8] }
 %s.5 = type { [1 x %s.6], i32, %s.7, [4 x i8] }
 %s.6 = type { [16 x i32] }
 %s.7 = type { [2 x i32] }
-%s.8 = type { void (i8*)*, i8*, i32, %s.8* }
-%s.9 = type { i8* (i8*)*, i8*, %s.7, i32, %s.10 }
+%s.8 = type { ptr, ptr, i32, ptr }
+%s.9 = type { ptr, ptr, %s.7, i32, %s.10 }
 %s.10 = type { i32 }
-%s.11 = type { %s.12, i8, i8* }
+%s.11 = type { %s.12, i8, ptr }
 %s.12 = type { [2 x i32] }
 %s.13 = type { i32, i32 }
-%s.14 = type { i8*, i32 (i8*, %s.4*)* }
-%s.15 = type { %s.15*, %s.16*, i32 }
-%s.16 = type { %s.3, i32, %s.4*, %s.4*, %s.4*, i32, i32 }
-%s.17 = type { i32, void (i8*)* }
-%s.18 = type { %s.0*, i8* }
+%s.14 = type { ptr, ptr }
+%s.15 = type { ptr, ptr, i32 }
+%s.16 = type { %s.3, i32, ptr, ptr, ptr, i32, i32 }
+%s.17 = type { i32, ptr }
+%s.18 = type { ptr, ptr }
 
 ; Function Attrs: nounwind
-define zeroext i8 @f0(%s.0* %a0, i32 %a1, %s.18* %a2) #0 {
+define zeroext i8 @f0(ptr %a0, i32 %a1, ptr %a2) #0 {
 b0:
   %v0 = alloca i8, align 1
-  %v1 = alloca %s.0*, align 4
+  %v1 = alloca ptr, align 4
   %v2 = alloca i32, align 4
-  %v3 = alloca %s.18*, align 4
+  %v3 = alloca ptr, align 4
   %v4 = alloca i32, align 4
   %v5 = alloca i32, align 4
-  %v6 = alloca i8*
+  %v6 = alloca ptr
   %v7 = alloca i32, align 4
   %v8 = alloca i32
   %v9 = alloca %s.4, align 32
-  store %s.0* %a0, %s.0** %v1, align 4
-  store i32 %a1, i32* %v2, align 4
-  store %s.18* %a2, %s.18** %v3, align 4
-  %v10 = load %s.0*, %s.0** %v1, align 4
-  %v11 = getelementptr inbounds %s.0, %s.0* %v10, i32 0, i32 3
-  %v12 = load i32, i32* %v11, align 4
-  store i32 %v12, i32* %v4, align 4
-  %v13 = load %s.0*, %s.0** %v1, align 4
-  %v14 = getelementptr inbounds %s.0, %s.0* %v13, i32 0, i32 6
-  %v15 = load i32, i32* %v14, align 4
-  store i32 %v15, i32* %v5, align 4
-  %v16 = load i32, i32* %v4, align 4
-  %v17 = call i8* @llvm.stacksave()
-  store i8* %v17, i8** %v6
+  store ptr %a0, ptr %v1, align 4
+  store i32 %a1, ptr %v2, align 4
+  store ptr %a2, ptr %v3, align 4
+  %v10 = load ptr, ptr %v1, align 4
+  %v11 = getelementptr inbounds %s.0, ptr %v10, i32 0, i32 3
+  %v12 = load i32, ptr %v11, align 4
+  store i32 %v12, ptr %v4, align 4
+  %v13 = load ptr, ptr %v1, align 4
+  %v14 = getelementptr inbounds %s.0, ptr %v13, i32 0, i32 6
+  %v15 = load i32, ptr %v14, align 4
+  store i32 %v15, ptr %v5, align 4
+  %v16 = load i32, ptr %v4, align 4
+  %v17 = call ptr @llvm.stacksave()
+  store ptr %v17, ptr %v6
   %v18 = alloca %s.2, i32 %v16, align 8
-  %v19 = load %s.0*, %s.0** %v1, align 4
-  %v20 = call i32 @f1(%s.0* %v19)
+  %v19 = load ptr, ptr %v1, align 4
+  %v20 = call i32 @f1(ptr %v19)
   %v21 = icmp ne i32 %v20, 0
   br i1 %v21, label %b2, label %b1
 
 b1:                                               ; preds = %b0
-  store i8 8, i8* %v0
-  store i32 1, i32* %v8
+  store i8 8, ptr %v0
+  store i32 1, ptr %v8
   br label %b23
 
 b2:                                               ; preds = %b0
-  %v22 = load %s.0*, %s.0** %v1, align 4
-  %v23 = getelementptr inbounds %s.0, %s.0* %v22, i32 0, i32 0
-  %v24 = load %s.1*, %s.1** %v23, align 4
-  %v25 = load %s.0*, %s.0** %v1, align 4
-  %v26 = getelementptr inbounds %s.0, %s.0* %v25, i32 0, i32 1
-  %v27 = load %s.2*, %s.2** %v26, align 4
-  %v28 = bitcast %s.2* %v27 to i8*
-  %v29 = bitcast %s.2* %v18 to i8*
-  %v30 = load i32, i32* %v4, align 4
+  %v22 = load ptr, ptr %v1, align 4
+  %v24 = load ptr, ptr %v22, align 4
+  %v25 = load ptr, ptr %v1, align 4
+  %v26 = getelementptr inbounds %s.0, ptr %v25, i32 0, i32 1
+  %v27 = load ptr, ptr %v26, align 4
+  %v30 = load i32, ptr %v4, align 4
   %v31 = mul i32 16, %v30
-  %v32 = call zeroext i8 @f2(%s.1* %v24, i8* %v28, i8* %v29, i32 %v31)
+  %v32 = call zeroext i8 @f2(ptr %v24, ptr %v27, ptr %v18, i32 %v31)
   %v33 = zext i8 %v32 to i32
   %v34 = icmp ne i32 %v33, 0
   br i1 %v34, label %b3, label %b4
 
 b3:                                               ; preds = %b2
-  store i8 1, i8* %v0
-  store i32 1, i32* %v8
+  store i8 1, ptr %v0
+  store i32 1, ptr %v8
   br label %b23
 
 b4:                                               ; preds = %b2
-  store i32 0, i32* %v7, align 4
+  store i32 0, ptr %v7, align 4
   br label %b5
 
 b5:                                               ; preds = %b21, %b4
-  %v35 = load i32, i32* %v7, align 4
-  %v36 = load i32, i32* %v4, align 4
+  %v35 = load i32, ptr %v7, align 4
+  %v36 = load i32, ptr %v4, align 4
   %v37 = icmp ult i32 %v35, %v36
   br i1 %v37, label %b6, label %b7
 
@@ -98,37 +95,34 @@ b7:                                               ; preds = %b6, %b5
   br i1 %v38, label %b8, label %b22
 
 b8:                                               ; preds = %b7
-  %v39 = load i32, i32* %v7, align 4
-  %v40 = getelementptr inbounds %s.2, %s.2* %v18, i32 %v39
-  %v41 = getelementptr inbounds %s.2, %s.2* %v40, i32 0, i32 1
-  %v42 = load %s.4*, %s.4** %v41, align 4
-  %v43 = icmp ne %s.4* %v42, null
+  %v39 = load i32, ptr %v7, align 4
+  %v40 = getelementptr inbounds %s.2, ptr %v18, i32 %v39
+  %v41 = getelementptr inbounds %s.2, ptr %v40, i32 0, i32 1
+  %v42 = load ptr, ptr %v41, align 4
+  %v43 = icmp ne ptr %v42, null
   br i1 %v43, label %b9, label %b17
 
 b9:                                               ; preds = %b8
-  %v44 = load %s.0*, %s.0** %v1, align 4
-  %v45 = getelementptr inbounds %s.0, %s.0* %v44, i32 0, i32 0
-  %v46 = load %s.1*, %s.1** %v45, align 4
-  %v47 = load i32, i32* %v7, align 4
-  %v48 = getelementptr inbounds %s.2, %s.2* %v18, i32 %v47
-  %v49 = getelementptr inbounds %s.2, %s.2* %v48, i32 0, i32 1
-  %v50 = load %s.4*, %s.4** %v49, align 4
-  %v51 = bitcast %s.4* %v50 to i8*
-  %v52 = bitcast %s.4* %v9 to i8*
-  %v53 = load i32, i32* %v5, align 4
-  %v54 = call zeroext i8 @f2(%s.1* %v46, i8* %v51, i8* %v52, i32 %v53)
+  %v44 = load ptr, ptr %v1, align 4
+  %v46 = load ptr, ptr %v44, align 4
+  %v47 = load i32, ptr %v7, align 4
+  %v48 = getelementptr inbounds %s.2, ptr %v18, i32 %v47
+  %v49 = getelementptr inbounds %s.2, ptr %v48, i32 0, i32 1
+  %v50 = load ptr, ptr %v49, align 4
+  %v53 = load i32, ptr %v5, align 4
+  %v54 = call zeroext i8 @f2(ptr %v46, ptr %v50, ptr %v9, i32 %v53)
   %v55 = zext i8 %v54 to i32
   %v56 = icmp ne i32 %v55, 0
   br i1 %v56, label %b10, label %b11
 
 b10:                                              ; preds = %b9
-  store i8 1, i8* %v0
-  store i32 1, i32* %v8
+  store i8 1, ptr %v0
+  store i32 1, ptr %v8
   br label %b23
 
 b11:                                              ; preds = %b9
-  %v57 = getelementptr inbounds %s.4, %s.4* %v9, i32 0, i32 5
-  %v58 = load i32, i32* %v57, align 4
+  %v57 = getelementptr inbounds %s.4, ptr %v9, i32 0, i32 5
+  %v58 = load i32, ptr %v57, align 4
   %v59 = icmp ne i32 %v58, 0
   br i1 %v59, label %b12, label %b13
 
@@ -136,53 +130,49 @@ b12:                                              ; preds = %b11
   br label %b14
 
 b13:                                              ; preds = %b11
-  %v60 = load %s.0*, %s.0** %v1, align 4
-  %v61 = getelementptr inbounds %s.0, %s.0* %v60, i32 0, i32 0
-  %v62 = load %s.1*, %s.1** %v61, align 4
-  %v63 = call i32 @f3(%s.1* %v62)
+  %v60 = load ptr, ptr %v1, align 4
+  %v62 = load ptr, ptr %v60, align 4
+  %v63 = call i32 @f3(ptr %v62)
   br label %b14
 
 b14:                                              ; preds = %b13, %b12
   %v64 = phi i32 [ %v58, %b12 ], [ %v63, %b13 ]
-  %v65 = load i32, i32* %v2, align 4
+  %v65 = load i32, ptr %v2, align 4
   %v66 = icmp eq i32 %v64, %v65
   br i1 %v66, label %b15, label %b16
 
 b15:                                              ; preds = %b14
-  %v67 = load %s.0*, %s.0** %v1, align 4
-  %v68 = load %s.18*, %s.18** %v3, align 4
-  %v69 = getelementptr inbounds %s.18, %s.18* %v68, i32 0, i32 0
-  store %s.0* %v67, %s.0** %v69, align 4
-  %v70 = load i32, i32* %v7, align 4
-  %v71 = getelementptr inbounds %s.2, %s.2* %v18, i32 %v70
-  %v72 = getelementptr inbounds %s.2, %s.2* %v71, i32 0, i32 1
-  %v73 = load %s.4*, %s.4** %v72, align 4
-  %v74 = bitcast %s.4* %v73 to i8*
-  %v75 = load %s.18*, %s.18** %v3, align 4
-  %v76 = getelementptr inbounds %s.18, %s.18* %v75, i32 0, i32 1
-  store i8* %v74, i8** %v76, align 4
-  store i8 0, i8* %v0
-  store i32 1, i32* %v8
+  %v67 = load ptr, ptr %v1, align 4
+  %v68 = load ptr, ptr %v3, align 4
+  store ptr %v67, ptr %v68, align 4
+  %v70 = load i32, ptr %v7, align 4
+  %v71 = getelementptr inbounds %s.2, ptr %v18, i32 %v70
+  %v72 = getelementptr inbounds %s.2, ptr %v71, i32 0, i32 1
+  %v73 = load ptr, ptr %v72, align 4
+  %v75 = load ptr, ptr %v3, align 4
+  %v76 = getelementptr inbounds %s.18, ptr %v75, i32 0, i32 1
+  store ptr %v73, ptr %v76, align 4
+  store i8 0, ptr %v0
+  store i32 1, ptr %v8
   br label %b23
 
 b16:                                              ; preds = %b14
   br label %b20
 
 b17:                                              ; preds = %b8
-  %v77 = load i32, i32* %v7, align 4
+  %v77 = load i32, ptr %v7, align 4
   %v78 = icmp eq i32 %v77, 0
   br i1 %v78, label %b18, label %b19
 
 b18:                                              ; preds = %b17
-  %v79 = load %s.0*, %s.0** %v1, align 4
-  %v80 = load %s.18*, %s.18** %v3, align 4
-  %v81 = getelementptr inbounds %s.18, %s.18* %v80, i32 0, i32 0
-  store %s.0* %v79, %s.0** %v81, align 4
-  %v82 = load %s.18*, %s.18** %v3, align 4
-  %v83 = getelementptr inbounds %s.18, %s.18* %v82, i32 0, i32 1
-  store i8* null, i8** %v83, align 4
-  store i8 0, i8* %v0
-  store i32 1, i32* %v8
+  %v79 = load ptr, ptr %v1, align 4
+  %v80 = load ptr, ptr %v3, align 4
+  store ptr %v79, ptr %v80, align 4
+  %v82 = load ptr, ptr %v3, align 4
+  %v83 = getelementptr inbounds %s.18, ptr %v82, i32 0, i32 1
+  store ptr null, ptr %v83, align 4
+  store i8 0, ptr %v0
+  store i32 1, ptr %v8
   br label %b23
 
 b19:                                              ; preds = %b17
@@ -192,35 +182,35 @@ b20:                                              ; preds = %b19, %b16
   br label %b21
 
 b21:                                              ; preds = %b20
-  %v84 = load i32, i32* %v7, align 4
+  %v84 = load i32, ptr %v7, align 4
   %v85 = add i32 %v84, 1
-  store i32 %v85, i32* %v7, align 4
+  store i32 %v85, ptr %v7, align 4
   br label %b5
 
 b22:                                              ; preds = %b7
-  store i8 4, i8* %v0
-  store i32 1, i32* %v8
+  store i8 4, ptr %v0
+  store i32 1, ptr %v8
   br label %b23
 
 b23:                                              ; preds = %b22, %b18, %b15, %b10, %b3, %b1
-  %v86 = load i8*, i8** %v6
-  call void @llvm.stackrestore(i8* %v86)
-  %v87 = load i8, i8* %v0
+  %v86 = load ptr, ptr %v6
+  call void @llvm.stackrestore(ptr %v86)
+  %v87 = load i8, ptr %v0
   ret i8 %v87
 }
 
 ; Function Attrs: nounwind
-declare i8* @llvm.stacksave() #0
+declare ptr @llvm.stacksave() #0
 
 ; Function Attrs: inlinehint nounwind
-declare i32 @f1(%s.0*) #1
+declare i32 @f1(ptr) #1
 
-declare zeroext i8 @f2(%s.1*, i8*, i8*, i32) #0
+declare zeroext i8 @f2(ptr, ptr, ptr, i32) #0
 
-declare i32 @f3(%s.1*) #0
+declare i32 @f3(ptr) #0
 
 ; Function Attrs: nounwind
-declare void @llvm.stackrestore(i8*) #0
+declare void @llvm.stackrestore(ptr) #0
 
 attributes #0 = { nounwind }
 attributes #1 = { inlinehint nounwind }

diff --git a/llvm/test/CodeGen/Hexagon/bug19119.ll b/llvm/test/CodeGen/Hexagon/bug19119.ll
index 65969d7674b41..ebd17da225d59 100644
--- a/llvm/test/CodeGen/Hexagon/bug19119.ll
+++ b/llvm/test/CodeGen/Hexagon/bug19119.ll
@@ -7,23 +7,22 @@ target triple = "hexagon-unknown--elf"
 
 @g0 = global %s.0 { i32 3 }, align 4 #0
 @g1 = global i32 0, align 4 #1
- at g2 = global %s.0* @g0, align 4 #2
+ at g2 = global ptr @g0, align 4 #2
 @g3 = global i32 0, align 4 #3
 @g4 = global i32 0, align 4 #4
 
 ; Function Attrs: nounwind optsize
 define i32 @f0() #5 section ".text.main" {
 b0:
-  %v0 = load i32, i32* @g3, align 4, !tbaa !4
+  %v0 = load i32, ptr @g3, align 4, !tbaa !4
   %v1 = add nsw i32 %v0, 1
-  store i32 %v1, i32* @g3, align 4, !tbaa !4
-  %v2 = load i8*, i8** bitcast (%s.0** @g2 to i8**), align 4, !tbaa !8
-  %v3 = load i32, i32* @g1, align 4, !tbaa !10
-  %v4 = getelementptr inbounds i8, i8* %v2, i32 %v3
-  %v5 = bitcast i8* %v4 to i32*
-  %v6 = load i32, i32* %v5, align 4, !tbaa !4
-  store i32 %v6, i32* @g4, align 4, !tbaa !4
-  store i32 1, i32* @g3, align 4, !tbaa !4
+  store i32 %v1, ptr @g3, align 4, !tbaa !4
+  %v2 = load ptr, ptr @g2, align 4, !tbaa !8
+  %v3 = load i32, ptr @g1, align 4, !tbaa !10
+  %v4 = getelementptr inbounds i8, ptr %v2, i32 %v3
+  %v6 = load i32, ptr %v4, align 4, !tbaa !4
+  store i32 %v6, ptr @g4, align 4, !tbaa !4
+  store i32 1, ptr @g3, align 4, !tbaa !4
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/bug19254-ifconv-vec.ll b/llvm/test/CodeGen/Hexagon/bug19254-ifconv-vec.ll
index 2779c2f27b2de..3aab757250624 100644
--- a/llvm/test/CodeGen/Hexagon/bug19254-ifconv-vec.ll
+++ b/llvm/test/CodeGen/Hexagon/bug19254-ifconv-vec.ll
@@ -7,14 +7,14 @@ target triple = "hexagon"
 @g0 = private unnamed_addr constant [39 x i8] c"\0AnumTrainingSet =%d  numFeatures = %d\0A\00", align 1
 
 ; Function Attrs: nounwind
-declare i32 @f0(i8* nocapture readonly, ...) #0
+declare i32 @f0(ptr nocapture readonly, ...) #0
 
 ; Function Attrs: nounwind
-define void @f1(i16* nocapture readnone %a0, i16 signext %a1, i16 signext %a2, i16* nocapture readnone %a3, i16* nocapture readnone %a4, i16* nocapture %a5, i16 signext %a6, i16 signext %a7) #0 {
+define void @f1(ptr nocapture readnone %a0, i16 signext %a1, i16 signext %a2, ptr nocapture readnone %a3, ptr nocapture readnone %a4, ptr nocapture %a5, i16 signext %a6, i16 signext %a7) #0 {
 b0:
   %v0 = sext i16 %a1 to i32
   %v1 = sext i16 %a2 to i32
-  %v2 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([39 x i8], [39 x i8]* @g0, i32 0, i32 0), i32 %v0, i32 %v1) #2
+  %v2 = tail call i32 (ptr, ...) @f0(ptr @g0, i32 %v0, i32 %v1) #2
   %v3 = tail call <32 x i32> @llvm.hexagon.V6.vd0.128B()
   br label %b1
 
@@ -48,7 +48,7 @@ b4:                                               ; preds = %b18
   %v21 = extractelement <32 x i32> %v56, i32 0
   %v22 = mul nsw i32 %v20, %v21
   %v23 = trunc i32 %v22 to i16
-  store i16 %v23, i16* %a5, align 2, !tbaa !0
+  store i16 %v23, ptr %a5, align 2, !tbaa !0
   ret void
 
 b5:                                               ; preds = %b3

diff  --git a/llvm/test/CodeGen/Hexagon/bug27085.ll b/llvm/test/CodeGen/Hexagon/bug27085.ll
index d79e39fee6513..7289e969c6a30 100644
--- a/llvm/test/CodeGen/Hexagon/bug27085.ll
+++ b/llvm/test/CodeGen/Hexagon/bug27085.ll
@@ -13,19 +13,19 @@ target triple = "hexagon--linux"
 ; Function Attrs: norecurse nounwind
 define void @f0(i32 %a0) local_unnamed_addr #0 {
 b0:
-  store volatile i32 1, i32* @g0, align 4, !tbaa !1
+  store volatile i32 1, ptr @g0, align 4, !tbaa !1
   ret void
 }
 
 ; Function Attrs: norecurse nounwind
 define zeroext i1 @f1() local_unnamed_addr #0 {
 b0:
-  %v0 = load volatile i32, i32* @g0, align 4, !tbaa !1
+  %v0 = load volatile i32, ptr @g0, align 4, !tbaa !1
   %v1 = icmp eq i32 %v0, 0
   br i1 %v1, label %b2, label %b1
 
 b1:                                               ; preds = %b0
-  store volatile i32 0, i32* @g0, align 4, !tbaa !1
+  store volatile i32 0, ptr @g0, align 4, !tbaa !1
   br label %b2
 
 b2:                                               ; preds = %b1, %b0

diff  --git a/llvm/test/CodeGen/Hexagon/bug31839.ll b/llvm/test/CodeGen/Hexagon/bug31839.ll
index 83f19b3e3e377..117cccb55021d 100644
--- a/llvm/test/CodeGen/Hexagon/bug31839.ll
+++ b/llvm/test/CodeGen/Hexagon/bug31839.ll
@@ -3,24 +3,22 @@
 
 ; Check for successful compilation.
 
-define i8* @f0(i32 %a0, i32 %a1) {
+define ptr @f0(i32 %a0, i32 %a1) {
 b0:
-  %v0 = call noalias i8* @f1(i32 undef, i32 undef)
+  %v0 = call noalias ptr @f1(i32 undef, i32 undef)
   br i1 undef, label %b2, label %b1
 
 b1:                                               ; preds = %b0
-  %v1 = ptrtoint i8* %v0 to i32
-  %v2 = bitcast i8* %v0 to i32*
-  store volatile i32 %v1, i32* %v2, align 4
-  %v3 = getelementptr inbounds i8, i8* %v0, i32 4
-  %v4 = bitcast i8* %v3 to i8**
-  store i8* %v0, i8** %v4, align 4
-  %v5 = getelementptr inbounds i8, i8* %v0, i32 16
+  %v1 = ptrtoint ptr %v0 to i32
+  store volatile i32 %v1, ptr %v0, align 4
+  %v3 = getelementptr inbounds i8, ptr %v0, i32 4
+  store ptr %v0, ptr %v3, align 4
+  %v5 = getelementptr inbounds i8, ptr %v0, i32 16
   br label %b2
 
 b2:                                               ; preds = %b1, %b0
-  %v6 = phi i8* [ %v5, %b1 ], [ null, %b0 ]
-  ret i8* %v6
+  %v6 = phi ptr [ %v5, %b1 ], [ null, %b0 ]
+  ret ptr %v6
 }
 
-declare noalias i8* @f1(i32, i32) local_unnamed_addr
+declare noalias ptr @f1(i32, i32) local_unnamed_addr

diff  --git a/llvm/test/CodeGen/Hexagon/bug6757-endloop.ll b/llvm/test/CodeGen/Hexagon/bug6757-endloop.ll
index 9fec47e54cd72..ae743e50fddce 100644
--- a/llvm/test/CodeGen/Hexagon/bug6757-endloop.ll
+++ b/llvm/test/CodeGen/Hexagon/bug6757-endloop.ll
@@ -9,22 +9,20 @@
 ; CHECK-NOT: loop1(
 ; CHECK: endloop1
 
-%s.0 = type { i32, i8* }
+%s.0 = type { i32, ptr }
 %s.1 = type { i32, i32, i32, i32 }
 
-define void @f0(%s.0* nocapture readonly %a0, %s.1* nocapture readonly %a1) {
+define void @f0(ptr nocapture readonly %a0, ptr nocapture readonly %a1) {
 b0:
-  %v0 = getelementptr inbounds %s.1, %s.1* %a1, i32 0, i32 0
-  %v1 = load i32, i32* %v0, align 4
-  %v2 = getelementptr inbounds %s.1, %s.1* %a1, i32 0, i32 3
-  %v3 = load i32, i32* %v2, align 4
-  %v4 = getelementptr inbounds %s.1, %s.1* %a1, i32 0, i32 2
-  %v5 = load i32, i32* %v4, align 4
-  %v6 = getelementptr inbounds %s.1, %s.1* %a1, i32 0, i32 1
-  %v7 = load i32, i32* %v6, align 4
-  %v8 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 1
-  %v9 = load i8*, i8** %v8, align 4
-  %v10 = bitcast i8* %v9 to i32*
+  %v1 = load i32, ptr %a1, align 4
+  %v2 = getelementptr inbounds %s.1, ptr %a1, i32 0, i32 3
+  %v3 = load i32, ptr %v2, align 4
+  %v4 = getelementptr inbounds %s.1, ptr %a1, i32 0, i32 2
+  %v5 = load i32, ptr %v4, align 4
+  %v6 = getelementptr inbounds %s.1, ptr %a1, i32 0, i32 1
+  %v7 = load i32, ptr %v6, align 4
+  %v8 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 1
+  %v9 = load ptr, ptr %v8, align 4
   %v11 = mul i32 %v1, 10
   %v12 = icmp eq i32 %v1, %v3
   %v13 = icmp eq i32 %v5, 0
@@ -35,7 +33,7 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b1
   %v14 = lshr i32 %v11, 5
-  %v15 = getelementptr inbounds i32, i32* %v10, i32 %v14
+  %v15 = getelementptr inbounds i32, ptr %v9, i32 %v14
   %v16 = and i32 %v11, 30
   %v17 = icmp eq i32 %v16, 0
   br label %b11
@@ -44,7 +42,6 @@ b3:                                               ; preds = %b0
   br i1 %v13, label %b14, label %b4
 
 b4:                                               ; preds = %b3
-  %v18 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 0
   br label %b5
 
 b5:                                               ; preds = %b6, %b4
@@ -53,9 +50,9 @@ b5:                                               ; preds = %b6, %b4
   %v21 = add i32 %v20, -1
   %v22 = add i32 %v19, -10
   %v23 = lshr i32 %v22, 5
-  %v24 = getelementptr inbounds i32, i32* %v10, i32 %v23
+  %v24 = getelementptr inbounds i32, ptr %v9, i32 %v23
   %v25 = and i32 %v22, 31
-  %v26 = load i32, i32* %v18, align 4
+  %v26 = load i32, ptr %a0, align 4
   %v27 = mul i32 %v26, %v7
   %v28 = icmp eq i32 %v25, 0
   br i1 %v28, label %b7, label %b6
@@ -73,15 +70,15 @@ b8:                                               ; preds = %b7
   br i1 %v31, label %b10, label %b6
 
 b9:                                               ; preds = %b7
-  %v32 = load volatile i32, i32* %v24, align 4
-  store volatile i32 %v32, i32* %v24, align 4
+  %v32 = load volatile i32, ptr %v24, align 4
+  store volatile i32 %v32, ptr %v24, align 4
   br label %b6
 
 b10:                                              ; preds = %b10, %b8
   %v33 = phi i32 [ %v37, %b10 ], [ %v27, %b8 ]
-  %v34 = phi i32* [ %v35, %b10 ], [ %v24, %b8 ]
-  %v35 = getelementptr inbounds i32, i32* %v34, i32 -1
-  %v36 = load volatile i32, i32* %v34, align 4
+  %v34 = phi ptr [ %v35, %b10 ], [ %v24, %b8 ]
+  %v35 = getelementptr inbounds i32, ptr %v34, i32 -1
+  %v36 = load volatile i32, ptr %v34, align 4
   %v37 = add i32 %v33, -4
   %v38 = icmp ugt i32 %v37, 3
   br i1 %v38, label %b10, label %b6
@@ -96,10 +93,10 @@ b12:                                              ; preds = %b13, %b11
   br i1 %v41, label %b14, label %b11
 
 b13:                                              ; preds = %b11
-  %v42 = load volatile i32, i32* %v15, align 4
-  %v43 = load volatile i32, i32* %v15, align 4
+  %v42 = load volatile i32, ptr %v15, align 4
+  %v43 = load volatile i32, ptr %v15, align 4
   %v44 = and i32 %v43, %v42
-  store volatile i32 %v44, i32* %v15, align 4
+  store volatile i32 %v44, ptr %v15, align 4
   br label %b12
 
 b14:                                              ; preds = %b12, %b6, %b3, %b1

diff  --git a/llvm/test/CodeGen/Hexagon/bug9049.ll b/llvm/test/CodeGen/Hexagon/bug9049.ll
index 124859362f485..e6c92d9cda61b 100644
--- a/llvm/test/CodeGen/Hexagon/bug9049.ll
+++ b/llvm/test/CodeGen/Hexagon/bug9049.ll
@@ -6,8 +6,7 @@ target triple = "hexagon-unknown-linux-gnu"
 define void @f0() #0 {
 b0:
   %v0 = alloca i32, align 4
-  %v1 = bitcast i32* %v0 to i64*
-  %v2 = load i64, i64* %v1, align 8
+  %v2 = load i64, ptr %v0, align 8
 ; CHECK: 	call f1
   %v3 = call i32 @f1(i64 %v2)
   unreachable

diff  --git a/llvm/test/CodeGen/Hexagon/bugAsmHWloop.ll b/llvm/test/CodeGen/Hexagon/bugAsmHWloop.ll
index c7e95ed056646..d9f48fd19835d 100644
--- a/llvm/test/CodeGen/Hexagon/bugAsmHWloop.ll
+++ b/llvm/test/CodeGen/Hexagon/bugAsmHWloop.ll
@@ -7,27 +7,26 @@
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
 target triple = "hexagon"
 
-define i32 @q6zip_uncompress(i8* %out_buf, i32* %out_buf_size, i8* %in_buf, i32 %in_buf_size, i8* nocapture %dict, i32 %dict_size) nounwind {
+define i32 @q6zip_uncompress(ptr %out_buf, ptr %out_buf_size, ptr %in_buf, i32 %in_buf_size, ptr nocapture %dict, i32 %dict_size) nounwind {
 entry:
-  %0 = bitcast i8* %in_buf to i32*
-  %incdec.ptr = getelementptr inbounds i8, i8* %in_buf, i32 4
-  %1 = load i32, i32* %0, align 4, !tbaa !0
-  %2 = ptrtoint i8* %incdec.ptr to i32
-  %and.i = and i32 %2, 31
-  %sub.i = sub i32 %2, %and.i
-  %3 = inttoptr i32 %sub.i to i8*
+  %incdec.ptr = getelementptr inbounds i8, ptr %in_buf, i32 4
+  %0 = load i32, ptr %in_buf, align 4, !tbaa !0
+  %1 = ptrtoint ptr %incdec.ptr to i32
+  %and.i = and i32 %1, 31
+  %sub.i = sub i32 %1, %and.i
+  %2 = inttoptr i32 %sub.i to ptr
   %add.i = add i32 %in_buf_size, 31
   %sub2.i = add i32 %add.i, %and.i
   %div.i = lshr i32 %sub2.i, 5
-  %4 = tail call i32 @llvm.hexagon.A2.combine.ll(i32 32, i32 %div.i) nounwind
-  %5 = tail call i64 @llvm.hexagon.A4.combineir(i32 32, i32 %4) nounwind
-  tail call void asm sideeffect "l2fetch($0,$1)", "r,r,~{memory}"(i8* %3, i64 %5) nounwind, !srcloc !3
-  %6 = ptrtoint i8* %out_buf to i32
+  %3 = tail call i32 @llvm.hexagon.A2.combine.ll(i32 32, i32 %div.i) nounwind
+  %4 = tail call i64 @llvm.hexagon.A4.combineir(i32 32, i32 %3) nounwind
+  tail call void asm sideeffect "l2fetch($0,$1)", "r,r,~{memory}"(ptr %2, i64 %4) nounwind, !srcloc !3
+  %5 = ptrtoint ptr %out_buf to i32
   br label %for.body.i
 
 for.body.i:                                       ; preds = %for.body.i, %entry
   %i.02.i = phi i32 [ 0, %entry ], [ %inc.i, %for.body.i ]
-  %addr.addr.01.i = phi i32 [ %6, %entry ], [ %add.i14, %for.body.i ]
+  %addr.addr.01.i = phi i32 [ %5, %entry ], [ %add.i14, %for.body.i ]
   tail call void asm sideeffect "dczeroa($0)", "r"(i32 %addr.addr.01.i) nounwind, !srcloc !4
   %add.i14 = add i32 %addr.addr.01.i, 32
   %inc.i = add i32 %i.02.i, 1
@@ -35,7 +34,7 @@ for.body.i:                                       ; preds = %for.body.i, %entry
   br i1 %exitcond.i, label %while.cond.preheader, label %for.body.i
 
 while.cond.preheader:                             ; preds = %for.body.i
-  %and = and i32 %1, 3
+  %and = and i32 %0, 3
   switch i32 %and, label %infloop.preheader [
     i32 0, label %exit_inflate.split
     i32 2, label %if.then.preheader
@@ -48,7 +47,7 @@ infloop.preheader:                                ; preds = %while.cond.preheade
   br label %infloop
 
 if.then:                                          ; preds = %if.then.preheader, %if.then
-  tail call void @llvm.prefetch(i8* %incdec.ptr, i32 0, i32 3, i32 1)
+  tail call void @llvm.prefetch(ptr %incdec.ptr, i32 0, i32 3, i32 1)
   br label %if.then
 
 exit_inflate.split:                               ; preds = %while.cond.preheader
@@ -58,7 +57,7 @@ infloop:                                          ; preds = %infloop.preheader,
   br label %infloop
 }
 
-declare void @llvm.prefetch(i8* nocapture, i32, i32, i32) nounwind
+declare void @llvm.prefetch(ptr nocapture, i32, i32, i32) nounwind
 
 declare i64 @llvm.hexagon.A4.combineir(i32, i32) nounwind readnone
 

diff  --git a/llvm/test/CodeGen/Hexagon/build-vector-shuffle.ll b/llvm/test/CodeGen/Hexagon/build-vector-shuffle.ll
index f79f79e94a3ab..223c161f2d8c1 100644
--- a/llvm/test/CodeGen/Hexagon/build-vector-shuffle.ll
+++ b/llvm/test/CodeGen/Hexagon/build-vector-shuffle.ll
@@ -4,13 +4,13 @@
 
 target triple = "hexagon"
 
-define void @f0(<16 x i32>* %a0) #0 {
+define void @f0(ptr %a0) #0 {
 entry:
   %v0 = icmp eq i32 undef, 0
   %v1 = select i1 %v0, <32 x i16> undef, <32 x i16> zeroinitializer
   %v2 = bitcast <32 x i16> %v1 to <16 x i32>
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vshuffh(<16 x i32> %v2)
-  store <16 x i32> %v3, <16 x i32>* %a0, align 2
+  store <16 x i32> %v3, ptr %a0, align 2
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/builtin-prefetch-offset.ll b/llvm/test/CodeGen/Hexagon/builtin-prefetch-offset.ll
index eac372d114b7f..bbab0f0ffe9ed 100644
--- a/llvm/test/CodeGen/Hexagon/builtin-prefetch-offset.ll
+++ b/llvm/test/CodeGen/Hexagon/builtin-prefetch-offset.ll
@@ -13,16 +13,16 @@
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
 target triple = "hexagon"
 
-define void @foo(i8* %addr) nounwind {
+define void @foo(ptr %addr) nounwind {
 entry:
-  %addr.addr = alloca i8*, align 4
-  store i8* %addr, i8** %addr.addr, align 4
-  %0 = load i8*, i8** %addr.addr, align 4
-  %1 = getelementptr i8, i8* %0, i32 8
-  call void @llvm.prefetch(i8* %1, i32 0, i32 3, i32 1)
-  %2 = getelementptr i8, i8* %0, i32 9
-  call void @llvm.prefetch(i8* %2, i32 0, i32 3, i32 1)
+  %addr.addr = alloca ptr, align 4
+  store ptr %addr, ptr %addr.addr, align 4
+  %0 = load ptr, ptr %addr.addr, align 4
+  %1 = getelementptr i8, ptr %0, i32 8
+  call void @llvm.prefetch(ptr %1, i32 0, i32 3, i32 1)
+  %2 = getelementptr i8, ptr %0, i32 9
+  call void @llvm.prefetch(ptr %2, i32 0, i32 3, i32 1)
   ret void
 }
 
-declare void @llvm.prefetch(i8* nocapture, i32, i32, i32) nounwind
+declare void @llvm.prefetch(ptr nocapture, i32, i32, i32) nounwind

diff  --git a/llvm/test/CodeGen/Hexagon/builtin-prefetch.ll b/llvm/test/CodeGen/Hexagon/builtin-prefetch.ll
index 8f7b44f659862..e23e3488e68b9 100644
--- a/llvm/test/CodeGen/Hexagon/builtin-prefetch.ll
+++ b/llvm/test/CodeGen/Hexagon/builtin-prefetch.ll
@@ -5,25 +5,24 @@ target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define zeroext i8 @foo(i8* %addr) #0 {
+define zeroext i8 @foo(ptr %addr) #0 {
 entry:
-  %addr.addr = alloca i8*, align 4
-  store i8* %addr, i8** %addr.addr, align 4
-  %0 = load i8*, i8** %addr.addr, align 4
-  call void @llvm.prefetch(i8* %0, i32 0, i32 3, i32 1)
-  %1 = load i8*, i8** %addr.addr, align 4
-  %2 = bitcast i8* %1 to i32*
-  %3 = load i32, i32* %2, align 4
-  %4 = add i32 %3, 8
-  %5 = inttoptr i32 %4 to i8*
-  call void @llvm.hexagon.prefetch(i8* %5)
-  %6 = load i8, i8* %5
-  ret i8 %6
+  %addr.addr = alloca ptr, align 4
+  store ptr %addr, ptr %addr.addr, align 4
+  %0 = load ptr, ptr %addr.addr, align 4
+  call void @llvm.prefetch(ptr %0, i32 0, i32 3, i32 1)
+  %1 = load ptr, ptr %addr.addr, align 4
+  %2 = load i32, ptr %1, align 4
+  %3 = add i32 %2, 8
+  %4 = inttoptr i32 %3 to ptr
+  call void @llvm.hexagon.prefetch(ptr %4)
+  %5 = load i8, ptr %4
+  ret i8 %5
 }
 
 ; Function Attrs: nounwind
-declare void @llvm.prefetch(i8* nocapture, i32, i32, i32) #1
-declare void @llvm.hexagon.prefetch(i8* nocapture) #1
+declare void @llvm.prefetch(ptr nocapture, i32, i32, i32) #1
+declare void @llvm.hexagon.prefetch(ptr nocapture) #1
 
 attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/call-long1.ll b/llvm/test/CodeGen/Hexagon/call-long1.ll
index ca05cf40bf26f..9e4794f933080 100644
--- a/llvm/test/CodeGen/Hexagon/call-long1.ll
+++ b/llvm/test/CodeGen/Hexagon/call-long1.ll
@@ -9,8 +9,8 @@ target triple = "hexagon"
 ; Function Attrs: nounwind
 define i32 @f0(i32 %a0, i32 %a1, i32 %a2) #0 {
 b0:
-  %v0 = tail call i32 bitcast (i32 (...)* @f1 to i32 (i32, i32, i32)*)(i32 %a0, i32 %a1, i32 %a2) #1
-  %v1 = tail call i32 bitcast (i32 (...)* @f2 to i32 (i32, i32, i32)*)(i32 %a0, i32 %a1, i32 %a2) #1
+  %v0 = tail call i32 @f1(i32 %a0, i32 %a1, i32 %a2) #1
+  %v1 = tail call i32 @f2(i32 %a0, i32 %a1, i32 %a2) #1
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/call-ret-i1.ll b/llvm/test/CodeGen/Hexagon/call-ret-i1.ll
index 3838e8a6e88fe..5cf3dfbfef2b7 100644
--- a/llvm/test/CodeGen/Hexagon/call-ret-i1.ll
+++ b/llvm/test/CodeGen/Hexagon/call-ret-i1.ll
@@ -6,18 +6,18 @@
 
 %returntype = type { i1, i32 }
 
-define i32 @test(i32* %a0, i32* %a1, i32* %a2) #0 {
+define i32 @test(ptr %a0, ptr %a1, ptr %a2) #0 {
 b3:
   br i1 undef, label %b6, label %b4
 
 b4:                                               ; preds = %b3
-  %v5 = call %returntype @foo(i32* nonnull undef, i32* %a2, i32* %a0) #0
+  %v5 = call %returntype @foo(ptr nonnull undef, ptr %a2, ptr %a0) #0
   ret i32 1
 
 b6:                                               ; preds = %b3
   unreachable
 }
 
-declare %returntype @foo(i32*, i32*, i32*) #0
+declare %returntype @foo(ptr, ptr, ptr) #0
 
 attributes #0 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/call-v4.ll b/llvm/test/CodeGen/Hexagon/call-v4.ll
index 045aa23d2e9ac..67548c81fd9b9 100644
--- a/llvm/test/CodeGen/Hexagon/call-v4.ll
+++ b/llvm/test/CodeGen/Hexagon/call-v4.ll
@@ -11,7 +11,7 @@ target triple = "hexagon"
 ; Function Attrs: nounwind
 define i32 @f0() #0 {
 b0:
-  %v0 = load i32, i32* @g0, align 4
+  %v0 = load i32, ptr @g0, align 4
   %v1 = tail call i32 @f1(i32 %v0) #0
   %v2 = icmp eq i32 %v1, 0
   br i1 %v2, label %b1, label %b2

diff  --git a/llvm/test/CodeGen/Hexagon/callR_noreturn.ll b/llvm/test/CodeGen/Hexagon/callR_noreturn.ll
index d78b1ab33f12f..ad198ef853b2a 100644
--- a/llvm/test/CodeGen/Hexagon/callR_noreturn.ll
+++ b/llvm/test/CodeGen/Hexagon/callR_noreturn.ll
@@ -1,11 +1,11 @@
 ; RUN: llc -march=hexagon  < %s | FileCheck %s
 ; CHECK: callr {{r[0-9]+}}
 
-%s.0 = type { [1 x %s.1], [4 x i8*] }
+%s.0 = type { [1 x %s.1], [4 x ptr] }
 %s.1 = type { [1 x %s.2], i32, [4 x i8] }
 %s.2 = type { [16 x i32] }
 
-define hidden void @f0(void (%s.0*)* %a0) #0 {
+define hidden void @f0(ptr %a0) #0 {
 b0:
   br i1 undef, label %b2, label %b1
 
@@ -13,7 +13,7 @@ b1:                                               ; preds = %b0
   ret void
 
 b2:                                               ; preds = %b0
-  call void %a0(%s.0* null) #1
+  call void %a0(ptr null) #1
   unreachable
 }
  

diff  --git a/llvm/test/CodeGen/Hexagon/calling-conv-2.ll b/llvm/test/CodeGen/Hexagon/calling-conv-2.ll
index bca6ac71927e3..74913417d2f76 100644
--- a/llvm/test/CodeGen/Hexagon/calling-conv-2.ll
+++ b/llvm/test/CodeGen/Hexagon/calling-conv-2.ll
@@ -3,12 +3,12 @@
 %struct.test_struct = type { i32, i8, i64 }
 
 ; CHECK: r1 = #45
-define void @foo(%struct.test_struct* noalias nocapture sret(%struct.test_struct) %agg.result, i32 %a) #0 {
+define void @foo(ptr noalias nocapture sret(%struct.test_struct) %agg.result, i32 %a) #0 {
 entry:
-  call void @bar(%struct.test_struct* sret(%struct.test_struct) %agg.result, i32 45) #0
+  call void @bar(ptr sret(%struct.test_struct) %agg.result, i32 45) #0
   ret void
 }
 
-declare void @bar(%struct.test_struct* sret(%struct.test_struct), i32) #0
+declare void @bar(ptr sret(%struct.test_struct), i32) #0
 
 attributes #0 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/calling-conv.ll b/llvm/test/CodeGen/Hexagon/calling-conv.ll
index 881709f98c0d7..68cce1a8ed5c8 100644
--- a/llvm/test/CodeGen/Hexagon/calling-conv.ll
+++ b/llvm/test/CodeGen/Hexagon/calling-conv.ll
@@ -5,33 +5,30 @@
 %s.0 = type { i32, i8, i64 }
 %s.1 = type { i8, i64 }
 
- at g0 = external global %s.0*
+ at g0 = external global ptr
 
 ; CHECK-ONE:    memw(r29+#48) = r2
 ; CHECK-TWO:    memw(r29+#52) = r2
 ; CHECK-THREE:  memw(r29+#56) = r2
 
-define void @f0(%s.0* noalias nocapture sret(%s.0) %a0, i32 %a1, i8 zeroext %a2, %s.0* byval(%s.0) nocapture readnone align 8 %a3, %s.1* byval(%s.1) nocapture readnone align 8 %a4) #0 {
+define void @f0(ptr noalias nocapture sret(%s.0) %a0, i32 %a1, i8 zeroext %a2, ptr byval(%s.0) nocapture readnone align 8 %a3, ptr byval(%s.1) nocapture readnone align 8 %a4) #0 {
 b0:
   %v0 = alloca %s.0, align 8
-  %v1 = load %s.0*, %s.0** @g0, align 4
+  %v1 = load ptr, ptr @g0, align 4
   %v2 = sext i32 %a1 to i64
   %v3 = add nsw i64 %v2, 1
   %v4 = add nsw i32 %a1, 2
   %v5 = add nsw i64 %v2, 3
-  call void @f1(%s.0* sret(%s.0) %v0, i32 45, %s.0* byval(%s.0) align 8 %v1, %s.0* byval(%s.0) align 8 %v1, i8 zeroext %a2, i64 %v3, i32 %v4, i64 %v5, i8 zeroext %a2, i8 zeroext %a2, i8 zeroext %a2, i32 45)
-  %v6 = bitcast %s.0* %v0 to i32*
-  store i32 20, i32* %v6, align 8
-  %v7 = bitcast %s.0* %a0 to i8*
-  %v8 = bitcast %s.0* %v0 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v7, i8* align 8 %v8, i32 16, i1 false)
+  call void @f1(ptr sret(%s.0) %v0, i32 45, ptr byval(%s.0) align 8 %v1, ptr byval(%s.0) align 8 %v1, i8 zeroext %a2, i64 %v3, i32 %v4, i64 %v5, i8 zeroext %a2, i8 zeroext %a2, i8 zeroext %a2, i32 45)
+  store i32 20, ptr %v0, align 8
+  call void @llvm.memcpy.p0.p0.i32(ptr align 8 %a0, ptr align 8 %v0, i32 16, i1 false)
   ret void
 }
 
-declare void @f1(%s.0* sret(%s.0), i32, %s.0* byval(%s.0) align 8, %s.0* byval(%s.0) align 8, i8 zeroext, i64, i32, i64, i8 zeroext, i8 zeroext, i8 zeroext, i32)
+declare void @f1(ptr sret(%s.0), i32, ptr byval(%s.0) align 8, ptr byval(%s.0) align 8, i8 zeroext, i64, i32, i64, i8 zeroext, i8 zeroext, i8 zeroext, i32)
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #1
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1) #1
 
 attributes #0 = { nounwind "target-cpu"="hexagonv55" }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/callr-dep-edge.ll b/llvm/test/CodeGen/Hexagon/callr-dep-edge.ll
index 24e7995d372d6..7049121336c5d 100644
--- a/llvm/test/CodeGen/Hexagon/callr-dep-edge.ll
+++ b/llvm/test/CodeGen/Hexagon/callr-dep-edge.ll
@@ -3,7 +3,7 @@
 
 target triple = "hexagon"
 
- at fp = common global i32 (...)* null, align 4
+ at fp = common global ptr null, align 4
 
 ; CHECK: [[REG:r[0-9]+]] = memw
 ; CHECK: {
@@ -12,7 +12,7 @@ target triple = "hexagon"
 ; Function Attrs: nounwind
 define i32 @foo() #0 {
 entry:
-  %0 = load i32 ()*, i32 ()** bitcast (i32 (...)** @fp to i32 ()**), align 4
+  %0 = load ptr, ptr @fp, align 4
   %call = tail call i32 %0() #0
   ret i32 %call
 }

diff  --git a/llvm/test/CodeGen/Hexagon/cext-check.ll b/llvm/test/CodeGen/Hexagon/cext-check.ll
index 38dfa9ca03563..0369866b054c9 100644
--- a/llvm/test/CodeGen/Hexagon/cext-check.ll
+++ b/llvm/test/CodeGen/Hexagon/cext-check.ll
@@ -1,25 +1,25 @@
 ; RUN: llc -march=hexagon -hexagon-eif=0 -ifcvt-limit=0 -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s
 ; Check that we constant extended instructions only when necessary.
 
-define i32 @cext_test1(i32* %a) nounwind {
+define i32 @cext_test1(ptr %a) nounwind {
 ; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}+##8000)
 ; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}},##300000)
 ; CHECK-NOT: r{{[0-9]+}} = memw(r{{[0-9]+}}+##4092)
 ; CHECK-NOT: r{{[0-9]+}} = add(r{{[0-9]+}},##300)
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %tobool = icmp ne i32 %0, 0
   br i1 %tobool, label %if.then, label %if.end
 
 if.then:
-  %arrayidx1 = getelementptr inbounds i32, i32* %a, i32 2000
-  %1 = load i32, i32* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %a, i32 2000
+  %1 = load i32, ptr %arrayidx1, align 4
   %add = add nsw i32 %1, 300000
   br label %return
 
 if.end:
-  %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1023
-  %2 = load i32, i32* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds i32, ptr %a, i32 1023
+  %2 = load i32, ptr %arrayidx2, align 4
   %add3 = add nsw i32 %2, 300
   br label %return
 
@@ -28,25 +28,25 @@ return:
   ret i32 %retval.0
 }
 
-define i32 @cext_test2(i8* %a) nounwind {
+define i32 @cext_test2(ptr %a) nounwind {
 ; CHECK-NOT: r{{[0-9]+}} = memub(r{{[0-9]+}}+##1023)
 ; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}},##300000)
 ; CHECK: r{{[0-9]+}} = memub(r{{[0-9]+}}+##1024)
 ; CHECK-NOT: r{{[0-9]+}} = add(r{{[0-9]+}},##6000)
 entry:
-  %tobool = icmp ne i8* %a, null
+  %tobool = icmp ne ptr %a, null
   br i1 %tobool, label %if.then, label %if.end
 
 if.then:
-  %arrayidx = getelementptr inbounds i8, i8* %a, i32 1023
-  %0 = load i8, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %a, i32 1023
+  %0 = load i8, ptr %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 300000
   br label %return
 
 if.end:
-  %arrayidx1 = getelementptr inbounds i8, i8* %a, i32 1024
-  %1 = load i8, i8* %arrayidx1, align 1
+  %arrayidx1 = getelementptr inbounds i8, ptr %a, i32 1024
+  %1 = load i8, ptr %arrayidx1, align 1
   %conv2 = zext i8 %1 to i32
   %add3 = add nsw i32 %conv2, 6000
   br label %return

diff  --git a/llvm/test/CodeGen/Hexagon/cext-ice.ll b/llvm/test/CodeGen/Hexagon/cext-ice.ll
index 1f39892c76cac..a56157b156549 100644
--- a/llvm/test/CodeGen/Hexagon/cext-ice.ll
+++ b/llvm/test/CodeGen/Hexagon/cext-ice.ll
@@ -7,27 +7,25 @@ target triple = "hexagon-unknown--elf"
 define void @f0(i32 %a0, i32 %a1) #0 {
 b0:
   %v0 = alloca [8 x i32], align 8
-  %v1 = bitcast [8 x i32]* %v0 to i8*
-  call void @llvm.memset.p0i8.i32(i8* align 8 %v1, i8 0, i32 32, i1 false)
+  call void @llvm.memset.p0.i32(ptr align 8 %v0, i8 0, i32 32, i1 false)
   %v2 = icmp sgt i32 %a0, 0
   br i1 %v2, label %b1, label %b18
 
 b1:                                               ; preds = %b0
-  %v3 = getelementptr inbounds [8 x i32], [8 x i32]* %v0, i32 0, i32 6
-  %v4 = inttoptr i32 %a1 to i32*
+  %v3 = getelementptr inbounds [8 x i32], ptr %v0, i32 0, i32 6
+  %v4 = inttoptr i32 %a1 to ptr
   %v5 = add i32 %a0, -1
   %v6 = icmp sgt i32 %v5, 0
   br i1 %v6, label %b2, label %b13
 
 b2:                                               ; preds = %b1
-  %v7 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 0
-  %v8 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 1
-  %v9 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 2
-  %v10 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 3
-  %v11 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 4
-  %v12 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 5
-  %v13 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 6
-  %v14 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 7
+  %v8 = getelementptr [8 x i32], ptr %v0, i32 0, i32 1
+  %v9 = getelementptr [8 x i32], ptr %v0, i32 0, i32 2
+  %v10 = getelementptr [8 x i32], ptr %v0, i32 0, i32 3
+  %v11 = getelementptr [8 x i32], ptr %v0, i32 0, i32 4
+  %v12 = getelementptr [8 x i32], ptr %v0, i32 0, i32 5
+  %v13 = getelementptr [8 x i32], ptr %v0, i32 0, i32 6
+  %v14 = getelementptr [8 x i32], ptr %v0, i32 0, i32 7
   %v15 = add i32 %a0, -2
   %v16 = lshr i32 %v15, 1
   %v17 = add i32 %v16, 1
@@ -45,24 +43,24 @@ b4:                                               ; preds = %b22, %b3
   %v23 = phi i32 [ 0, %b3 ], [ %v136, %b22 ]
   %v24 = mul nsw i32 %v22, 4
   %v25 = add nsw i32 %v24, 268435456
-  %v26 = inttoptr i32 %v25 to i32*
-  store volatile i32 %a1, i32* %v26, align 4, !tbaa !0
-  %v27 = load i32, i32* %v7, align 8, !tbaa !0
-  store volatile i32 %v27, i32* %v4, align 4, !tbaa !0
-  %v28 = load i32, i32* %v8, align 4, !tbaa !0
-  store volatile i32 %v28, i32* %v4, align 4, !tbaa !0
-  %v29 = load i32, i32* %v9, align 8, !tbaa !0
-  store volatile i32 %v29, i32* %v4, align 4, !tbaa !0
-  %v30 = load i32, i32* %v10, align 4, !tbaa !0
-  store volatile i32 %v30, i32* %v4, align 4, !tbaa !0
-  %v31 = load i32, i32* %v11, align 8, !tbaa !0
-  store volatile i32 %v31, i32* %v4, align 4, !tbaa !0
-  %v32 = load i32, i32* %v12, align 4, !tbaa !0
-  store volatile i32 %v32, i32* %v4, align 4, !tbaa !0
-  %v33 = load i32, i32* %v13, align 8, !tbaa !0
-  store volatile i32 %v33, i32* %v4, align 4, !tbaa !0
-  %v34 = load i32, i32* %v14, align 4, !tbaa !0
-  store volatile i32 %v34, i32* %v4, align 4, !tbaa !0
+  %v26 = inttoptr i32 %v25 to ptr
+  store volatile i32 %a1, ptr %v26, align 4, !tbaa !0
+  %v27 = load i32, ptr %v0, align 8, !tbaa !0
+  store volatile i32 %v27, ptr %v4, align 4, !tbaa !0
+  %v28 = load i32, ptr %v8, align 4, !tbaa !0
+  store volatile i32 %v28, ptr %v4, align 4, !tbaa !0
+  %v29 = load i32, ptr %v9, align 8, !tbaa !0
+  store volatile i32 %v29, ptr %v4, align 4, !tbaa !0
+  %v30 = load i32, ptr %v10, align 4, !tbaa !0
+  store volatile i32 %v30, ptr %v4, align 4, !tbaa !0
+  %v31 = load i32, ptr %v11, align 8, !tbaa !0
+  store volatile i32 %v31, ptr %v4, align 4, !tbaa !0
+  %v32 = load i32, ptr %v12, align 4, !tbaa !0
+  store volatile i32 %v32, ptr %v4, align 4, !tbaa !0
+  %v33 = load i32, ptr %v13, align 8, !tbaa !0
+  store volatile i32 %v33, ptr %v4, align 4, !tbaa !0
+  %v34 = load i32, ptr %v14, align 4, !tbaa !0
+  store volatile i32 %v34, ptr %v4, align 4, !tbaa !0
   %v35 = icmp eq i32 %v23, 0
   br i1 %v35, label %b19, label %b20
 
@@ -84,53 +82,53 @@ b8:                                               ; preds = %b10, %b7
   %v41 = phi i32 [ %v38, %b7 ], [ %v66, %b10 ]
   %v42 = mul nsw i32 %v40, 4
   %v43 = add nsw i32 %v42, 268435456
-  %v44 = inttoptr i32 %v43 to i32*
-  store volatile i32 %a1, i32* %v44, align 4, !tbaa !0
-  %v45 = load i32, i32* %v7, align 8, !tbaa !0
-  store volatile i32 %v45, i32* %v4, align 4, !tbaa !0
-  %v46 = load i32, i32* %v8, align 4, !tbaa !0
-  store volatile i32 %v46, i32* %v4, align 4, !tbaa !0
-  %v47 = load i32, i32* %v9, align 8, !tbaa !0
-  store volatile i32 %v47, i32* %v4, align 4, !tbaa !0
-  %v48 = load i32, i32* %v10, align 4, !tbaa !0
-  store volatile i32 %v48, i32* %v4, align 4, !tbaa !0
-  %v49 = load i32, i32* %v11, align 8, !tbaa !0
-  store volatile i32 %v49, i32* %v4, align 4, !tbaa !0
-  %v50 = load i32, i32* %v12, align 4, !tbaa !0
-  store volatile i32 %v50, i32* %v4, align 4, !tbaa !0
-  %v51 = load i32, i32* %v13, align 8, !tbaa !0
-  store volatile i32 %v51, i32* %v4, align 4, !tbaa !0
-  %v52 = load i32, i32* %v14, align 4, !tbaa !0
-  store volatile i32 %v52, i32* %v4, align 4, !tbaa !0
+  %v44 = inttoptr i32 %v43 to ptr
+  store volatile i32 %a1, ptr %v44, align 4, !tbaa !0
+  %v45 = load i32, ptr %v0, align 8, !tbaa !0
+  store volatile i32 %v45, ptr %v4, align 4, !tbaa !0
+  %v46 = load i32, ptr %v8, align 4, !tbaa !0
+  store volatile i32 %v46, ptr %v4, align 4, !tbaa !0
+  %v47 = load i32, ptr %v9, align 8, !tbaa !0
+  store volatile i32 %v47, ptr %v4, align 4, !tbaa !0
+  %v48 = load i32, ptr %v10, align 4, !tbaa !0
+  store volatile i32 %v48, ptr %v4, align 4, !tbaa !0
+  %v49 = load i32, ptr %v11, align 8, !tbaa !0
+  store volatile i32 %v49, ptr %v4, align 4, !tbaa !0
+  %v50 = load i32, ptr %v12, align 4, !tbaa !0
+  store volatile i32 %v50, ptr %v4, align 4, !tbaa !0
+  %v51 = load i32, ptr %v13, align 8, !tbaa !0
+  store volatile i32 %v51, ptr %v4, align 4, !tbaa !0
+  %v52 = load i32, ptr %v14, align 4, !tbaa !0
+  store volatile i32 %v52, ptr %v4, align 4, !tbaa !0
   %v53 = icmp eq i32 %v41, 0
   br i1 %v53, label %b9, label %b10
 
 b9:                                               ; preds = %b8
-  store i32 0, i32* %v3, align 8, !tbaa !0
+  store i32 0, ptr %v3, align 8, !tbaa !0
   br label %b10
 
 b10:                                              ; preds = %b9, %b8
   %v54 = phi i32 [ 3, %b9 ], [ %v40, %b8 ]
   %v55 = mul nsw i32 %v54, 4
   %v56 = add nsw i32 %v55, 268435456
-  %v57 = inttoptr i32 %v56 to i32*
-  store volatile i32 %a1, i32* %v57, align 4, !tbaa !0
-  %v58 = load i32, i32* %v7, align 8, !tbaa !0
-  store volatile i32 %v58, i32* %v4, align 4, !tbaa !0
-  %v59 = load i32, i32* %v8, align 4, !tbaa !0
-  store volatile i32 %v59, i32* %v4, align 4, !tbaa !0
-  %v60 = load i32, i32* %v9, align 8, !tbaa !0
-  store volatile i32 %v60, i32* %v4, align 4, !tbaa !0
-  %v61 = load i32, i32* %v10, align 4, !tbaa !0
-  store volatile i32 %v61, i32* %v4, align 4, !tbaa !0
-  %v62 = load i32, i32* %v11, align 8, !tbaa !0
-  store volatile i32 %v62, i32* %v4, align 4, !tbaa !0
-  %v63 = load i32, i32* %v12, align 4, !tbaa !0
-  store volatile i32 %v63, i32* %v4, align 4, !tbaa !0
-  %v64 = load i32, i32* %v13, align 8, !tbaa !0
-  store volatile i32 %v64, i32* %v4, align 4, !tbaa !0
-  %v65 = load i32, i32* %v14, align 4, !tbaa !0
-  store volatile i32 %v65, i32* %v4, align 4, !tbaa !0
+  %v57 = inttoptr i32 %v56 to ptr
+  store volatile i32 %a1, ptr %v57, align 4, !tbaa !0
+  %v58 = load i32, ptr %v0, align 8, !tbaa !0
+  store volatile i32 %v58, ptr %v4, align 4, !tbaa !0
+  %v59 = load i32, ptr %v8, align 4, !tbaa !0
+  store volatile i32 %v59, ptr %v4, align 4, !tbaa !0
+  %v60 = load i32, ptr %v9, align 8, !tbaa !0
+  store volatile i32 %v60, ptr %v4, align 4, !tbaa !0
+  %v61 = load i32, ptr %v10, align 4, !tbaa !0
+  store volatile i32 %v61, ptr %v4, align 4, !tbaa !0
+  %v62 = load i32, ptr %v11, align 8, !tbaa !0
+  store volatile i32 %v62, ptr %v4, align 4, !tbaa !0
+  %v63 = load i32, ptr %v12, align 4, !tbaa !0
+  store volatile i32 %v63, ptr %v4, align 4, !tbaa !0
+  %v64 = load i32, ptr %v13, align 8, !tbaa !0
+  store volatile i32 %v64, ptr %v4, align 4, !tbaa !0
+  %v65 = load i32, ptr %v14, align 4, !tbaa !0
+  store volatile i32 %v65, ptr %v4, align 4, !tbaa !0
   %v66 = add nsw i32 %v41, 2
   %v67 = icmp slt i32 %v66, %v5
   br i1 %v67, label %b8, label %b11
@@ -149,14 +147,13 @@ b12:                                              ; preds = %b11, %b5
 b13:                                              ; preds = %b12, %b1
   %v73 = phi i32 [ 0, %b1 ], [ %v70, %b12 ]
   %v74 = phi i32 [ 0, %b1 ], [ %v71, %b12 ]
-  %v75 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 0
-  %v76 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 1
-  %v77 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 2
-  %v78 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 3
-  %v79 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 4
-  %v80 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 5
-  %v81 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 6
-  %v82 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 7
+  %v76 = getelementptr [8 x i32], ptr %v0, i32 0, i32 1
+  %v77 = getelementptr [8 x i32], ptr %v0, i32 0, i32 2
+  %v78 = getelementptr [8 x i32], ptr %v0, i32 0, i32 3
+  %v79 = getelementptr [8 x i32], ptr %v0, i32 0, i32 4
+  %v80 = getelementptr [8 x i32], ptr %v0, i32 0, i32 5
+  %v81 = getelementptr [8 x i32], ptr %v0, i32 0, i32 6
+  %v82 = getelementptr [8 x i32], ptr %v0, i32 0, i32 7
   br label %b14
 
 b14:                                              ; preds = %b16, %b13
@@ -166,31 +163,31 @@ b14:                                              ; preds = %b16, %b13
   br i1 %v85, label %b15, label %b16
 
 b15:                                              ; preds = %b14
-  store i32 0, i32* %v3, align 8, !tbaa !0
+  store i32 0, ptr %v3, align 8, !tbaa !0
   br label %b16
 
 b16:                                              ; preds = %b15, %b14
   %v86 = phi i32 [ 3, %b15 ], [ %v83, %b14 ]
   %v87 = mul nsw i32 %v86, 4
   %v88 = add nsw i32 %v87, 268435456
-  %v89 = inttoptr i32 %v88 to i32*
-  store volatile i32 %a1, i32* %v89, align 4, !tbaa !0
-  %v90 = load i32, i32* %v75, align 8, !tbaa !0
-  store volatile i32 %v90, i32* %v4, align 4, !tbaa !0
-  %v91 = load i32, i32* %v76, align 4, !tbaa !0
-  store volatile i32 %v91, i32* %v4, align 4, !tbaa !0
-  %v92 = load i32, i32* %v77, align 8, !tbaa !0
-  store volatile i32 %v92, i32* %v4, align 4, !tbaa !0
-  %v93 = load i32, i32* %v78, align 4, !tbaa !0
-  store volatile i32 %v93, i32* %v4, align 4, !tbaa !0
-  %v94 = load i32, i32* %v79, align 8, !tbaa !0
-  store volatile i32 %v94, i32* %v4, align 4, !tbaa !0
-  %v95 = load i32, i32* %v80, align 4, !tbaa !0
-  store volatile i32 %v95, i32* %v4, align 4, !tbaa !0
-  %v96 = load i32, i32* %v81, align 8, !tbaa !0
-  store volatile i32 %v96, i32* %v4, align 4, !tbaa !0
-  %v97 = load i32, i32* %v82, align 4, !tbaa !0
-  store volatile i32 %v97, i32* %v4, align 4, !tbaa !0
+  %v89 = inttoptr i32 %v88 to ptr
+  store volatile i32 %a1, ptr %v89, align 4, !tbaa !0
+  %v90 = load i32, ptr %v0, align 8, !tbaa !0
+  store volatile i32 %v90, ptr %v4, align 4, !tbaa !0
+  %v91 = load i32, ptr %v76, align 4, !tbaa !0
+  store volatile i32 %v91, ptr %v4, align 4, !tbaa !0
+  %v92 = load i32, ptr %v77, align 8, !tbaa !0
+  store volatile i32 %v92, ptr %v4, align 4, !tbaa !0
+  %v93 = load i32, ptr %v78, align 4, !tbaa !0
+  store volatile i32 %v93, ptr %v4, align 4, !tbaa !0
+  %v94 = load i32, ptr %v79, align 8, !tbaa !0
+  store volatile i32 %v94, ptr %v4, align 4, !tbaa !0
+  %v95 = load i32, ptr %v80, align 4, !tbaa !0
+  store volatile i32 %v95, ptr %v4, align 4, !tbaa !0
+  %v96 = load i32, ptr %v81, align 8, !tbaa !0
+  store volatile i32 %v96, ptr %v4, align 4, !tbaa !0
+  %v97 = load i32, ptr %v82, align 4, !tbaa !0
+  store volatile i32 %v97, ptr %v4, align 4, !tbaa !0
   %v98 = add nsw i32 %v84, 1
   %v99 = icmp eq i32 %v98, %a0
   br i1 %v99, label %b17, label %b14
@@ -202,80 +199,80 @@ b18:                                              ; preds = %b17, %b12, %b0
   ret void
 
 b19:                                              ; preds = %b4
-  store i32 0, i32* %v3, align 8, !tbaa !0
+  store i32 0, ptr %v3, align 8, !tbaa !0
   br label %b20
 
 b20:                                              ; preds = %b19, %b4
   %v100 = phi i32 [ 3, %b19 ], [ %v22, %b4 ]
   %v101 = mul nsw i32 %v100, 4
   %v102 = add nsw i32 %v101, 268435456
-  %v103 = inttoptr i32 %v102 to i32*
-  store volatile i32 %a1, i32* %v103, align 4, !tbaa !0
-  %v104 = load i32, i32* %v7, align 8, !tbaa !0
-  store volatile i32 %v104, i32* %v4, align 4, !tbaa !0
-  %v105 = load i32, i32* %v8, align 4, !tbaa !0
-  store volatile i32 %v105, i32* %v4, align 4, !tbaa !0
-  %v106 = load i32, i32* %v9, align 8, !tbaa !0
-  store volatile i32 %v106, i32* %v4, align 4, !tbaa !0
-  %v107 = load i32, i32* %v10, align 4, !tbaa !0
-  store volatile i32 %v107, i32* %v4, align 4, !tbaa !0
-  %v108 = load i32, i32* %v11, align 8, !tbaa !0
-  store volatile i32 %v108, i32* %v4, align 4, !tbaa !0
-  %v109 = load i32, i32* %v12, align 4, !tbaa !0
-  store volatile i32 %v109, i32* %v4, align 4, !tbaa !0
-  %v110 = load i32, i32* %v13, align 8, !tbaa !0
-  store volatile i32 %v110, i32* %v4, align 4, !tbaa !0
-  %v111 = load i32, i32* %v14, align 4, !tbaa !0
-  store volatile i32 %v111, i32* %v4, align 4, !tbaa !0
+  %v103 = inttoptr i32 %v102 to ptr
+  store volatile i32 %a1, ptr %v103, align 4, !tbaa !0
+  %v104 = load i32, ptr %v0, align 8, !tbaa !0
+  store volatile i32 %v104, ptr %v4, align 4, !tbaa !0
+  %v105 = load i32, ptr %v8, align 4, !tbaa !0
+  store volatile i32 %v105, ptr %v4, align 4, !tbaa !0
+  %v106 = load i32, ptr %v9, align 8, !tbaa !0
+  store volatile i32 %v106, ptr %v4, align 4, !tbaa !0
+  %v107 = load i32, ptr %v10, align 4, !tbaa !0
+  store volatile i32 %v107, ptr %v4, align 4, !tbaa !0
+  %v108 = load i32, ptr %v11, align 8, !tbaa !0
+  store volatile i32 %v108, ptr %v4, align 4, !tbaa !0
+  %v109 = load i32, ptr %v12, align 4, !tbaa !0
+  store volatile i32 %v109, ptr %v4, align 4, !tbaa !0
+  %v110 = load i32, ptr %v13, align 8, !tbaa !0
+  store volatile i32 %v110, ptr %v4, align 4, !tbaa !0
+  %v111 = load i32, ptr %v14, align 4, !tbaa !0
+  store volatile i32 %v111, ptr %v4, align 4, !tbaa !0
   %v112 = add nsw i32 %v23, 2
   %v113 = mul nsw i32 %v100, 4
   %v114 = add nsw i32 %v113, 268435456
-  %v115 = inttoptr i32 %v114 to i32*
-  store volatile i32 %a1, i32* %v115, align 4, !tbaa !0
-  %v116 = load i32, i32* %v7, align 8, !tbaa !0
-  store volatile i32 %v116, i32* %v4, align 4, !tbaa !0
-  %v117 = load i32, i32* %v8, align 4, !tbaa !0
-  store volatile i32 %v117, i32* %v4, align 4, !tbaa !0
-  %v118 = load i32, i32* %v9, align 8, !tbaa !0
-  store volatile i32 %v118, i32* %v4, align 4, !tbaa !0
-  %v119 = load i32, i32* %v10, align 4, !tbaa !0
-  store volatile i32 %v119, i32* %v4, align 4, !tbaa !0
-  %v120 = load i32, i32* %v11, align 8, !tbaa !0
-  store volatile i32 %v120, i32* %v4, align 4, !tbaa !0
-  %v121 = load i32, i32* %v12, align 4, !tbaa !0
-  store volatile i32 %v121, i32* %v4, align 4, !tbaa !0
-  %v122 = load i32, i32* %v13, align 8, !tbaa !0
-  store volatile i32 %v122, i32* %v4, align 4, !tbaa !0
-  %v123 = load i32, i32* %v14, align 4, !tbaa !0
-  store volatile i32 %v123, i32* %v4, align 4, !tbaa !0
+  %v115 = inttoptr i32 %v114 to ptr
+  store volatile i32 %a1, ptr %v115, align 4, !tbaa !0
+  %v116 = load i32, ptr %v0, align 8, !tbaa !0
+  store volatile i32 %v116, ptr %v4, align 4, !tbaa !0
+  %v117 = load i32, ptr %v8, align 4, !tbaa !0
+  store volatile i32 %v117, ptr %v4, align 4, !tbaa !0
+  %v118 = load i32, ptr %v9, align 8, !tbaa !0
+  store volatile i32 %v118, ptr %v4, align 4, !tbaa !0
+  %v119 = load i32, ptr %v10, align 4, !tbaa !0
+  store volatile i32 %v119, ptr %v4, align 4, !tbaa !0
+  %v120 = load i32, ptr %v11, align 8, !tbaa !0
+  store volatile i32 %v120, ptr %v4, align 4, !tbaa !0
+  %v121 = load i32, ptr %v12, align 4, !tbaa !0
+  store volatile i32 %v121, ptr %v4, align 4, !tbaa !0
+  %v122 = load i32, ptr %v13, align 8, !tbaa !0
+  store volatile i32 %v122, ptr %v4, align 4, !tbaa !0
+  %v123 = load i32, ptr %v14, align 4, !tbaa !0
+  store volatile i32 %v123, ptr %v4, align 4, !tbaa !0
   br i1 false, label %b21, label %b22
 
 b21:                                              ; preds = %b20
-  store i32 0, i32* %v3, align 8, !tbaa !0
+  store i32 0, ptr %v3, align 8, !tbaa !0
   br label %b22
 
 b22:                                              ; preds = %b21, %b20
   %v124 = phi i32 [ 3, %b21 ], [ %v100, %b20 ]
   %v125 = mul nsw i32 %v124, 4
   %v126 = add nsw i32 %v125, 268435456
-  %v127 = inttoptr i32 %v126 to i32*
-  store volatile i32 %a1, i32* %v127, align 4, !tbaa !0
-  %v128 = load i32, i32* %v7, align 8, !tbaa !0
-  store volatile i32 %v128, i32* %v4, align 4, !tbaa !0
-  %v129 = load i32, i32* %v8, align 4, !tbaa !0
-  store volatile i32 %v129, i32* %v4, align 4, !tbaa !0
-  %v130 = load i32, i32* %v9, align 8, !tbaa !0
-  store volatile i32 %v130, i32* %v4, align 4, !tbaa !0
-  %v131 = load i32, i32* %v10, align 4, !tbaa !0
-  store volatile i32 %v131, i32* %v4, align 4, !tbaa !0
-  %v132 = load i32, i32* %v11, align 8, !tbaa !0
-  store volatile i32 %v132, i32* %v4, align 4, !tbaa !0
-  %v133 = load i32, i32* %v12, align 4, !tbaa !0
-  store volatile i32 %v133, i32* %v4, align 4, !tbaa !0
-  %v134 = load i32, i32* %v13, align 8, !tbaa !0
-  store volatile i32 %v134, i32* %v4, align 4, !tbaa !0
-  %v135 = load i32, i32* %v14, align 4, !tbaa !0
-  store volatile i32 %v135, i32* %v4, align 4, !tbaa !0
+  %v127 = inttoptr i32 %v126 to ptr
+  store volatile i32 %a1, ptr %v127, align 4, !tbaa !0
+  %v128 = load i32, ptr %v0, align 8, !tbaa !0
+  store volatile i32 %v128, ptr %v4, align 4, !tbaa !0
+  %v129 = load i32, ptr %v8, align 4, !tbaa !0
+  store volatile i32 %v129, ptr %v4, align 4, !tbaa !0
+  %v130 = load i32, ptr %v9, align 8, !tbaa !0
+  store volatile i32 %v130, ptr %v4, align 4, !tbaa !0
+  %v131 = load i32, ptr %v10, align 4, !tbaa !0
+  store volatile i32 %v131, ptr %v4, align 4, !tbaa !0
+  %v132 = load i32, ptr %v11, align 8, !tbaa !0
+  store volatile i32 %v132, ptr %v4, align 4, !tbaa !0
+  %v133 = load i32, ptr %v12, align 4, !tbaa !0
+  store volatile i32 %v133, ptr %v4, align 4, !tbaa !0
+  %v134 = load i32, ptr %v13, align 8, !tbaa !0
+  store volatile i32 %v134, ptr %v4, align 4, !tbaa !0
+  %v135 = load i32, ptr %v14, align 4, !tbaa !0
+  store volatile i32 %v135, ptr %v4, align 4, !tbaa !0
   %v136 = add nsw i32 %v112, 2
   %v137 = icmp slt i32 %v136, %v20
   br i1 %v137, label %b4, label %b5
@@ -289,7 +286,7 @@ b0:
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1) #1
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1) #1
 
 attributes #0 = { nounwind }
 attributes #1 = { argmemonly nounwind }

diff --git a/llvm/test/CodeGen/Hexagon/cext-valid-packet2.ll b/llvm/test/CodeGen/Hexagon/cext-valid-packet2.ll
index afebc7c3d53e3..2d51414fea22c 100644
--- a/llvm/test/CodeGen/Hexagon/cext-valid-packet2.ll
+++ b/llvm/test/CodeGen/Hexagon/cext-valid-packet2.ll
@@ -12,11 +12,11 @@
 ; CHECK-NEW: [[REG0:r([0-9]+)]] = add(r{{[0-9]+}},##200000)
 ; CHECK-NEW: memw(r{{[0-9]+}}+##12000) = [[REG0]].new
 
-define void @test(i32* nocapture %a, i32* nocapture %b, i32 %c) nounwind {
+define void @test(ptr nocapture %a, ptr nocapture %b, i32 %c) nounwind {
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %add1 = add nsw i32 %0, 200000
-  %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 3000
-  store i32 %add1, i32* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds i32, ptr %a, i32 3000
+  store i32 %add1, ptr %arrayidx2, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/Hexagon/cext.ll b/llvm/test/CodeGen/Hexagon/cext.ll
index 5390657fd9a2f..7cd654fa221db 100644
--- a/llvm/test/CodeGen/Hexagon/cext.ll
+++ b/llvm/test/CodeGen/Hexagon/cext.ll
@@ -6,8 +6,8 @@
 define zeroext i8 @foo(i8 zeroext %l) nounwind readonly {
 for.end:
   %idxprom = zext i8 %l to i32
-  %arrayidx1 = getelementptr inbounds [5 x [2 x i8]], [5 x [2 x i8]]* @a, i32 0, i32 %idxprom, i32 0
-  %0 = load i8, i8* %arrayidx1, align 1
+  %arrayidx1 = getelementptr inbounds [5 x [2 x i8]], ptr @a, i32 0, i32 %idxprom, i32 0
+  %0 = load i8, ptr %arrayidx1, align 1
   %conv = zext i8 %0 to i32
   %mul = mul nsw i32 %conv, 20
   %conv2 = trunc i32 %mul to i8

diff --git a/llvm/test/CodeGen/Hexagon/cexti16.ll b/llvm/test/CodeGen/Hexagon/cexti16.ll
index 56e1e49271308..e93ffa3f3ba90 100644
--- a/llvm/test/CodeGen/Hexagon/cexti16.ll
+++ b/llvm/test/CodeGen/Hexagon/cexti16.ll
@@ -6,8 +6,8 @@
 define signext i16 @foo(i16 zeroext %l) nounwind readonly {
 for.end:
   %idxprom = zext i16 %l to i32
-  %arrayidx1 = getelementptr inbounds [5 x [2 x i16]], [5 x [2 x i16]]* @a, i32 0, i32 %idxprom, i32 0
-  %0 = load i16, i16* %arrayidx1, align 2
+  %arrayidx1 = getelementptr inbounds [5 x [2 x i16]], ptr @a, i32 0, i32 %idxprom, i32 0
+  %0 = load i16, ptr %arrayidx1, align 2
   %conv = zext i16 %0 to i32
   %mul = mul nsw i32 %conv, 20
   %conv2 = trunc i32 %mul to i16

diff --git a/llvm/test/CodeGen/Hexagon/cfgopt-fall-through.ll b/llvm/test/CodeGen/Hexagon/cfgopt-fall-through.ll
index 5ade8709068ba..53c610d659ab6 100644
--- a/llvm/test/CodeGen/Hexagon/cfgopt-fall-through.ll
+++ b/llvm/test/CodeGen/Hexagon/cfgopt-fall-through.ll
@@ -48,16 +48,16 @@ b12:                                              ; preds = %b5
   ]
 
 b13:                                              ; preds = %b12, %b12
-  store i32 %a0, i32* undef, align 4
+  store i32 %a0, ptr undef, align 4
   br label %b17
 
 b14:                                              ; preds = %b12
-  store i16 undef, i16* undef, align 4
+  store i16 undef, ptr undef, align 4
   br label %b17
 
 b15:                                              ; preds = %b12, %b9
   %v16 = phi i32 [ 0, %b12 ], [ %v10, %b9 ]
-  store i32 undef, i32* undef, align 4
+  store i32 undef, ptr undef, align 4
   br label %b17
 
 b17:                                              ; preds = %b15, %b14, %b13, %b12, %b6

diff --git a/llvm/test/CodeGen/Hexagon/cfi-offset.ll b/llvm/test/CodeGen/Hexagon/cfi-offset.ll
index f3ee869fa43a9..ce750da9a6ca5 100644
--- a/llvm/test/CodeGen/Hexagon/cfi-offset.ll
+++ b/llvm/test/CodeGen/Hexagon/cfi-offset.ll
@@ -10,16 +10,16 @@
 
 target triple = "hexagon"
 
-define i64 @_Z3fooxxx(i64 %x, i64 %y, i64 %z) #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i64 @_Z3fooxxx(i64 %x, i64 %y, i64 %z) #0 personality ptr @__gxx_personality_v0 {
 entry:
   %call = invoke i64 @_Z3barxxx(i64 %x, i64 %y, i64 %z)
           to label %try.cont unwind label %lpad
 
 lpad:                                             ; preds = %entry
-  %0 = landingpad { i8*, i32 }
-          catch i8* null
-  %1 = extractvalue { i8*, i32 } %0, 0
-  %2 = tail call i8* @__cxa_begin_catch(i8* %1) #1
+  %0 = landingpad { ptr, i32 }
+          catch ptr null
+  %1 = extractvalue { ptr, i32 } %0, 0
+  %2 = tail call ptr @__cxa_begin_catch(ptr %1) #1
   tail call void @__cxa_end_catch()
   br label %try.cont
 
@@ -35,7 +35,7 @@ declare i64 @_Z3barxxx(i64, i64, i64) #0
 
 declare i32 @__gxx_personality_v0(...)
 
-declare i8* @__cxa_begin_catch(i8*)
+declare ptr @__cxa_begin_catch(ptr)
 
 declare void @__cxa_end_catch()
 

diff --git a/llvm/test/CodeGen/Hexagon/cfi_offset.ll b/llvm/test/CodeGen/Hexagon/cfi_offset.ll
index 18c5d46b97045..5795d7f8ec608 100644
--- a/llvm/test/CodeGen/Hexagon/cfi_offset.ll
+++ b/llvm/test/CodeGen/Hexagon/cfi_offset.ll
@@ -4,72 +4,70 @@
 ; CHECK: .cfi_offset r30
 
 @g0 = global i32 0, align 4
-@g1 = external constant i8*
+@g1 = external constant ptr
 
-define i32 @f0() personality i8* bitcast (i32 (...)* @f3 to i8*) {
+define i32 @f0() personality ptr @f3 {
 b0:
   %v0 = alloca i32, align 4
-  %v1 = alloca i8*
+  %v1 = alloca ptr
   %v2 = alloca i32
   %v3 = alloca i32, align 4
-  store i32 0, i32* %v0
-  %v4 = call i8* @f1(i32 4) #1
-  %v5 = bitcast i8* %v4 to i32*
-  store i32 20, i32* %v5
-  invoke void @f2(i8* %v4, i8* bitcast (i8** @g1 to i8*), i8* null) #2
+  store i32 0, ptr %v0
+  %v4 = call ptr @f1(i32 4) #1
+  store i32 20, ptr %v4
+  invoke void @f2(ptr %v4, ptr @g1, ptr null) #2
           to label %b6 unwind label %b1
 
 b1:                                               ; preds = %b0
-  %v6 = landingpad { i8*, i32 }
-          catch i8* bitcast (i8** @g1 to i8*)
-  %v7 = extractvalue { i8*, i32 } %v6, 0
-  store i8* %v7, i8** %v1
-  %v8 = extractvalue { i8*, i32 } %v6, 1
-  store i32 %v8, i32* %v2
+  %v6 = landingpad { ptr, i32 }
+          catch ptr @g1
+  %v7 = extractvalue { ptr, i32 } %v6, 0
+  store ptr %v7, ptr %v1
+  %v8 = extractvalue { ptr, i32 } %v6, 1
+  store i32 %v8, ptr %v2
   br label %b2
 
 b2:                                               ; preds = %b1
-  %v9 = load i32, i32* %v2
-  %v10 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @g1 to i8*)) #1
+  %v9 = load i32, ptr %v2
+  %v10 = call i32 @llvm.eh.typeid.for(ptr @g1) #1
   %v11 = icmp eq i32 %v9, %v10
   br i1 %v11, label %b3, label %b5
 
 b3:                                               ; preds = %b2
-  %v12 = load i8*, i8** %v1
-  %v13 = call i8* @f4(i8* %v12) #1
-  %v14 = bitcast i8* %v13 to i32*
-  %v15 = load i32, i32* %v14, align 4
-  store i32 %v15, i32* %v3, align 4
-  %v16 = load i32, i32* %v3, align 4
-  store i32 %v16, i32* @g0, align 4
+  %v12 = load ptr, ptr %v1
+  %v13 = call ptr @f4(ptr %v12) #1
+  %v15 = load i32, ptr %v13, align 4
+  store i32 %v15, ptr %v3, align 4
+  %v16 = load i32, ptr %v3, align 4
+  store i32 %v16, ptr @g0, align 4
   call void @f5() #1
   br label %b4
 
 b4:                                               ; preds = %b3
-  %v17 = load i32, i32* @g0, align 4
+  %v17 = load i32, ptr @g0, align 4
   ret i32 %v17
 
 b5:                                               ; preds = %b2
-  %v18 = load i8*, i8** %v1
-  %v19 = load i32, i32* %v2
-  %v20 = insertvalue { i8*, i32 } undef, i8* %v18, 0
-  %v21 = insertvalue { i8*, i32 } %v20, i32 %v19, 1
-  resume { i8*, i32 } %v21
+  %v18 = load ptr, ptr %v1
+  %v19 = load i32, ptr %v2
+  %v20 = insertvalue { ptr, i32 } undef, ptr %v18, 0
+  %v21 = insertvalue { ptr, i32 } %v20, i32 %v19, 1
+  resume { ptr, i32 } %v21
 
 b6:                                               ; preds = %b0
   unreachable
 }
 
-declare i8* @f1(i32)
+declare ptr @f1(i32)
 
-declare void @f2(i8*, i8*, i8*)
+declare void @f2(ptr, ptr, ptr)
 
 declare i32 @f3(...)
 
 ; Function Attrs: nounwind readnone
-declare i32 @llvm.eh.typeid.for(i8*) #0
+declare i32 @llvm.eh.typeid.for(ptr) #0
 
-declare i8* @f4(i8*)
+declare ptr @f4(ptr)
 
 declare void @f5()
 

diff --git a/llvm/test/CodeGen/Hexagon/cfi_offset2.ll b/llvm/test/CodeGen/Hexagon/cfi_offset2.ll
index 96a8ae5f6ff0a..b068110cc8a02 100644
--- a/llvm/test/CodeGen/Hexagon/cfi_offset2.ll
+++ b/llvm/test/CodeGen/Hexagon/cfi_offset2.ll
@@ -5,42 +5,41 @@
 ; CHECK: .cfi_offset r16, -16
 
 @g0 = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1
-@g1 = external constant i8*
+@g1 = external constant ptr
 @g2 = private unnamed_addr constant [15 x i8] c"blah blah blah\00", align 1
-@g3 = external constant i8*
+@g3 = external constant ptr
 @g4 = private unnamed_addr constant [2 x i8] c"{\00"
 @g5 = private unnamed_addr constant [2 x i8] c"}\00"
 @g6 = private unnamed_addr constant [27 x i8] c"FAIL:Unexpected exception.\00"
 
 ; Function Attrs: nounwind
-declare i32 @f0(i8* nocapture readonly, ...) #0
+declare i32 @f0(ptr nocapture readonly, ...) #0
 
 ; Function Attrs: nounwind
 define void @f1(i32 %a0) #0 {
 b0:
-  %v0 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i32 %a0)
+  %v0 = tail call i32 (ptr, ...) @f0(ptr @g0, i32 %a0)
   ret void
 }
 
-define i32 @f2(i32 %a0, i8** nocapture readnone %a1) personality i8* bitcast (i32 (...)* @f5 to i8*) {
+define i32 @f2(i32 %a0, ptr nocapture readnone %a1) personality ptr @f5 {
 b0:
-  %v0 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i32 %a0) #0
-  %v1 = tail call i32 @f8(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @g4, i32 0, i32 0)) #0
-  %v2 = tail call i8* @f3(i32 4) #0
-  %v3 = bitcast i8* %v2 to i8**
-  store i8* getelementptr inbounds ([15 x i8], [15 x i8]* @g2, i32 0, i32 0), i8** %v3, align 4, !tbaa !0
-  invoke void @f4(i8* %v2, i8* bitcast (i8** @g1 to i8*), i8* null) #2
+  %v0 = tail call i32 (ptr, ...) @f0(ptr @g0, i32 %a0) #0
+  %v1 = tail call i32 @f8(ptr @g4) #0
+  %v2 = tail call ptr @f3(i32 4) #0
+  store ptr @g2, ptr %v2, align 4, !tbaa !0
+  invoke void @f4(ptr %v2, ptr @g1, ptr null) #2
           to label %b9 unwind label %b1
 
 b1:                                               ; preds = %b0
-  %v4 = landingpad { i8*, i32 }
-          catch i8* bitcast (i8** @g1 to i8*)
-          catch i8* null
-  %v5 = extractvalue { i8*, i32 } %v4, 0
-  %v6 = extractvalue { i8*, i32 } %v4, 1
-  %v7 = tail call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @g1 to i8*)) #0
+  %v4 = landingpad { ptr, i32 }
+          catch ptr @g1
+          catch ptr null
+  %v5 = extractvalue { ptr, i32 } %v4, 0
+  %v6 = extractvalue { ptr, i32 } %v4, 1
+  %v7 = tail call i32 @llvm.eh.typeid.for(ptr @g1) #0
   %v8 = icmp eq i32 %v6, %v7
-  %v9 = tail call i8* @f6(i8* %v5) #0
+  %v9 = tail call ptr @f6(ptr %v5) #0
   br i1 %v8, label %b2, label %b3
 
 b2:                                               ; preds = %b1
@@ -48,28 +47,27 @@ b2:                                               ; preds = %b1
   br label %b4
 
 b3:                                               ; preds = %b1
-  %v10 = tail call i32 @f8(i8* getelementptr inbounds ([27 x i8], [27 x i8]* @g6, i32 0, i32 0))
+  %v10 = tail call i32 @f8(ptr @g6)
   tail call void @f7()
   br label %b4
 
 b4:                                               ; preds = %b3, %b2
-  %v11 = tail call i32 @f8(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @g5, i32 0, i32 0)) #0
-  %v12 = tail call i32 @f8(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @g4, i32 0, i32 0)) #0
-  %v13 = tail call i8* @f3(i32 4) #0
-  %v14 = bitcast i8* %v13 to i32*
-  store i32 777, i32* %v14, align 4, !tbaa !4
-  invoke void @f4(i8* %v13, i8* bitcast (i8** @g3 to i8*), i8* null) #2
+  %v11 = tail call i32 @f8(ptr @g5) #0
+  %v12 = tail call i32 @f8(ptr @g4) #0
+  %v13 = tail call ptr @f3(i32 4) #0
+  store i32 777, ptr %v13, align 4, !tbaa !4
+  invoke void @f4(ptr %v13, ptr @g3, ptr null) #2
           to label %b9 unwind label %b5
 
 b5:                                               ; preds = %b4
-  %v15 = landingpad { i8*, i32 }
-          catch i8* bitcast (i8** @g3 to i8*)
-          catch i8* null
-  %v16 = extractvalue { i8*, i32 } %v15, 0
-  %v17 = extractvalue { i8*, i32 } %v15, 1
-  %v18 = tail call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @g3 to i8*)) #0
+  %v15 = landingpad { ptr, i32 }
+          catch ptr @g3
+          catch ptr null
+  %v16 = extractvalue { ptr, i32 } %v15, 0
+  %v17 = extractvalue { ptr, i32 } %v15, 1
+  %v18 = tail call i32 @llvm.eh.typeid.for(ptr @g3) #0
   %v19 = icmp eq i32 %v17, %v18
-  %v20 = tail call i8* @f6(i8* %v16) #0
+  %v20 = tail call ptr @f6(ptr %v16) #0
   br i1 %v19, label %b6, label %b7
 
 b6:                                               ; preds = %b5
@@ -77,33 +75,33 @@ b6:                                               ; preds = %b5
   br label %b8
 
 b7:                                               ; preds = %b5
-  %v21 = tail call i32 @f8(i8* getelementptr inbounds ([27 x i8], [27 x i8]* @g6, i32 0, i32 0))
+  %v21 = tail call i32 @f8(ptr @g6)
   tail call void @f7()
   br label %b8
 
 b8:                                               ; preds = %b7, %b6
-  %v22 = tail call i32 @f8(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @g5, i32 0, i32 0)) #0
+  %v22 = tail call i32 @f8(ptr @g5) #0
   ret i32 0
 
 b9:                                               ; preds = %b4, %b0
   unreachable
 }
 
-declare i8* @f3(i32)
+declare ptr @f3(i32)
 
-declare void @f4(i8*, i8*, i8*)
+declare void @f4(ptr, ptr, ptr)
 
 declare i32 @f5(...)
 
 ; Function Attrs: nounwind readnone
-declare i32 @llvm.eh.typeid.for(i8*) #1
+declare i32 @llvm.eh.typeid.for(ptr) #1
 
-declare i8* @f6(i8*)
+declare ptr @f6(ptr)
 
 declare void @f7()
 
 ; Function Attrs: nounwind
-declare i32 @f8(i8* nocapture readonly) #0
+declare i32 @f8(ptr nocapture readonly) #0
 
 attributes #0 = { nounwind }
 attributes #1 = { nounwind readnone }

diff --git a/llvm/test/CodeGen/Hexagon/check-dot-new.ll b/llvm/test/CodeGen/Hexagon/check-dot-new.ll
index c9c0f1d268128..d9240fe9b31d4 100644
--- a/llvm/test/CodeGen/Hexagon/check-dot-new.ll
+++ b/llvm/test/CodeGen/Hexagon/check-dot-new.ll
@@ -9,7 +9,7 @@
 define void @f0(i32 %a0) #0 {
 b0:
   %v0 = add i32 %a0, 1
-  store i32 %v0, i32* @g0, align 4
+  store i32 %v0, ptr @g0, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/check-subregister-for-latency.ll b/llvm/test/CodeGen/Hexagon/check-subregister-for-latency.ll
index 8290f768585dd..0cd24b883a1f1 100644
--- a/llvm/test/CodeGen/Hexagon/check-subregister-for-latency.ll
+++ b/llvm/test/CodeGen/Hexagon/check-subregister-for-latency.ll
@@ -4,45 +4,45 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-%s.0 = type { double, double, double, double, double, double, i32, double, double, double, double, i8*, i8, [9 x i8], double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, [200 x i8*], [32 x i8*], [32 x i8], i32 }
+%s.0 = type { double, double, double, double, double, double, i32, double, double, double, double, ptr, i8, [9 x i8], double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, [200 x ptr], [32 x ptr], [32 x i8], i32 }
 
 define hidden fastcc void @f0() unnamed_addr #0 {
 b0:
-  %v0 = getelementptr inbounds %s.0, %s.0* null, i32 0, i32 33
-  %v1 = getelementptr inbounds %s.0, %s.0* null, i32 0, i32 34
+  %v0 = getelementptr inbounds %s.0, ptr null, i32 0, i32 33
+  %v1 = getelementptr inbounds %s.0, ptr null, i32 0, i32 34
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
-  %v2 = phi i32* [ undef, %b0 ], [ %v27, %b1 ]
-  %v3 = load i32, i32* %v2, align 1, !tbaa !1
-  %v4 = getelementptr inbounds [0 x %s.0*], [0 x %s.0*]* null, i32 0, i32 %v3
-  %v5 = load %s.0*, %s.0** %v4, align 1, !tbaa !5
-  %v6 = load double, double* undef, align 1, !tbaa !7
+  %v2 = phi ptr [ undef, %b0 ], [ %v27, %b1 ]
+  %v3 = load i32, ptr %v2, align 1, !tbaa !1
+  %v4 = getelementptr inbounds [0 x ptr], ptr null, i32 0, i32 %v3
+  %v5 = load ptr, ptr %v4, align 1, !tbaa !5
+  %v6 = load double, ptr undef, align 1, !tbaa !7
   %v7 = fdiv double 1.000000e+00, %v6
   %v8 = fmul double %v7, 0.000000e+00
   %v9 = fmul double %v7, 0.000000e+00
   %v10 = fmul double %v8, -4.800000e+01
   %v11 = fmul double %v9, 1.680000e+02
   %v12 = fmul double %v7, 0.000000e+00
-  %v13 = load double, double* null, align 1, !tbaa !7
+  %v13 = load double, ptr null, align 1, !tbaa !7
   %v14 = fmul double %v7, %v13
   %v15 = fmul double %v12, 0.000000e+00
-  %v16 = getelementptr inbounds %s.0, %s.0* %v5, i32 0, i32 30
+  %v16 = getelementptr inbounds %s.0, ptr %v5, i32 0, i32 30
   %v17 = fsub double 0.000000e+00, %v15
-  store double %v17, double* %v16, align 8, !tbaa !9
+  store double %v17, ptr %v16, align 8, !tbaa !9
   %v18 = fmul double %v14, 0.000000e+00
-  %v19 = getelementptr inbounds %s.0, %s.0* %v5, i32 0, i32 32
-  %v20 = load double, double* %v19, align 8, !tbaa !11
+  %v19 = getelementptr inbounds %s.0, ptr %v5, i32 0, i32 32
+  %v20 = load double, ptr %v19, align 8, !tbaa !11
   %v21 = fsub double %v20, %v18
-  store double %v21, double* %v19, align 8, !tbaa !11
+  store double %v21, ptr %v19, align 8, !tbaa !11
   %v22 = fmul double %v10, 0.000000e+00
   %v23 = fadd double 0.000000e+00, %v22
   %v24 = fmul double 0.000000e+00, %v11
   %v25 = fadd double %v23, %v24
   %v26 = fsub double 0.000000e+00, %v25
-  store double %v26, double* %v0, align 8, !tbaa !12
-  store double 0.000000e+00, double* %v1, align 8, !tbaa !13
-  %v27 = getelementptr i32, i32* %v2, i32 1
+  store double %v26, ptr %v0, align 8, !tbaa !12
+  store double 0.000000e+00, ptr %v1, align 8, !tbaa !13
+  %v27 = getelementptr i32, ptr %v2, i32 1
   br label %b1
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/circ-load-isel.ll b/llvm/test/CodeGen/Hexagon/circ-load-isel.ll
index dd343a3fdf1c8..61d0beea14aef 100644
--- a/llvm/test/CodeGen/Hexagon/circ-load-isel.ll
+++ b/llvm/test/CodeGen/Hexagon/circ-load-isel.ll
@@ -8,11 +8,11 @@ target triple = "hexagon"
 ; Function Attrs: nounwind optsize
 define void @circ2() #0 {
 entry:
-  store i32 0, i32* @l, align 4
-  %0 = tail call i8* @llvm.hexagon.circ.ldw(i8* undef, i8* undef, i32 150995968, i32 4)
+  store i32 0, ptr @l, align 4
+  %0 = tail call ptr @llvm.hexagon.circ.ldw(ptr undef, ptr undef, i32 150995968, i32 4)
   ret void
 }
 
-declare i8* @llvm.hexagon.circ.ldw(i8*, i8*, i32, i32) #1
+declare ptr @llvm.hexagon.circ.ldw(ptr, ptr, i32, i32) #1
 attributes #0 = { nounwind optsize }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/circ_ld.ll b/llvm/test/CodeGen/Hexagon/circ_ld.ll
index 8c158bb800130..0984e20347f92 100644
--- a/llvm/test/CodeGen/Hexagon/circ_ld.ll
+++ b/llvm/test/CodeGen/Hexagon/circ_ld.ll
@@ -17,117 +17,103 @@
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
 target triple = "hexagon"
 
-define signext i8 @foo1(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
+define signext i8 @foo1(i16 zeroext %filtMemLen, ptr %filtMemLR, i16 signext %filtMemIndex) nounwind {
 entry:
   %inputLR = alloca i8, align 1
   %conv = zext i16 %filtMemLen to i32
   %shr1 = lshr i32 %conv, 1
   %idxprom = sext i16 %filtMemIndex to i32
-  %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
-  %0 = bitcast i16* %arrayidx to i8*
+  %arrayidx = getelementptr inbounds i16, ptr %filtMemLR, i32 %idxprom
   %or = or i32 %shr1, 33554432
 ; CHECK: = memb(r{{[0-9]+}}++#-1:circ(m{{[0-1]}}))
-  %1 = call i8* @llvm.hexagon.circ.ldb(i8* %0, i8* %inputLR, i32 %or, i32 -1)
-  %2 = load i8, i8* %inputLR, align 1, !tbaa !0
-  ret i8 %2
+  %0 = call ptr @llvm.hexagon.circ.ldb(ptr %arrayidx, ptr %inputLR, i32 %or, i32 -1)
+  %1 = load i8, ptr %inputLR, align 1, !tbaa !0
+  ret i8 %1
 }
 
-declare i8* @llvm.hexagon.circ.ldb(i8*, i8*, i32, i32) nounwind
+declare ptr @llvm.hexagon.circ.ldb(ptr, ptr, i32, i32) nounwind
 
-define i64 @foo2(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
+define i64 @foo2(i16 zeroext %filtMemLen, ptr %filtMemLR, i16 signext %filtMemIndex) nounwind {
 entry:
   %inputLR = alloca i64, align 8
   %conv = zext i16 %filtMemLen to i32
   %shr1 = lshr i32 %conv, 1
   %idxprom = sext i16 %filtMemIndex to i32
-  %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
-  %0 = bitcast i16* %arrayidx to i8*
-  %1 = bitcast i64* %inputLR to i8*
+  %arrayidx = getelementptr inbounds i16, ptr %filtMemLR, i32 %idxprom
   %shl = shl nuw nsw i32 %shr1, 3
   %or = or i32 %shl, 83886080
 ; CHECK: = memd(r{{[0-9]+}}++#-8:circ(m{{[0-1]}}))
-  %2 = call i8* @llvm.hexagon.circ.ldd(i8* %0, i8* %1, i32 %or, i32 -8)
-  %3 = bitcast i8* %1 to i64*
-  %4 = load i64, i64* %3, align 8, !tbaa !0
-  ret i64 %4
+  %0 = call ptr @llvm.hexagon.circ.ldd(ptr %arrayidx, ptr %inputLR, i32 %or, i32 -8)
+  %1 = load i64, ptr %inputLR, align 8, !tbaa !0
+  ret i64 %1
 }
 
-declare i8* @llvm.hexagon.circ.ldd(i8*, i8*, i32, i32) nounwind
+declare ptr @llvm.hexagon.circ.ldd(ptr, ptr, i32, i32) nounwind
 
-define signext i16 @foo3(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
+define signext i16 @foo3(i16 zeroext %filtMemLen, ptr %filtMemLR, i16 signext %filtMemIndex) nounwind {
 entry:
   %inputLR = alloca i16, align 2
   %conv = zext i16 %filtMemLen to i32
   %shr1 = and i32 %conv, 65534
   %idxprom = sext i16 %filtMemIndex to i32
-  %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
-  %0 = bitcast i16* %arrayidx to i8*
-  %1 = bitcast i16* %inputLR to i8*
+  %arrayidx = getelementptr inbounds i16, ptr %filtMemLR, i32 %idxprom
   %or = or i32 %shr1, 50331648
 ; CHECK: = memh(r{{[0-9]+}}++#-2:circ(m{{[0-1]}}))
-  %2 = call i8* @llvm.hexagon.circ.ldh(i8* %0, i8* %1, i32 %or, i32 -2)
-  %3 = bitcast i8* %1 to i16*
-  %4 = load i16, i16* %3, align 2, !tbaa !2
-  ret i16 %4
+  %0 = call ptr @llvm.hexagon.circ.ldh(ptr %arrayidx, ptr %inputLR, i32 %or, i32 -2)
+  %1 = load i16, ptr %inputLR, align 2, !tbaa !2
+  ret i16 %1
 }
 
-declare i8* @llvm.hexagon.circ.ldh(i8*, i8*, i32, i32) nounwind
+declare ptr @llvm.hexagon.circ.ldh(ptr, ptr, i32, i32) nounwind
 
-define zeroext i8 @foo4(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
+define zeroext i8 @foo4(i16 zeroext %filtMemLen, ptr %filtMemLR, i16 signext %filtMemIndex) nounwind {
 entry:
   %inputLR = alloca i8, align 1
   %conv = zext i16 %filtMemLen to i32
   %shr1 = lshr i32 %conv, 1
   %idxprom = sext i16 %filtMemIndex to i32
-  %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
-  %0 = bitcast i16* %arrayidx to i8*
+  %arrayidx = getelementptr inbounds i16, ptr %filtMemLR, i32 %idxprom
   %or = or i32 %shr1, 33554432
 ; CHECK: = memub(r{{[0-9]+}}++#-1:circ(m{{[0-1]}}))
-  %1 = call i8* @llvm.hexagon.circ.ldub(i8* %0, i8* %inputLR, i32 %or, i32 -1)
-  %2 = load i8, i8* %inputLR, align 1, !tbaa !0
-  ret i8 %2
+  %0 = call ptr @llvm.hexagon.circ.ldub(ptr %arrayidx, ptr %inputLR, i32 %or, i32 -1)
+  %1 = load i8, ptr %inputLR, align 1, !tbaa !0
+  ret i8 %1
 }
 
-declare i8* @llvm.hexagon.circ.ldub(i8*, i8*, i32, i32) nounwind
+declare ptr @llvm.hexagon.circ.ldub(ptr, ptr, i32, i32) nounwind
 
-define zeroext i16 @foo5(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
+define zeroext i16 @foo5(i16 zeroext %filtMemLen, ptr %filtMemLR, i16 signext %filtMemIndex) nounwind {
 entry:
   %inputLR = alloca i16, align 2
   %conv = zext i16 %filtMemLen to i32
   %shr1 = and i32 %conv, 65534
   %idxprom = sext i16 %filtMemIndex to i32
-  %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
-  %0 = bitcast i16* %arrayidx to i8*
-  %1 = bitcast i16* %inputLR to i8*
+  %arrayidx = getelementptr inbounds i16, ptr %filtMemLR, i32 %idxprom
   %or = or i32 %shr1, 50331648
 ; CHECK: = memuh(r{{[0-9]+}}++#-2:circ(m{{[0-1]}}))
-  %2 = call i8* @llvm.hexagon.circ.lduh(i8* %0, i8* %1, i32 %or, i32 -2)
-  %3 = bitcast i8* %1 to i16*
-  %4 = load i16, i16* %3, align 2, !tbaa !2
-  ret i16 %4
+  %0 = call ptr @llvm.hexagon.circ.lduh(ptr %arrayidx, ptr %inputLR, i32 %or, i32 -2)
+  %1 = load i16, ptr %inputLR, align 2, !tbaa !2
+  ret i16 %1
 }
 
-declare i8* @llvm.hexagon.circ.lduh(i8*, i8*, i32, i32) nounwind
+declare ptr @llvm.hexagon.circ.lduh(ptr, ptr, i32, i32) nounwind
 
-define i32 @foo6(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
+define i32 @foo6(i16 zeroext %filtMemLen, ptr %filtMemLR, i16 signext %filtMemIndex) nounwind {
 entry:
   %inputLR = alloca i32, align 4
   %conv = zext i16 %filtMemLen to i32
   %shr1 = lshr i32 %conv, 1
   %idxprom = sext i16 %filtMemIndex to i32
-  %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
-  %0 = bitcast i16* %arrayidx to i8*
-  %1 = bitcast i32* %inputLR to i8*
+  %arrayidx = getelementptr inbounds i16, ptr %filtMemLR, i32 %idxprom
   %shl = shl nuw nsw i32 %shr1, 2
   %or = or i32 %shl, 67108864
 ; CHECK: = memw(r{{[0-9]+}}++#-4:circ(m{{[0-1]}}))
-  %2 = call i8* @llvm.hexagon.circ.ldw(i8* %0, i8* %1, i32 %or, i32 -4)
-  %3 = bitcast i8* %1 to i32*
-  %4 = load i32, i32* %3, align 4, !tbaa !3
-  ret i32 %4
+  %0 = call ptr @llvm.hexagon.circ.ldw(ptr %arrayidx, ptr %inputLR, i32 %or, i32 -4)
+  %1 = load i32, ptr %inputLR, align 4, !tbaa !3
+  ret i32 %1
 }
 
-declare i8* @llvm.hexagon.circ.ldw(i8*, i8*, i32, i32) nounwind
+declare ptr @llvm.hexagon.circ.ldw(ptr, ptr, i32, i32) nounwind
 
 !0 = !{!"omnipotent char", !1}
 !1 = !{!"Simple C/C++ TBAA"}

diff  --git a/llvm/test/CodeGen/Hexagon/circ_ldd_bug.ll b/llvm/test/CodeGen/Hexagon/circ_ldd_bug.ll
index 610cae0dcad93..7b60e4c9d9ad9 100644
--- a/llvm/test/CodeGen/Hexagon/circ_ldd_bug.ll
+++ b/llvm/test/CodeGen/Hexagon/circ_ldd_bug.ll
@@ -14,35 +14,31 @@ target triple = "hexagon"
 ; The scheduler would move the CRRegs to the top of the loop. The allocator
 ; would try to spill the CRRegs after running out of them. We don't have code to
 ; spill CRRegs and the above assertion would be triggered.
-declare i8* @llvm.hexagon.circ.ldd(i8*, i8*, i32, i32) nounwind
+declare ptr @llvm.hexagon.circ.ldd(ptr, ptr, i32, i32) nounwind
 
-define i32 @test(i16 zeroext %var0, i16* %var1, i16 signext %var2, i16* nocapture %var3) nounwind {
+define i32 @test(i16 zeroext %var0, ptr %var1, i16 signext %var2, ptr nocapture %var3) nounwind {
 entry:
   %var4 = alloca i64, align 8
   %conv = zext i16 %var0 to i32
   %shr5 = lshr i32 %conv, 1
   %idxprom = sext i16 %var2 to i32
-  %arrayidx = getelementptr inbounds i16, i16* %var1, i32 %idxprom
-  %0 = bitcast i16* %var3 to i64*
-  %1 = load i64, i64* %0, align 8
-  %2 = bitcast i16* %arrayidx to i8*
-  %3 = bitcast i64* %var4 to i8*
+  %arrayidx = getelementptr inbounds i16, ptr %var1, i32 %idxprom
+  %0 = load i64, ptr %var3, align 8
   %shl = shl nuw nsw i32 %shr5, 3
   %or = or i32 %shl, 83886080
-  %4 = call i8* @llvm.hexagon.circ.ldd(i8* %2, i8* %3, i32 %or, i32 -8)
+  %1 = call ptr @llvm.hexagon.circ.ldd(ptr %arrayidx, ptr %var4, i32 %or, i32 -8)
   %sub = add nsw i32 %shr5, -1
   %cmp6 = icmp sgt i32 %sub, 0
-  %5 = load i64, i64* %var4, align 8
-  %6 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 0, i64 %1, i64 %5)
+  %2 = load i64, ptr %var4, align 8
+  %3 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 0, i64 %0, i64 %2)
   br i1 %cmp6, label %for.body.lr.ph, label %for.end
 
 for.body.lr.ph:                                   ; preds = %entry
-  %incdec.ptr = getelementptr inbounds i16, i16* %var3, i32 4
-  %7 = bitcast i16* %incdec.ptr to i64*
-  %8 = zext i16 %var0 to i32
-  %9 = lshr i32 %8, 1
-  %10 = add i32 %9, -1
-  %xtraiter = urem i32 %10, 8
+  %incdec.ptr = getelementptr inbounds i16, ptr %var3, i32 4
+  %4 = zext i16 %var0 to i32
+  %5 = lshr i32 %4, 1
+  %6 = add i32 %5, -1
+  %xtraiter = urem i32 %6, 8
   %lcmp = icmp ne i32 %xtraiter, 0
   br i1 %lcmp, label %unr.cmp60, label %for.body.lr.ph.split.split
 
@@ -71,179 +67,179 @@ unr.cmp:                                          ; preds = %unr.cmp24
   br i1 %un.tmp, label %for.body.unr13, label %for.body.unr
 
 for.body.unr:                                     ; preds = %unr.cmp
-  %11 = call i8* @llvm.hexagon.circ.ldd(i8* %4, i8* %3, i32 %or, i32 -8)
-  %12 = load i64, i64* %7, align 8
+  %7 = call ptr @llvm.hexagon.circ.ldd(ptr %1, ptr %var4, i32 %or, i32 -8)
+  %8 = load i64, ptr %incdec.ptr, align 8
   %inc.unr = add nsw i32 0, 1
-  %incdec.ptr4.unr = getelementptr inbounds i64, i64* %7, i32 1
+  %incdec.ptr4.unr = getelementptr inbounds i64, ptr %incdec.ptr, i32 1
   %cmp.unr = icmp slt i32 %inc.unr, %sub
-  %13 = load i64, i64* %var4, align 8
-  %14 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %6, i64 %12, i64 %13)
+  %9 = load i64, ptr %var4, align 8
+  %10 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %3, i64 %8, i64 %9)
   br label %for.body.unr13
 
 for.body.unr13:                                   ; preds = %for.body.unr, %unr.cmp
-  %15 = phi i64 [ %6, %unr.cmp ], [ %14, %for.body.unr ]
-  %pvar6.09.unr = phi i64* [ %7, %unr.cmp ], [ %incdec.ptr4.unr, %for.body.unr ]
-  %var8.0.in8.unr = phi i8* [ %4, %unr.cmp ], [ %11, %for.body.unr ]
+  %11 = phi i64 [ %3, %unr.cmp ], [ %10, %for.body.unr ]
+  %pvar6.09.unr = phi ptr [ %incdec.ptr, %unr.cmp ], [ %incdec.ptr4.unr, %for.body.unr ]
+  %var8.0.in8.unr = phi ptr [ %1, %unr.cmp ], [ %7, %for.body.unr ]
   %i.07.unr = phi i32 [ 0, %unr.cmp ], [ %inc.unr, %for.body.unr ]
-  %16 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr, i8* %3, i32 %or, i32 -8)
-  %17 = load i64, i64* %pvar6.09.unr, align 8
+  %12 = call ptr @llvm.hexagon.circ.ldd(ptr %var8.0.in8.unr, ptr %var4, i32 %or, i32 -8)
+  %13 = load i64, ptr %pvar6.09.unr, align 8
   %inc.unr14 = add nsw i32 %i.07.unr, 1
-  %incdec.ptr4.unr15 = getelementptr inbounds i64, i64* %pvar6.09.unr, i32 1
+  %incdec.ptr4.unr15 = getelementptr inbounds i64, ptr %pvar6.09.unr, i32 1
   %cmp.unr16 = icmp slt i32 %inc.unr14, %sub
-  %18 = load i64, i64* %var4, align 8
-  %19 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %15, i64 %17, i64 %18)
+  %14 = load i64, ptr %var4, align 8
+  %15 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %11, i64 %13, i64 %14)
   br label %for.body.unr17
 
 for.body.unr17:                                   ; preds = %for.body.unr13, %unr.cmp24
-  %20 = phi i64 [ %6, %unr.cmp24 ], [ %19, %for.body.unr13 ]
-  %pvar6.09.unr18 = phi i64* [ %7, %unr.cmp24 ], [ %incdec.ptr4.unr15, %for.body.unr13 ]
-  %var8.0.in8.unr19 = phi i8* [ %4, %unr.cmp24 ], [ %16, %for.body.unr13 ]
+  %16 = phi i64 [ %3, %unr.cmp24 ], [ %15, %for.body.unr13 ]
+  %pvar6.09.unr18 = phi ptr [ %incdec.ptr, %unr.cmp24 ], [ %incdec.ptr4.unr15, %for.body.unr13 ]
+  %var8.0.in8.unr19 = phi ptr [ %1, %unr.cmp24 ], [ %12, %for.body.unr13 ]
   %i.07.unr20 = phi i32 [ 0, %unr.cmp24 ], [ %inc.unr14, %for.body.unr13 ]
-  %21 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr19, i8* %3, i32 %or, i32 -8)
-  %22 = load i64, i64* %pvar6.09.unr18, align 8
+  %17 = call ptr @llvm.hexagon.circ.ldd(ptr %var8.0.in8.unr19, ptr %var4, i32 %or, i32 -8)
+  %18 = load i64, ptr %pvar6.09.unr18, align 8
   %inc.unr21 = add nsw i32 %i.07.unr20, 1
-  %incdec.ptr4.unr22 = getelementptr inbounds i64, i64* %pvar6.09.unr18, i32 1
+  %incdec.ptr4.unr22 = getelementptr inbounds i64, ptr %pvar6.09.unr18, i32 1
   %cmp.unr23 = icmp slt i32 %inc.unr21, %sub
-  %23 = load i64, i64* %var4, align 8
-  %24 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %20, i64 %22, i64 %23)
+  %19 = load i64, ptr %var4, align 8
+  %20 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %16, i64 %18, i64 %19)
   br label %for.body.unr26
 
 for.body.unr26:                                   ; preds = %for.body.unr17, %unr.cmp33
-  %25 = phi i64 [ %6, %unr.cmp33 ], [ %24, %for.body.unr17 ]
-  %pvar6.09.unr27 = phi i64* [ %7, %unr.cmp33 ], [ %incdec.ptr4.unr22, %for.body.unr17 ]
-  %var8.0.in8.unr28 = phi i8* [ %4, %unr.cmp33 ], [ %21, %for.body.unr17 ]
+  %21 = phi i64 [ %3, %unr.cmp33 ], [ %20, %for.body.unr17 ]
+  %pvar6.09.unr27 = phi ptr [ %incdec.ptr, %unr.cmp33 ], [ %incdec.ptr4.unr22, %for.body.unr17 ]
+  %var8.0.in8.unr28 = phi ptr [ %1, %unr.cmp33 ], [ %17, %for.body.unr17 ]
   %i.07.unr29 = phi i32 [ 0, %unr.cmp33 ], [ %inc.unr21, %for.body.unr17 ]
-  %26 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr28, i8* %3, i32 %or, i32 -8)
-  %27 = load i64, i64* %pvar6.09.unr27, align 8
+  %22 = call ptr @llvm.hexagon.circ.ldd(ptr %var8.0.in8.unr28, ptr %var4, i32 %or, i32 -8)
+  %23 = load i64, ptr %pvar6.09.unr27, align 8
   %inc.unr30 = add nsw i32 %i.07.unr29, 1
-  %incdec.ptr4.unr31 = getelementptr inbounds i64, i64* %pvar6.09.unr27, i32 1
+  %incdec.ptr4.unr31 = getelementptr inbounds i64, ptr %pvar6.09.unr27, i32 1
   %cmp.unr32 = icmp slt i32 %inc.unr30, %sub
-  %28 = load i64, i64* %var4, align 8
-  %29 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %25, i64 %27, i64 %28)
+  %24 = load i64, ptr %var4, align 8
+  %25 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %21, i64 %23, i64 %24)
   br label %for.body.unr35
 
 for.body.unr35:                                   ; preds = %for.body.unr26, %unr.cmp42
-  %30 = phi i64 [ %6, %unr.cmp42 ], [ %29, %for.body.unr26 ]
-  %pvar6.09.unr36 = phi i64* [ %7, %unr.cmp42 ], [ %incdec.ptr4.unr31, %for.body.unr26 ]
-  %var8.0.in8.unr37 = phi i8* [ %4, %unr.cmp42 ], [ %26, %for.body.unr26 ]
+  %26 = phi i64 [ %3, %unr.cmp42 ], [ %25, %for.body.unr26 ]
+  %pvar6.09.unr36 = phi ptr [ %incdec.ptr, %unr.cmp42 ], [ %incdec.ptr4.unr31, %for.body.unr26 ]
+  %var8.0.in8.unr37 = phi ptr [ %1, %unr.cmp42 ], [ %22, %for.body.unr26 ]
   %i.07.unr38 = phi i32 [ 0, %unr.cmp42 ], [ %inc.unr30, %for.body.unr26 ]
-  %31 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr37, i8* %3, i32 %or, i32 -8)
-  %32 = load i64, i64* %pvar6.09.unr36, align 8
+  %27 = call ptr @llvm.hexagon.circ.ldd(ptr %var8.0.in8.unr37, ptr %var4, i32 %or, i32 -8)
+  %28 = load i64, ptr %pvar6.09.unr36, align 8
   %inc.unr39 = add nsw i32 %i.07.unr38, 1
-  %incdec.ptr4.unr40 = getelementptr inbounds i64, i64* %pvar6.09.unr36, i32 1
+  %incdec.ptr4.unr40 = getelementptr inbounds i64, ptr %pvar6.09.unr36, i32 1
   %cmp.unr41 = icmp slt i32 %inc.unr39, %sub
-  %33 = load i64, i64* %var4, align 8
-  %34 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %30, i64 %32, i64 %33)
+  %29 = load i64, ptr %var4, align 8
+  %30 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %26, i64 %28, i64 %29)
   br label %for.body.unr44
 
 for.body.unr44:                                   ; preds = %for.body.unr35, %unr.cmp51
-  %35 = phi i64 [ %6, %unr.cmp51 ], [ %34, %for.body.unr35 ]
-  %pvar6.09.unr45 = phi i64* [ %7, %unr.cmp51 ], [ %incdec.ptr4.unr40, %for.body.unr35 ]
-  %var8.0.in8.unr46 = phi i8* [ %4, %unr.cmp51 ], [ %31, %for.body.unr35 ]
+  %31 = phi i64 [ %3, %unr.cmp51 ], [ %30, %for.body.unr35 ]
+  %pvar6.09.unr45 = phi ptr [ %incdec.ptr, %unr.cmp51 ], [ %incdec.ptr4.unr40, %for.body.unr35 ]
+  %var8.0.in8.unr46 = phi ptr [ %1, %unr.cmp51 ], [ %27, %for.body.unr35 ]
   %i.07.unr47 = phi i32 [ 0, %unr.cmp51 ], [ %inc.unr39, %for.body.unr35 ]
-  %36 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr46, i8* %3, i32 %or, i32 -8)
-  %37 = load i64, i64* %pvar6.09.unr45, align 8
+  %32 = call ptr @llvm.hexagon.circ.ldd(ptr %var8.0.in8.unr46, ptr %var4, i32 %or, i32 -8)
+  %33 = load i64, ptr %pvar6.09.unr45, align 8
   %inc.unr48 = add nsw i32 %i.07.unr47, 1
-  %incdec.ptr4.unr49 = getelementptr inbounds i64, i64* %pvar6.09.unr45, i32 1
+  %incdec.ptr4.unr49 = getelementptr inbounds i64, ptr %pvar6.09.unr45, i32 1
   %cmp.unr50 = icmp slt i32 %inc.unr48, %sub
-  %38 = load i64, i64* %var4, align 8
-  %39 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %35, i64 %37, i64 %38)
+  %34 = load i64, ptr %var4, align 8
+  %35 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %31, i64 %33, i64 %34)
   br label %for.body.unr53
 
 for.body.unr53:                                   ; preds = %for.body.unr44, %unr.cmp60
-  %40 = phi i64 [ %6, %unr.cmp60 ], [ %39, %for.body.unr44 ]
-  %pvar6.09.unr54 = phi i64* [ %7, %unr.cmp60 ], [ %incdec.ptr4.unr49, %for.body.unr44 ]
-  %var8.0.in8.unr55 = phi i8* [ %4, %unr.cmp60 ], [ %36, %for.body.unr44 ]
+  %36 = phi i64 [ %3, %unr.cmp60 ], [ %35, %for.body.unr44 ]
+  %pvar6.09.unr54 = phi ptr [ %incdec.ptr, %unr.cmp60 ], [ %incdec.ptr4.unr49, %for.body.unr44 ]
+  %var8.0.in8.unr55 = phi ptr [ %1, %unr.cmp60 ], [ %32, %for.body.unr44 ]
   %i.07.unr56 = phi i32 [ 0, %unr.cmp60 ], [ %inc.unr48, %for.body.unr44 ]
-  %41 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr55, i8* %3, i32 %or, i32 -8)
-  %42 = load i64, i64* %pvar6.09.unr54, align 8
+  %37 = call ptr @llvm.hexagon.circ.ldd(ptr %var8.0.in8.unr55, ptr %var4, i32 %or, i32 -8)
+  %38 = load i64, ptr %pvar6.09.unr54, align 8
   %inc.unr57 = add nsw i32 %i.07.unr56, 1
-  %incdec.ptr4.unr58 = getelementptr inbounds i64, i64* %pvar6.09.unr54, i32 1
+  %incdec.ptr4.unr58 = getelementptr inbounds i64, ptr %pvar6.09.unr54, i32 1
   %cmp.unr59 = icmp slt i32 %inc.unr57, %sub
-  %43 = load i64, i64* %var4, align 8
-  %44 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %40, i64 %42, i64 %43)
+  %39 = load i64, ptr %var4, align 8
+  %40 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %36, i64 %38, i64 %39)
   br label %for.body.lr.ph.split
 
 for.body.lr.ph.split:                             ; preds = %for.body.unr53
-  %45 = icmp ult i32 %10, 8
-  br i1 %45, label %for.end.loopexit, label %for.body.lr.ph.split.split
+  %41 = icmp ult i32 %6, 8
+  br i1 %41, label %for.end.loopexit, label %for.body.lr.ph.split.split
 
 for.body.lr.ph.split.split:                       ; preds = %for.body.lr.ph.split, %for.body.lr.ph
-  %.unr = phi i64 [ %44, %for.body.lr.ph.split ], [ %6, %for.body.lr.ph ]
-  %pvar6.09.unr62 = phi i64* [ %incdec.ptr4.unr58, %for.body.lr.ph.split ], [ %7, %for.body.lr.ph ]
-  %var8.0.in8.unr63 = phi i8* [ %41, %for.body.lr.ph.split ], [ %4, %for.body.lr.ph ]
+  %.unr = phi i64 [ %40, %for.body.lr.ph.split ], [ %3, %for.body.lr.ph ]
+  %pvar6.09.unr62 = phi ptr [ %incdec.ptr4.unr58, %for.body.lr.ph.split ], [ %incdec.ptr, %for.body.lr.ph ]
+  %var8.0.in8.unr63 = phi ptr [ %37, %for.body.lr.ph.split ], [ %1, %for.body.lr.ph ]
   %i.07.unr64 = phi i32 [ %inc.unr57, %for.body.lr.ph.split ], [ 0, %for.body.lr.ph ]
-  %.lcssa12.unr = phi i64 [ %44, %for.body.lr.ph.split ], [ 0, %for.body.lr.ph ]
+  %.lcssa12.unr = phi i64 [ %40, %for.body.lr.ph.split ], [ 0, %for.body.lr.ph ]
   br label %for.body
 
 for.body:                                         ; preds = %for.body, %for.body.lr.ph.split.split
-  %46 = phi i64 [ %.unr, %for.body.lr.ph.split.split ], [ %78, %for.body ]
-  %pvar6.09 = phi i64* [ %pvar6.09.unr62, %for.body.lr.ph.split.split ], [ %scevgep71, %for.body ]
-  %var8.0.in8 = phi i8* [ %var8.0.in8.unr63, %for.body.lr.ph.split.split ], [ %75, %for.body ]
+  %42 = phi i64 [ %.unr, %for.body.lr.ph.split.split ], [ %74, %for.body ]
+  %pvar6.09 = phi ptr [ %pvar6.09.unr62, %for.body.lr.ph.split.split ], [ %scevgep71, %for.body ]
+  %var8.0.in8 = phi ptr [ %var8.0.in8.unr63, %for.body.lr.ph.split.split ], [ %71, %for.body ]
   %i.07 = phi i32 [ %i.07.unr64, %for.body.lr.ph.split.split ], [ %inc.7, %for.body ]
-  %47 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8, i8* %3, i32 %or, i32 -8)
-  %48 = load i64, i64* %pvar6.09, align 8
+  %43 = call ptr @llvm.hexagon.circ.ldd(ptr %var8.0.in8, ptr %var4, i32 %or, i32 -8)
+  %44 = load i64, ptr %pvar6.09, align 8
   %inc = add nsw i32 %i.07, 1
-  %49 = load i64, i64* %var4, align 8
-  %50 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %46, i64 %48, i64 %49)
-  %51 = call i8* @llvm.hexagon.circ.ldd(i8* %47, i8* %3, i32 %or, i32 -8)
-  %scevgep = getelementptr i64, i64* %pvar6.09, i32 1
-  %52 = load i64, i64* %scevgep, align 8
+  %45 = load i64, ptr %var4, align 8
+  %46 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %42, i64 %44, i64 %45)
+  %47 = call ptr @llvm.hexagon.circ.ldd(ptr %43, ptr %var4, i32 %or, i32 -8)
+  %scevgep = getelementptr i64, ptr %pvar6.09, i32 1
+  %48 = load i64, ptr %scevgep, align 8
   %inc.1 = add nsw i32 %inc, 1
-  %53 = load i64, i64* %var4, align 8
-  %54 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %50, i64 %52, i64 %53)
-  %55 = call i8* @llvm.hexagon.circ.ldd(i8* %51, i8* %3, i32 %or, i32 -8)
-  %scevgep65 = getelementptr i64, i64* %scevgep, i32 1
-  %56 = load i64, i64* %scevgep65, align 8
+  %49 = load i64, ptr %var4, align 8
+  %50 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %46, i64 %48, i64 %49)
+  %51 = call ptr @llvm.hexagon.circ.ldd(ptr %47, ptr %var4, i32 %or, i32 -8)
+  %scevgep65 = getelementptr i64, ptr %scevgep, i32 1
+  %52 = load i64, ptr %scevgep65, align 8
   %inc.2 = add nsw i32 %inc.1, 1
-  %57 = load i64, i64* %var4, align 8
-  %58 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %54, i64 %56, i64 %57)
-  %59 = call i8* @llvm.hexagon.circ.ldd(i8* %55, i8* %3, i32 %or, i32 -8)
-  %scevgep66 = getelementptr i64, i64* %scevgep65, i32 1
-  %60 = load i64, i64* %scevgep66, align 8
+  %53 = load i64, ptr %var4, align 8
+  %54 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %50, i64 %52, i64 %53)
+  %55 = call ptr @llvm.hexagon.circ.ldd(ptr %51, ptr %var4, i32 %or, i32 -8)
+  %scevgep66 = getelementptr i64, ptr %scevgep65, i32 1
+  %56 = load i64, ptr %scevgep66, align 8
   %inc.3 = add nsw i32 %inc.2, 1
-  %61 = load i64, i64* %var4, align 8
-  %62 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %58, i64 %60, i64 %61)
-  %63 = call i8* @llvm.hexagon.circ.ldd(i8* %59, i8* %3, i32 %or, i32 -8)
-  %scevgep67 = getelementptr i64, i64* %scevgep66, i32 1
-  %64 = load i64, i64* %scevgep67, align 8
+  %57 = load i64, ptr %var4, align 8
+  %58 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %54, i64 %56, i64 %57)
+  %59 = call ptr @llvm.hexagon.circ.ldd(ptr %55, ptr %var4, i32 %or, i32 -8)
+  %scevgep67 = getelementptr i64, ptr %scevgep66, i32 1
+  %60 = load i64, ptr %scevgep67, align 8
   %inc.4 = add nsw i32 %inc.3, 1
-  %65 = load i64, i64* %var4, align 8
-  %66 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %62, i64 %64, i64 %65)
-  %67 = call i8* @llvm.hexagon.circ.ldd(i8* %63, i8* %3, i32 %or, i32 -8)
-  %scevgep68 = getelementptr i64, i64* %scevgep67, i32 1
-  %68 = load i64, i64* %scevgep68, align 8
+  %61 = load i64, ptr %var4, align 8
+  %62 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %58, i64 %60, i64 %61)
+  %63 = call ptr @llvm.hexagon.circ.ldd(ptr %59, ptr %var4, i32 %or, i32 -8)
+  %scevgep68 = getelementptr i64, ptr %scevgep67, i32 1
+  %64 = load i64, ptr %scevgep68, align 8
   %inc.5 = add nsw i32 %inc.4, 1
-  %69 = load i64, i64* %var4, align 8
-  %70 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %66, i64 %68, i64 %69)
-  %71 = call i8* @llvm.hexagon.circ.ldd(i8* %67, i8* %3, i32 %or, i32 -8)
-  %scevgep69 = getelementptr i64, i64* %scevgep68, i32 1
-  %72 = load i64, i64* %scevgep69, align 8
+  %65 = load i64, ptr %var4, align 8
+  %66 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %62, i64 %64, i64 %65)
+  %67 = call ptr @llvm.hexagon.circ.ldd(ptr %63, ptr %var4, i32 %or, i32 -8)
+  %scevgep69 = getelementptr i64, ptr %scevgep68, i32 1
+  %68 = load i64, ptr %scevgep69, align 8
   %inc.6 = add nsw i32 %inc.5, 1
-  %73 = load i64, i64* %var4, align 8
-  %74 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %70, i64 %72, i64 %73)
-  %75 = call i8* @llvm.hexagon.circ.ldd(i8* %71, i8* %3, i32 %or, i32 -8)
-  %scevgep70 = getelementptr i64, i64* %scevgep69, i32 1
-  %76 = load i64, i64* %scevgep70, align 8
+  %69 = load i64, ptr %var4, align 8
+  %70 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %66, i64 %68, i64 %69)
+  %71 = call ptr @llvm.hexagon.circ.ldd(ptr %67, ptr %var4, i32 %or, i32 -8)
+  %scevgep70 = getelementptr i64, ptr %scevgep69, i32 1
+  %72 = load i64, ptr %scevgep70, align 8
   %inc.7 = add nsw i32 %inc.6, 1
-  %77 = load i64, i64* %var4, align 8
-  %78 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %74, i64 %76, i64 %77)
+  %73 = load i64, ptr %var4, align 8
+  %74 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %70, i64 %72, i64 %73)
   %cmp.7 = icmp slt i32 %inc.7, %sub
-  %scevgep71 = getelementptr i64, i64* %scevgep70, i32 1
+  %scevgep71 = getelementptr i64, ptr %scevgep70, i32 1
   br i1 %cmp.7, label %for.body, label %for.end.loopexit.unr-lcssa
 
 for.end.loopexit.unr-lcssa:                       ; preds = %for.body
-  %.lcssa12.ph = phi i64 [ %78, %for.body ]
+  %.lcssa12.ph = phi i64 [ %74, %for.body ]
   br label %for.end.loopexit
 
 for.end.loopexit:                                 ; preds = %for.end.loopexit.unr-lcssa, %for.body.lr.ph.split
-  %.lcssa12 = phi i64 [ %44, %for.body.lr.ph.split ], [ %.lcssa12.ph, %for.end.loopexit.unr-lcssa ]
+  %.lcssa12 = phi i64 [ %40, %for.body.lr.ph.split ], [ %.lcssa12.ph, %for.end.loopexit.unr-lcssa ]
   br label %for.end
 
 for.end:                                          ; preds = %for.end.loopexit, %entry
-  %.lcssa = phi i64 [ %6, %entry ], [ %.lcssa12, %for.end.loopexit ]
-  %79 = call i32 @llvm.hexagon.S2.vrndpackwhs(i64 %.lcssa)
-  ret i32 %79
+  %.lcssa = phi i64 [ %3, %entry ], [ %.lcssa12, %for.end.loopexit ]
+  %75 = call i32 @llvm.hexagon.S2.vrndpackwhs(i64 %.lcssa)
+  ret i32 %75
 }
 
 declare i64 @llvm.hexagon.M2.vdmacs.s1(i64, i64, i64) nounwind readnone

diff  --git a/llvm/test/CodeGen/Hexagon/circ_ldw.ll b/llvm/test/CodeGen/Hexagon/circ_ldw.ll
index 22fb2e00f0d45..1521bc1b8b66a 100644
--- a/llvm/test/CodeGen/Hexagon/circ_ldw.ll
+++ b/llvm/test/CodeGen/Hexagon/circ_ldw.ll
@@ -5,14 +5,11 @@
 %union.vect64 = type { i64 }
 %union.vect32 = type { i32 }
 
-define i32* @HallowedBeThyName(%union.vect64* nocapture %pRx, %union.vect32* %pLut, %union.vect64* nocapture %pOut, i64 %dc.coerce, i32 %shift, i32 %numSamples) nounwind {
+define ptr @HallowedBeThyName(ptr nocapture %pRx, ptr %pLut, ptr nocapture %pOut, i64 %dc.coerce, i32 %shift, i32 %numSamples) nounwind {
 entry:
   %vLutNext = alloca i32, align 4
-  %0 = bitcast %union.vect32* %pLut to i8*
-  %1 = bitcast i32* %vLutNext to i8*
-  %2 = call i8* @llvm.hexagon.circ.ldw(i8* %0, i8* %1, i32 83886144, i32 -4)
-  %3 = bitcast i8* %2 to i32*
-  ret i32* %3
+  %0 = call ptr @llvm.hexagon.circ.ldw(ptr %pLut, ptr %vLutNext, i32 83886144, i32 -4)
+  ret ptr %0
 }
 
-declare i8* @llvm.hexagon.circ.ldw(i8*, i8*, i32, i32) nounwind
+declare ptr @llvm.hexagon.circ.ldw(ptr, ptr, i32, i32) nounwind

diff  --git a/llvm/test/CodeGen/Hexagon/circ_new.ll b/llvm/test/CodeGen/Hexagon/circ_new.ll
index cf41f27d38876..d278e06468f71 100644
--- a/llvm/test/CodeGen/Hexagon/circ_new.ll
+++ b/llvm/test/CodeGen/Hexagon/circ_new.ll
@@ -4,291 +4,291 @@
 ; CHECK: m[[REG1:([0-1])]] = r0
 ; CHECK: cs[[REG1]] = r1
 ; CHECK: = memub(r1++#4:circ(m[[REG1]])
-define zeroext i8 @test1(i32 %mod, i8* %start) local_unnamed_addr #0 {
+define zeroext i8 @test1(i32 %mod, ptr %start) local_unnamed_addr #0 {
 entry:
-  %0 = tail call { i32, i8* } @llvm.hexagon.L2.loadrub.pci(i8* %start, i32 4, i32 %mod, i8* %start)
-  %1 = extractvalue { i32, i8* } %0, 0
+  %0 = tail call { i32, ptr } @llvm.hexagon.L2.loadrub.pci(ptr %start, i32 4, i32 %mod, ptr %start)
+  %1 = extractvalue { i32, ptr } %0, 0
   %conv = trunc i32 %1 to i8
   ret i8 %conv
 }
 
-declare { i32, i8* } @llvm.hexagon.L2.loadrub.pci(i8*, i32, i32, i8* nocapture) #1
+declare { i32, ptr } @llvm.hexagon.L2.loadrub.pci(ptr, i32, i32, ptr nocapture) #1
 
 ; CHECK-LABEL: test2
 ; CHECK: m[[REG2:([0-1])]] = r0
 ; CHECK: cs[[REG2]] = r1
 ; CHECK: = memb(r1++#4:circ(m[[REG2]])
-define zeroext i8 @test2(i32 %mod, i8* %start) local_unnamed_addr #0 {
+define zeroext i8 @test2(i32 %mod, ptr %start) local_unnamed_addr #0 {
 entry:
-  %0 = tail call { i32, i8* } @llvm.hexagon.L2.loadrb.pci(i8* %start, i32 4, i32 %mod, i8* %start)
-  %1 = extractvalue { i32, i8* } %0, 0
+  %0 = tail call { i32, ptr } @llvm.hexagon.L2.loadrb.pci(ptr %start, i32 4, i32 %mod, ptr %start)
+  %1 = extractvalue { i32, ptr } %0, 0
   %conv = trunc i32 %1 to i8
   ret i8 %conv
 }
 
-declare { i32, i8* } @llvm.hexagon.L2.loadrb.pci(i8*, i32, i32, i8* nocapture) #1
+declare { i32, ptr } @llvm.hexagon.L2.loadrb.pci(ptr, i32, i32, ptr nocapture) #1
 
 ; CHECK-LABEL: test3
 ; CHECK: m[[REG3:([0-1])]] = r0
 ; CHECK: cs[[REG3]] = r1
 ; CHECK: = memuh(r1++#4:circ(m[[REG3]])
-define zeroext i16 @test3(i32 %mod, i8* %start) local_unnamed_addr #0 {
+define zeroext i16 @test3(i32 %mod, ptr %start) local_unnamed_addr #0 {
 entry:
-  %0 = tail call { i32, i8* } @llvm.hexagon.L2.loadruh.pci(i8* %start, i32 4, i32 %mod, i8* %start)
-  %1 = extractvalue { i32, i8* } %0, 0
+  %0 = tail call { i32, ptr } @llvm.hexagon.L2.loadruh.pci(ptr %start, i32 4, i32 %mod, ptr %start)
+  %1 = extractvalue { i32, ptr } %0, 0
   %conv = trunc i32 %1 to i16
   ret i16 %conv
 }
 
-declare { i32, i8* } @llvm.hexagon.L2.loadruh.pci(i8*, i32, i32, i8* nocapture) #1
+declare { i32, ptr } @llvm.hexagon.L2.loadruh.pci(ptr, i32, i32, ptr nocapture) #1
 
 ; CHECK-LABEL: test4
 ; CHECK: m[[REG4:([0-1])]] = r0
 ; CHECK: cs[[REG4]] = r1
 ; CHECK: = memh(r1++#4:circ(m[[REG4]])
-define signext i16 @test4(i32 %mod, i8* %start) local_unnamed_addr #0 {
+define signext i16 @test4(i32 %mod, ptr %start) local_unnamed_addr #0 {
 entry:
-  %0 = tail call { i32, i8* } @llvm.hexagon.L2.loadrh.pci(i8* %start, i32 4, i32 %mod, i8* %start)
-  %1 = extractvalue { i32, i8* } %0, 0
+  %0 = tail call { i32, ptr } @llvm.hexagon.L2.loadrh.pci(ptr %start, i32 4, i32 %mod, ptr %start)
+  %1 = extractvalue { i32, ptr } %0, 0
   %conv = trunc i32 %1 to i16
   ret i16 %conv
 }
 
-declare { i32, i8* } @llvm.hexagon.L2.loadrh.pci(i8*, i32, i32, i8* nocapture) #1
+declare { i32, ptr } @llvm.hexagon.L2.loadrh.pci(ptr, i32, i32, ptr nocapture) #1
 
 ; CHECK-LABEL: test5
 ; CHECK: m[[REG5:([0-1])]] = r0
 ; CHECK: cs[[REG5]] = r1
 ; CHECK: = memw(r1++#4:circ(m[[REG5]])
-define i32 @test5(i32 %mod, i8* %start) local_unnamed_addr #0 {
+define i32 @test5(i32 %mod, ptr %start) local_unnamed_addr #0 {
 entry:
-  %0 = tail call { i32, i8* } @llvm.hexagon.L2.loadri.pci(i8* %start, i32 4, i32 %mod, i8* %start)
-  %1 = extractvalue { i32, i8* } %0, 0
+  %0 = tail call { i32, ptr } @llvm.hexagon.L2.loadri.pci(ptr %start, i32 4, i32 %mod, ptr %start)
+  %1 = extractvalue { i32, ptr } %0, 0
   ret i32 %1
 }
 
-declare { i32, i8* } @llvm.hexagon.L2.loadri.pci(i8*, i32, i32, i8* nocapture) #1
+declare { i32, ptr } @llvm.hexagon.L2.loadri.pci(ptr, i32, i32, ptr nocapture) #1
 
 ; CHECK-LABEL: test6
 ; CHECK: m[[REG6:([0-1])]] = r0
 ; CHECK: cs[[REG6]] = r1
 ; CHECK: = memd(r1++#8:circ(m[[REG6]])
-define i64 @test6(i32 %mod, i8* %start) local_unnamed_addr #0 {
+define i64 @test6(i32 %mod, ptr %start) local_unnamed_addr #0 {
 entry:
-  %0 = tail call { i64, i8* } @llvm.hexagon.L2.loadrd.pci(i8* %start, i32 8, i32 %mod, i8* %start)
-  %1 = extractvalue { i64, i8* } %0, 0
+  %0 = tail call { i64, ptr } @llvm.hexagon.L2.loadrd.pci(ptr %start, i32 8, i32 %mod, ptr %start)
+  %1 = extractvalue { i64, ptr } %0, 0
   ret i64 %1
 }
 
-declare { i64, i8* } @llvm.hexagon.L2.loadrd.pci(i8*, i32, i32, i8* nocapture) #1
+declare { i64, ptr } @llvm.hexagon.L2.loadrd.pci(ptr, i32, i32, ptr nocapture) #1
 
 ; CHECK-LABEL: test7
 ; CHECK: m[[REG7:([0-1])]] = r0
 ; CHECK: cs[[REG7]] = r1
 ; CHECK: = memub(r1++I:circ(m[[REG7]])
-define zeroext i8 @test7(i32 %mod, i8* %start) local_unnamed_addr #0 {
+define zeroext i8 @test7(i32 %mod, ptr %start) local_unnamed_addr #0 {
 entry:
-  %0 = tail call { i32, i8* } @llvm.hexagon.L2.loadrub.pcr(i8* %start, i32 %mod, i8* %start)
-  %1 = extractvalue { i32, i8* } %0, 0
+  %0 = tail call { i32, ptr } @llvm.hexagon.L2.loadrub.pcr(ptr %start, i32 %mod, ptr %start)
+  %1 = extractvalue { i32, ptr } %0, 0
   %conv = trunc i32 %1 to i8
   ret i8 %conv
 }
 
-declare { i32, i8* } @llvm.hexagon.L2.loadrub.pcr(i8*, i32, i8* nocapture) #1
+declare { i32, ptr } @llvm.hexagon.L2.loadrub.pcr(ptr, i32, ptr nocapture) #1
 
 ; CHECK-LABEL: test8
 ; CHECK: m[[REG8:([0-1])]] = r0
 ; CHECK: cs[[REG8]] = r1
 ; CHECK: = memb(r1++I:circ(m[[REG8]])
-define zeroext i8 @test8(i32 %mod, i8* %start) local_unnamed_addr #0 {
+define zeroext i8 @test8(i32 %mod, ptr %start) local_unnamed_addr #0 {
 entry:
-  %0 = tail call { i32, i8* } @llvm.hexagon.L2.loadrb.pcr(i8* %start, i32 %mod, i8* %start)
-  %1 = extractvalue { i32, i8* } %0, 0
+  %0 = tail call { i32, ptr } @llvm.hexagon.L2.loadrb.pcr(ptr %start, i32 %mod, ptr %start)
+  %1 = extractvalue { i32, ptr } %0, 0
   %conv = trunc i32 %1 to i8
   ret i8 %conv
 }
 
-declare { i32, i8* } @llvm.hexagon.L2.loadrb.pcr(i8*, i32, i8* nocapture) #1
+declare { i32, ptr } @llvm.hexagon.L2.loadrb.pcr(ptr, i32, ptr nocapture) #1
 
 ; CHECK-LABEL: test9
 ; CHECK: m[[REG9:([0-1])]] = r0
 ; CHECK: cs[[REG9]] = r1
 ; CHECK: = memuh(r1++I:circ(m[[REG9]])
-define zeroext i16 @test9(i32 %mod, i8* %start) local_unnamed_addr #0 {
+define zeroext i16 @test9(i32 %mod, ptr %start) local_unnamed_addr #0 {
 entry:
-  %0 = tail call { i32, i8* } @llvm.hexagon.L2.loadruh.pcr(i8* %start, i32 %mod, i8* %start)
-  %1 = extractvalue { i32, i8* } %0, 0
+  %0 = tail call { i32, ptr } @llvm.hexagon.L2.loadruh.pcr(ptr %start, i32 %mod, ptr %start)
+  %1 = extractvalue { i32, ptr } %0, 0
   %conv = trunc i32 %1 to i16
   ret i16 %conv
 }
 
-declare { i32, i8* } @llvm.hexagon.L2.loadruh.pcr(i8*, i32, i8* nocapture) #1
+declare { i32, ptr } @llvm.hexagon.L2.loadruh.pcr(ptr, i32, ptr nocapture) #1
 
 ; CHECK-LABEL: test10
 ; CHECK: m[[REG10:([0-1])]] = r0
 ; CHECK: cs[[REG10]] = r1
 ; CHECK: = memh(r1++I:circ(m[[REG10]])
-define signext i16 @test10(i32 %mod, i8* %start) local_unnamed_addr #0 {
+define signext i16 @test10(i32 %mod, ptr %start) local_unnamed_addr #0 {
 entry:
-  %0 = tail call { i32, i8* } @llvm.hexagon.L2.loadrh.pcr(i8* %start, i32 %mod, i8* %start)
-  %1 = extractvalue { i32, i8* } %0, 0
+  %0 = tail call { i32, ptr } @llvm.hexagon.L2.loadrh.pcr(ptr %start, i32 %mod, ptr %start)
+  %1 = extractvalue { i32, ptr } %0, 0
   %conv = trunc i32 %1 to i16
   ret i16 %conv
 }
 
-declare { i32, i8* } @llvm.hexagon.L2.loadrh.pcr(i8*, i32, i8* nocapture) #1
+declare { i32, ptr } @llvm.hexagon.L2.loadrh.pcr(ptr, i32, ptr nocapture) #1
 
 ; CHECK-LABEL: test11
 ; CHECK: m[[REG11:([0-1])]] = r0
 ; CHECK: cs[[REG11]] = r1
 ; CHECK: = memw(r1++I:circ(m[[REG11]])
-define i32 @test11(i32 %mod, i8* %start) local_unnamed_addr #0 {
+define i32 @test11(i32 %mod, ptr %start) local_unnamed_addr #0 {
 entry:
-  %0 = tail call { i32, i8* } @llvm.hexagon.L2.loadri.pcr(i8* %start, i32 %mod, i8* %start)
-  %1 = extractvalue { i32, i8* } %0, 0
+  %0 = tail call { i32, ptr } @llvm.hexagon.L2.loadri.pcr(ptr %start, i32 %mod, ptr %start)
+  %1 = extractvalue { i32, ptr } %0, 0
   ret i32 %1
 }
 
-declare { i32, i8* } @llvm.hexagon.L2.loadri.pcr(i8*, i32, i8* nocapture) #1
+declare { i32, ptr } @llvm.hexagon.L2.loadri.pcr(ptr, i32, ptr nocapture) #1
 
 ; CHECK-LABEL: test12
 ; CHECK: m[[REG12:([0-1])]] = r0
 ; CHECK: cs[[REG12]] = r1
 ; CHECK: = memd(r1++I:circ(m[[REG12]])
-define i64 @test12(i32 %mod, i8* %start) local_unnamed_addr #0 {
+define i64 @test12(i32 %mod, ptr %start) local_unnamed_addr #0 {
 entry:
-  %0 = tail call { i64, i8* } @llvm.hexagon.L2.loadrd.pcr(i8* %start, i32 %mod, i8* %start)
-  %1 = extractvalue { i64, i8* } %0, 0
+  %0 = tail call { i64, ptr } @llvm.hexagon.L2.loadrd.pcr(ptr %start, i32 %mod, ptr %start)
+  %1 = extractvalue { i64, ptr } %0, 0
   ret i64 %1
 }
 
-declare { i64, i8* } @llvm.hexagon.L2.loadrd.pcr(i8*, i32, i8* nocapture) #1
+declare { i64, ptr } @llvm.hexagon.L2.loadrd.pcr(ptr, i32, ptr nocapture) #1
 
 ; CHECK-LABEL: test13
 ; CHECK: m[[REG13:([0-1])]] = r0
 ; CHECK: cs[[REG13]] = r1
 ; CHECK: memb(r1++#4:circ(m[[REG13]])) =
-define void @test13(i32 %mod, i8* %start, i8 zeroext %v) local_unnamed_addr #0 {
+define void @test13(i32 %mod, ptr %start, i8 zeroext %v) local_unnamed_addr #0 {
 entry:
   %conv = zext i8 %v to i32
-  %0 = tail call i8* @llvm.hexagon.S2.storerb.pci(i8* %start, i32 4, i32 %mod, i32 %conv, i8* %start)
+  %0 = tail call ptr @llvm.hexagon.S2.storerb.pci(ptr %start, i32 4, i32 %mod, i32 %conv, ptr %start)
   ret void
 }
 
-declare i8* @llvm.hexagon.S2.storerb.pci(i8*, i32, i32, i32, i8* nocapture) #1
+declare ptr @llvm.hexagon.S2.storerb.pci(ptr, i32, i32, i32, ptr nocapture) #1
 
 ; CHECK-LABEL: test14
 ; CHECK: m[[REG14:([0-1])]] = r0
 ; CHECK: cs[[REG14]] = r1
 ; CHECK: memh(r1++#4:circ(m[[REG14]])) =
-define void @test14(i32 %mod, i8* %start, i16 signext %v) local_unnamed_addr #0 {
+define void @test14(i32 %mod, ptr %start, i16 signext %v) local_unnamed_addr #0 {
 entry:
   %conv = sext i16 %v to i32
-  %0 = tail call i8* @llvm.hexagon.S2.storerh.pci(i8* %start, i32 4, i32 %mod, i32 %conv, i8* %start)
+  %0 = tail call ptr @llvm.hexagon.S2.storerh.pci(ptr %start, i32 4, i32 %mod, i32 %conv, ptr %start)
   ret void
 }
 
-declare i8* @llvm.hexagon.S2.storerh.pci(i8*, i32, i32, i32, i8* nocapture) #1
+declare ptr @llvm.hexagon.S2.storerh.pci(ptr, i32, i32, i32, ptr nocapture) #1
 
 ; CHECK-LABEL: test15
 ; CHECK: m[[REG15:([0-1])]] = r0
 ; CHECK: cs[[REG15]] = r1
 ; CHECK: memh(r1++#4:circ(m[[REG15]])) = r{{[0-9]+}}.h
-define void @test15(i32 %mod, i8* %start, i16 signext %v) local_unnamed_addr #0 {
+define void @test15(i32 %mod, ptr %start, i16 signext %v) local_unnamed_addr #0 {
 entry:
   %conv = sext i16 %v to i32
-  %0 = tail call i8* @llvm.hexagon.S2.storerf.pci(i8* %start, i32 4, i32 %mod, i32 %conv, i8* %start)
+  %0 = tail call ptr @llvm.hexagon.S2.storerf.pci(ptr %start, i32 4, i32 %mod, i32 %conv, ptr %start)
   ret void
 }
 
-declare i8* @llvm.hexagon.S2.storerf.pci(i8*, i32, i32, i32, i8* nocapture) #1
+declare ptr @llvm.hexagon.S2.storerf.pci(ptr, i32, i32, i32, ptr nocapture) #1
 
 ; CHECK-LABEL: test16
 ; CHECK: m[[REG16:([0-1])]] = r0
 ; CHECK: cs[[REG16]] = r1
 ; CHECK: memw(r1++#4:circ(m[[REG16]])) =
-define void @test16(i32 %mod, i8* %start, i32 %v) local_unnamed_addr #0 {
+define void @test16(i32 %mod, ptr %start, i32 %v) local_unnamed_addr #0 {
 entry:
-  %0 = tail call i8* @llvm.hexagon.S2.storeri.pci(i8* %start, i32 4, i32 %mod, i32 %v, i8* %start)
+  %0 = tail call ptr @llvm.hexagon.S2.storeri.pci(ptr %start, i32 4, i32 %mod, i32 %v, ptr %start)
   ret void
 }
 
-declare i8* @llvm.hexagon.S2.storeri.pci(i8*, i32, i32, i32, i8* nocapture) #1
+declare ptr @llvm.hexagon.S2.storeri.pci(ptr, i32, i32, i32, ptr nocapture) #1
 
 ; CHECK-LABEL: test17
 ; CHECK: m[[REG17:([0-1])]] = r0
 ; CHECK: cs[[REG17]] = r1
 ; CHECK: memd(r1++#8:circ(m[[REG17]])) =
-define void @test17(i32 %mod, i8* %start, i64 %v) local_unnamed_addr #0 {
+define void @test17(i32 %mod, ptr %start, i64 %v) local_unnamed_addr #0 {
 entry:
-  %0 = tail call i8* @llvm.hexagon.S2.storerd.pci(i8* %start, i32 8, i32 %mod, i64 %v, i8* %start)
+  %0 = tail call ptr @llvm.hexagon.S2.storerd.pci(ptr %start, i32 8, i32 %mod, i64 %v, ptr %start)
   ret void
 }
 
-declare i8* @llvm.hexagon.S2.storerd.pci(i8*, i32, i32, i64, i8* nocapture) #1
+declare ptr @llvm.hexagon.S2.storerd.pci(ptr, i32, i32, i64, ptr nocapture) #1
 
 ; CHECK-LABEL: test18
 ; CHECK: m[[REG18:([0-1])]] = r0
 ; CHECK: cs[[REG18]] = r1
 ; CHECK: memb(r1++I:circ(m[[REG18]])) =
-define void @test18(i32 %mod, i8* %start, i8 zeroext %v) local_unnamed_addr #0 {
+define void @test18(i32 %mod, ptr %start, i8 zeroext %v) local_unnamed_addr #0 {
 entry:
   %conv = zext i8 %v to i32
-  %0 = tail call i8* @llvm.hexagon.S2.storerb.pcr(i8* %start, i32 %mod, i32 %conv, i8* %start)
+  %0 = tail call ptr @llvm.hexagon.S2.storerb.pcr(ptr %start, i32 %mod, i32 %conv, ptr %start)
   ret void
 }
 
-declare i8* @llvm.hexagon.S2.storerb.pcr(i8*, i32, i32, i8* nocapture) #1
+declare ptr @llvm.hexagon.S2.storerb.pcr(ptr, i32, i32, ptr nocapture) #1
 
 ; CHECK-LABEL: test19
 ; CHECK: m[[REG19:([0-1])]] = r0
 ; CHECK: cs[[REG19]] = r1
 ; CHECK: memh(r1++I:circ(m[[REG19]])) =
-define void @test19(i32 %mod, i8* %start, i16 signext %v) local_unnamed_addr #0 {
+define void @test19(i32 %mod, ptr %start, i16 signext %v) local_unnamed_addr #0 {
 entry:
   %conv = sext i16 %v to i32
-  %0 = tail call i8* @llvm.hexagon.S2.storerh.pcr(i8* %start, i32 %mod, i32 %conv, i8* %start)
+  %0 = tail call ptr @llvm.hexagon.S2.storerh.pcr(ptr %start, i32 %mod, i32 %conv, ptr %start)
   ret void
 }
 
-declare i8* @llvm.hexagon.S2.storerh.pcr(i8*, i32, i32, i8* nocapture) #1
+declare ptr @llvm.hexagon.S2.storerh.pcr(ptr, i32, i32, ptr nocapture) #1
 
 ; CHECK-LABEL: test20
 ; CHECK: m[[REG20:([0-1])]] = r0
 ; CHECK: cs[[REG20]] = r1
 ; CHECK: memh(r1++I:circ(m[[REG20]])) = r{{[0-9]+}}.h
-define void @test20(i32 %mod, i8* %start, i16 signext %v) local_unnamed_addr #0 {
+define void @test20(i32 %mod, ptr %start, i16 signext %v) local_unnamed_addr #0 {
 entry:
   %conv = sext i16 %v to i32
-  %0 = tail call i8* @llvm.hexagon.S2.storerf.pcr(i8* %start, i32 %mod, i32 %conv, i8* %start)
+  %0 = tail call ptr @llvm.hexagon.S2.storerf.pcr(ptr %start, i32 %mod, i32 %conv, ptr %start)
   ret void
 }
 
-declare i8* @llvm.hexagon.S2.storerf.pcr(i8*, i32, i32, i8* nocapture) #1
+declare ptr @llvm.hexagon.S2.storerf.pcr(ptr, i32, i32, ptr nocapture) #1
 
 ; CHECK-LABEL: test21
 ; CHECK: m[[REG21:([0-1])]] = r0
 ; CHECK: cs[[REG21]] = r1
 ; CHECK: memw(r1++I:circ(m[[REG21]])) =
-define void @test21(i32 %mod, i8* %start, i32 %v) local_unnamed_addr #0 {
+define void @test21(i32 %mod, ptr %start, i32 %v) local_unnamed_addr #0 {
 entry:
-  %0 = tail call i8* @llvm.hexagon.S2.storeri.pcr(i8* %start, i32 %mod, i32 %v, i8* %start)
+  %0 = tail call ptr @llvm.hexagon.S2.storeri.pcr(ptr %start, i32 %mod, i32 %v, ptr %start)
   ret void
 }
 
-declare i8* @llvm.hexagon.S2.storeri.pcr(i8*, i32, i32, i8* nocapture) #1
+declare ptr @llvm.hexagon.S2.storeri.pcr(ptr, i32, i32, ptr nocapture) #1
 
 ; CHECK-LABEL: test22
 ; CHECK: m[[REG22:([0-1])]] = r0
 ; CHECK: cs[[REG22]] = r1
 ; CHECK: memd(r1++I:circ(m[[REG1]])) =
-define void @test22(i32 %mod, i8* %start, i64 %v) local_unnamed_addr #0 {
+define void @test22(i32 %mod, ptr %start, i64 %v) local_unnamed_addr #0 {
 entry:
-  %0 = tail call i8* @llvm.hexagon.S2.storerd.pcr(i8* %start, i32 %mod, i64 %v, i8* %start)
+  %0 = tail call ptr @llvm.hexagon.S2.storerd.pcr(ptr %start, i32 %mod, i64 %v, ptr %start)
   ret void
 }
 
-declare i8* @llvm.hexagon.S2.storerd.pcr(i8*, i32, i64, i8* nocapture) #1
+declare ptr @llvm.hexagon.S2.storerd.pcr(ptr, i32, i64, ptr nocapture) #1
 
 attributes #0 = { nounwind "target-cpu"="hexagonv60" }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/circ_pcr_assert.ll b/llvm/test/CodeGen/Hexagon/circ_pcr_assert.ll
index 133c2aa7d07d4..26c1d445f941c 100644
--- a/llvm/test/CodeGen/Hexagon/circ_pcr_assert.ll
+++ b/llvm/test/CodeGen/Hexagon/circ_pcr_assert.ll
@@ -9,27 +9,27 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define zeroext i8 @f0(i8* %a0) local_unnamed_addr #0 {
+define zeroext i8 @f0(ptr %a0) local_unnamed_addr #0 {
 b0:
-  %v0 = tail call { i32, i8* } @llvm.hexagon.L2.loadrub.pcr(i8* %a0, i32 -268430336, i8* %a0)
-  %v1 = extractvalue { i32, i8* } %v0, 0
+  %v0 = tail call { i32, ptr } @llvm.hexagon.L2.loadrub.pcr(ptr %a0, i32 -268430336, ptr %a0)
+  %v1 = extractvalue { i32, ptr } %v0, 0
   %v2 = trunc i32 %v1 to i8
   ret i8 %v2
 }
 
 ; Function Attrs: argmemonly nounwind
-declare { i32, i8* } @llvm.hexagon.L2.loadrub.pcr(i8*, i32, i8* nocapture) #1
+declare { i32, ptr } @llvm.hexagon.L2.loadrub.pcr(ptr, i32, ptr nocapture) #1
 
 ; Function Attrs: nounwind
-define void @f1(i8* %a0, i8 zeroext %a1) local_unnamed_addr #0 {
+define void @f1(ptr %a0, i8 zeroext %a1) local_unnamed_addr #0 {
 b0:
   %v0 = zext i8 %a1 to i32
-  %v1 = tail call i8* @llvm.hexagon.S2.storerb.pcr(i8* %a0, i32 -268430336, i32 %v0, i8* %a0)
+  %v1 = tail call ptr @llvm.hexagon.S2.storerb.pcr(ptr %a0, i32 -268430336, i32 %v0, ptr %a0)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare i8* @llvm.hexagon.S2.storerb.pcr(i8*, i32, i32, i8* nocapture) #1
+declare ptr @llvm.hexagon.S2.storerb.pcr(ptr, i32, i32, ptr nocapture) #1
 
 attributes #0 = { nounwind "target-cpu"="hexagonv60" }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/circ_st.ll b/llvm/test/CodeGen/Hexagon/circ_st.ll
index bac9845010da7..22007331adfec 100644
--- a/llvm/test/CodeGen/Hexagon/circ_st.ll
+++ b/llvm/test/CodeGen/Hexagon/circ_st.ll
@@ -15,82 +15,77 @@
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
 target triple = "hexagon"
 
-define zeroext i8 @foo1(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
+define zeroext i8 @foo1(i16 zeroext %filtMemLen, ptr %filtMemLR, i16 signext %filtMemIndex) nounwind {
 entry:
   %conv = zext i16 %filtMemLen to i32
   %shr2 = lshr i32 %conv, 1
   %idxprom = sext i16 %filtMemIndex to i32
-  %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
-  %0 = bitcast i16* %arrayidx to i8*
+  %arrayidx = getelementptr inbounds i16, ptr %filtMemLR, i32 %idxprom
   %or = or i32 %shr2, 33554432
 ; CHECK: memb(r{{[0-9]+}}++#-1:circ(m{{[0-1]}}))
-  %1 = tail call i8* @llvm.hexagon.circ.stb(i8* %0, i32 0, i32 %or, i32 -1)
+  %0 = tail call ptr @llvm.hexagon.circ.stb(ptr %arrayidx, i32 0, i32 %or, i32 -1)
   ret i8 0
 }
 
-declare i8* @llvm.hexagon.circ.stb(i8*, i32, i32, i32) nounwind
+declare ptr @llvm.hexagon.circ.stb(ptr, i32, i32, i32) nounwind
 
-define i64 @foo2(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
+define i64 @foo2(i16 zeroext %filtMemLen, ptr %filtMemLR, i16 signext %filtMemIndex) nounwind {
 entry:
   %conv = zext i16 %filtMemLen to i32
   %shr1 = lshr i32 %conv, 1
   %idxprom = sext i16 %filtMemIndex to i32
-  %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
-  %0 = bitcast i16* %arrayidx to i8*
+  %arrayidx = getelementptr inbounds i16, ptr %filtMemLR, i32 %idxprom
   %shl = shl nuw nsw i32 %shr1, 3
   %or = or i32 %shl, 83886080
 ; CHECK: memd(r{{[0-9]+}}++#-8:circ(m{{[0-1]}}))
-  %1 = tail call i8* @llvm.hexagon.circ.std(i8* %0, i64 undef, i32 %or, i32 -8)
+  %0 = tail call ptr @llvm.hexagon.circ.std(ptr %arrayidx, i64 undef, i32 %or, i32 -8)
   ret i64 0
 }
 
-declare i8* @llvm.hexagon.circ.std(i8*, i64, i32, i32) nounwind
+declare ptr @llvm.hexagon.circ.std(ptr, i64, i32, i32) nounwind
 
-define signext i16 @foo3(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
+define signext i16 @foo3(i16 zeroext %filtMemLen, ptr %filtMemLR, i16 signext %filtMemIndex) nounwind {
 entry:
   %conv = zext i16 %filtMemLen to i32
   %shr2 = and i32 %conv, 65534
   %idxprom = sext i16 %filtMemIndex to i32
-  %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
-  %0 = bitcast i16* %arrayidx to i8*
+  %arrayidx = getelementptr inbounds i16, ptr %filtMemLR, i32 %idxprom
   %or = or i32 %shr2, 50331648
 ; CHECK: memh(r{{[0-9]+}}++#-2:circ(m{{[0-1]}}))
-  %1 = tail call i8* @llvm.hexagon.circ.sth(i8* %0, i32 0, i32 %or, i32 -2)
+  %0 = tail call ptr @llvm.hexagon.circ.sth(ptr %arrayidx, i32 0, i32 %or, i32 -2)
   ret i16 0
 }
 
-declare i8* @llvm.hexagon.circ.sth(i8*, i32, i32, i32) nounwind
+declare ptr @llvm.hexagon.circ.sth(ptr, i32, i32, i32) nounwind
 
-define signext i16 @foo5(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
+define signext i16 @foo5(i16 zeroext %filtMemLen, ptr %filtMemLR, i16 signext %filtMemIndex) nounwind {
 entry:
   %conv = zext i16 %filtMemLen to i32
   %shr2 = and i32 %conv, 65534
   %idxprom = sext i16 %filtMemIndex to i32
-  %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
-  %0 = bitcast i16* %arrayidx to i8*
+  %arrayidx = getelementptr inbounds i16, ptr %filtMemLR, i32 %idxprom
   %or = or i32 %shr2, 50331648
 ; CHECK: memh(r{{[0-9]+}}++#-2:circ(m{{[0-1]}})) = r{{[0-9]*}}.h
-  %1 = tail call i8* @llvm.hexagon.circ.sthhi(i8* %0, i32 0, i32 %or, i32 -2)
+  %0 = tail call ptr @llvm.hexagon.circ.sthhi(ptr %arrayidx, i32 0, i32 %or, i32 -2)
   ret i16 0
 }
 
-declare i8* @llvm.hexagon.circ.sthhi(i8*, i32, i32, i32) nounwind
+declare ptr @llvm.hexagon.circ.sthhi(ptr, i32, i32, i32) nounwind
 
-define i32 @foo6(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
+define i32 @foo6(i16 zeroext %filtMemLen, ptr %filtMemLR, i16 signext %filtMemIndex) nounwind {
 entry:
   %conv = zext i16 %filtMemLen to i32
   %shr1 = lshr i32 %conv, 1
   %idxprom = sext i16 %filtMemIndex to i32
-  %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
-  %0 = bitcast i16* %arrayidx to i8*
+  %arrayidx = getelementptr inbounds i16, ptr %filtMemLR, i32 %idxprom
   %shl = shl nuw nsw i32 %shr1, 2
   %or = or i32 %shl, 67108864
 ; CHECK: memw(r{{[0-9]+}}++#-4:circ(m{{[0-1]}}))
-  %1 = tail call i8* @llvm.hexagon.circ.stw(i8* %0, i32 undef, i32 %or, i32 -4)
+  %0 = tail call ptr @llvm.hexagon.circ.stw(ptr %arrayidx, i32 undef, i32 %or, i32 -4)
   ret i32 0
 }
 
-declare i8* @llvm.hexagon.circ.stw(i8*, i32, i32, i32) nounwind
+declare ptr @llvm.hexagon.circ.stw(ptr, i32, i32, i32) nounwind
 
 !0 = !{!"omnipotent char", !1}
 !1 = !{!"Simple C/C++ TBAA"}

diff  --git a/llvm/test/CodeGen/Hexagon/clr_set_toggle.ll b/llvm/test/CodeGen/Hexagon/clr_set_toggle.ll
index 43c866c7b7655..a38b2529be106 100644
--- a/llvm/test/CodeGen/Hexagon/clr_set_toggle.ll
+++ b/llvm/test/CodeGen/Hexagon/clr_set_toggle.ll
@@ -6,8 +6,8 @@ entry:
 ; CHECK-LABEL: my_clrbit
 ; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#31)
   %x.addr = alloca i32, align 4
-  store i32 %x, i32* %x.addr, align 4
-  %0 = load i32, i32* %x.addr, align 4
+  store i32 %x, ptr %x.addr, align 4
+  %0 = load i32, ptr %x.addr, align 4
   %and = and i32 %0, 2147483647
   ret i32 %and
 }
@@ -17,8 +17,8 @@ entry:
 ; CHECK-LABEL: my_clrbit2
 ; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#31)
   %x.addr = alloca i64, align 8
-  store i64 %x, i64* %x.addr, align 8
-  %0 = load i64, i64* %x.addr, align 8
+  store i64 %x, ptr %x.addr, align 8
+  %0 = load i64, ptr %x.addr, align 8
   %and = and i64 %0, -2147483649
   ret i64 %and
 }
@@ -28,8 +28,8 @@ entry:
 ; CHECK-LABEL: my_clrbit3
 ; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#31)
   %x.addr = alloca i64, align 8
-  store i64 %x, i64* %x.addr, align 8
-  %0 = load i64, i64* %x.addr, align 8
+  store i64 %x, ptr %x.addr, align 8
+  %0 = load i64, ptr %x.addr, align 8
   %and = and i64 %0, 9223372036854775807
   ret i64 %and
 }
@@ -39,8 +39,8 @@ entry:
 ; CHECK-LABEL: my_clrbit4
 ; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#13)
   %x.addr = alloca i32, align 4
-  store i32 %x, i32* %x.addr, align 4
-  %0 = load i32, i32* %x.addr, align 4
+  store i32 %x, ptr %x.addr, align 4
+  %0 = load i32, ptr %x.addr, align 4
   %and = and i32 %0, -8193
   ret i32 %and
 }
@@ -50,8 +50,8 @@ entry:
 ; CHECK-LABEL: my_clrbit5
 ; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#13)
   %x.addr = alloca i64, align 8
-  store i64 %x, i64* %x.addr, align 8
-  %0 = load i64, i64* %x.addr, align 8
+  store i64 %x, ptr %x.addr, align 8
+  %0 = load i64, ptr %x.addr, align 8
   %and = and i64 %0, -8193
   ret i64 %and
 }
@@ -61,8 +61,8 @@ entry:
 ; CHECK-LABEL: my_clrbit6
 ; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#27)
   %x.addr = alloca i64, align 8
-  store i64 %x, i64* %x.addr, align 8
-  %0 = load i64, i64* %x.addr, align 8
+  store i64 %x, ptr %x.addr, align 8
+  %0 = load i64, ptr %x.addr, align 8
   %and = and i64 %0, -576460752303423489
   ret i64 %and
 }
@@ -72,13 +72,13 @@ entry:
 ; CHECK-LABEL: my_setbit
 ; CHECK: r{{[0-9]+}} = setbit(r{{[0-9]+}},#15)
   %crc.addr = alloca i16, align 2
-  store i16 %crc, i16* %crc.addr, align 2
-  %0 = load i16, i16* %crc.addr, align 2
+  store i16 %crc, ptr %crc.addr, align 2
+  %0 = load i16, ptr %crc.addr, align 2
   %conv = zext i16 %0 to i32
   %or = or i32 %conv, 32768
   %conv1 = trunc i32 %or to i16
-  store i16 %conv1, i16* %crc.addr, align 2
-  %1 = load i16, i16* %crc.addr, align 2
+  store i16 %conv1, ptr %crc.addr, align 2
+  %1 = load i16, ptr %crc.addr, align 2
   ret i16 %1
 }
 
@@ -87,8 +87,8 @@ entry:
 ; CHECK-LABEL: my_setbit2
 ; CHECK: r{{[0-9]+}} = setbit(r{{[0-9]+}},#15)
   %x.addr = alloca i32, align 4
-  store i32 %x, i32* %x.addr, align 4
-  %0 = load i32, i32* %x.addr, align 4
+  store i32 %x, ptr %x.addr, align 4
+  %0 = load i32, ptr %x.addr, align 4
   %or = or i32 %0, 32768
   ret i32 %or
 }
@@ -98,8 +98,8 @@ entry:
 ; CHECK-LABEL: my_setbit3
 ; CHECK: r{{[0-9]+}} = setbit(r{{[0-9]+}},#15)
   %x.addr = alloca i64, align 8
-  store i64 %x, i64* %x.addr, align 8
-  %0 = load i64, i64* %x.addr, align 8
+  store i64 %x, ptr %x.addr, align 8
+  %0 = load i64, ptr %x.addr, align 8
   %or = or i64 %0, 32768
   ret i64 %or
 }
@@ -109,8 +109,8 @@ entry:
 ; CHECK-LABEL: my_setbit4
 ; CHECK: r{{[0-9]+}} = setbit(r{{[0-9]+}},#31)
   %x.addr = alloca i32, align 4
-  store i32 %x, i32* %x.addr, align 4
-  %0 = load i32, i32* %x.addr, align 4
+  store i32 %x, ptr %x.addr, align 4
+  %0 = load i32, ptr %x.addr, align 4
   %or = or i32 %0, -2147483648
   ret i32 %or
 }
@@ -120,8 +120,8 @@ entry:
 ; CHECK-LABEL: my_setbit5
 ; CHECK: r{{[0-9]+}} = setbit(r{{[0-9]+}},#13)
   %x.addr = alloca i64, align 8
-  store i64 %x, i64* %x.addr, align 8
-  %0 = load i64, i64* %x.addr, align 8
+  store i64 %x, ptr %x.addr, align 8
+  %0 = load i64, ptr %x.addr, align 8
   %or = or i64 %0, 35184372088832
   ret i64 %or
 }
@@ -131,13 +131,13 @@ entry:
 ; CHECK-LABEL: my_togglebit
 ; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#15)
   %crc.addr = alloca i16, align 2
-  store i16 %crc, i16* %crc.addr, align 2
-  %0 = load i16, i16* %crc.addr, align 2
+  store i16 %crc, ptr %crc.addr, align 2
+  %0 = load i16, ptr %crc.addr, align 2
   %conv = zext i16 %0 to i32
   %xor = xor i32 %conv, 32768
   %conv1 = trunc i32 %xor to i16
-  store i16 %conv1, i16* %crc.addr, align 2
-  %1 = load i16, i16* %crc.addr, align 2
+  store i16 %conv1, ptr %crc.addr, align 2
+  %1 = load i16, ptr %crc.addr, align 2
   ret i16 %1
 }
 
@@ -146,8 +146,8 @@ entry:
 ; CHECK-LABEL: my_togglebit2
 ; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#15)
   %x.addr = alloca i32, align 4
-  store i32 %x, i32* %x.addr, align 4
-  %0 = load i32, i32* %x.addr, align 4
+  store i32 %x, ptr %x.addr, align 4
+  %0 = load i32, ptr %x.addr, align 4
   %xor = xor i32 %0, 32768
   ret i32 %xor
 }
@@ -157,8 +157,8 @@ entry:
 ; CHECK-LABEL: my_togglebit3
 ; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#15)
   %x.addr = alloca i64, align 8
-  store i64 %x, i64* %x.addr, align 8
-  %0 = load i64, i64* %x.addr, align 8
+  store i64 %x, ptr %x.addr, align 8
+  %0 = load i64, ptr %x.addr, align 8
   %xor = xor i64 %0, 32768
   ret i64 %xor
 }
@@ -168,8 +168,8 @@ entry:
 ; CHECK-LABEL: my_togglebit4
 ; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#20)
   %x.addr = alloca i64, align 8
-  store i64 %x, i64* %x.addr, align 8
-  %0 = load i64, i64* %x.addr, align 8
+  store i64 %x, ptr %x.addr, align 8
+  %0 = load i64, ptr %x.addr, align 8
   %xor = xor i64 %0, 4503599627370496
   ret i64 %xor
 }

diff  --git a/llvm/test/CodeGen/Hexagon/cmp-extend.ll b/llvm/test/CodeGen/Hexagon/cmp-extend.ll
index 4107e62356867..3a728ed51658f 100644
--- a/llvm/test/CodeGen/Hexagon/cmp-extend.ll
+++ b/llvm/test/CodeGen/Hexagon/cmp-extend.ll
@@ -1,9 +1,9 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s
 
-%struct.RESULTS_S.A = type { i16, i16, i16, [4 x i8*], i32, i32, i32, %struct.list_head_s.B*, %struct.MAT_PARAMS_S.D, i16, i16, i16, i16, i16, %struct.CORE_PORTABLE_S.E }
-%struct.list_head_s.B = type { %struct.list_head_s.B*, %struct.list_data_s.C* }
+%struct.RESULTS_S.A = type { i16, i16, i16, [4 x ptr], i32, i32, i32, ptr, %struct.MAT_PARAMS_S.D, i16, i16, i16, i16, i16, %struct.CORE_PORTABLE_S.E }
+%struct.list_head_s.B = type { ptr, ptr }
 %struct.list_data_s.C = type { i16, i16 }
-%struct.MAT_PARAMS_S.D = type { i32, i16*, i16*, i32* }
+%struct.MAT_PARAMS_S.D = type { i32, ptr, ptr, ptr }
 %struct.CORE_PORTABLE_S.E = type { i8 }
 
 ; Test that we don't generate a zero extend in this case. Instead we generate
@@ -12,10 +12,10 @@
 ; CHECK-NOT: zxth
 
 ; Function Attrs: nounwind
-define void @core_bench_list(%struct.RESULTS_S.A* %res) #0 {
+define void @core_bench_list(ptr %res) #0 {
 entry:
-  %seed3 = getelementptr inbounds %struct.RESULTS_S.A, %struct.RESULTS_S.A* %res, i32 0, i32 2
-  %0 = load i16, i16* %seed3, align 2
+  %seed3 = getelementptr inbounds %struct.RESULTS_S.A, ptr %res, i32 0, i32 2
+  %0 = load i16, ptr %seed3, align 2
   %cmp364 = icmp sgt i16 %0, 0
   br i1 %cmp364, label %for.body, label %while.body19.i160
 
@@ -24,7 +24,7 @@ for.body:
   br i1 undef, label %if.then, label %while.body.i273
 
 while.body.i273:
-  %tobool.i272 = icmp eq %struct.list_head_s.B* undef, null
+  %tobool.i272 = icmp eq ptr undef, null
   br i1 %tobool.i272, label %if.then, label %while.body.i273
 
 if.then:

diff  --git a/llvm/test/CodeGen/Hexagon/cmp-promote.ll b/llvm/test/CodeGen/Hexagon/cmp-promote.ll
index 7811b7e729cbe..e3f8992507b20 100644
--- a/llvm/test/CodeGen/Hexagon/cmp-promote.ll
+++ b/llvm/test/CodeGen/Hexagon/cmp-promote.ll
@@ -64,7 +64,7 @@ entry:
 ; CHECK: cmp.eq{{.*}}#-12
 define i32 @foo8() nounwind readonly {
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %cmp = icmp eq i16 %0, -12
   %res.0 = select i1 %cmp, i32 0, i32 8
   ret i32 %res.0

diff  --git a/llvm/test/CodeGen/Hexagon/cmp.ll b/llvm/test/CodeGen/Hexagon/cmp.ll
index b1f14f27a2ebf..ce5361ecb525a 100644
--- a/llvm/test/CodeGen/Hexagon/cmp.ll
+++ b/llvm/test/CodeGen/Hexagon/cmp.ll
@@ -4,8 +4,8 @@
 define i32 @cmpeq(i32 %i) #0 {
 entry:
   %i.addr = alloca i32, align 4
-  store i32 %i, i32* %i.addr, align 4
-  %0 = load i32, i32* %i.addr, align 4
+  store i32 %i, ptr %i.addr, align 4
+  %0 = load i32, ptr %i.addr, align 4
   %1 = call i32 @llvm.hexagon.C2.cmpeq(i32 %0, i32 1)
   ret i32 %1
 }
@@ -18,8 +18,8 @@ declare i32 @llvm.hexagon.C2.cmpeq(i32, i32) #1
 define i32 @cmpgt(i32 %i) #0 {
 entry:
   %i.addr = alloca i32, align 4
-  store i32 %i, i32* %i.addr, align 4
-  %0 = load i32, i32* %i.addr, align 4
+  store i32 %i, ptr %i.addr, align 4
+  %0 = load i32, ptr %i.addr, align 4
   %1 = call i32 @llvm.hexagon.C2.cmpgt(i32 %0, i32 2)
   ret i32 %1
 }
@@ -32,8 +32,8 @@ declare i32 @llvm.hexagon.C2.cmpgt(i32, i32) #1
 define i32 @cmpgtu(i32 %i) #0 {
 entry:
   %i.addr = alloca i32, align 4
-  store i32 %i, i32* %i.addr, align 4
-  %0 = load i32, i32* %i.addr, align 4
+  store i32 %i, ptr %i.addr, align 4
+  %0 = load i32, ptr %i.addr, align 4
   %1 = call i32 @llvm.hexagon.C2.cmpgtu(i32 %0, i32 3)
   ret i32 %1
 }
@@ -46,8 +46,8 @@ declare i32 @llvm.hexagon.C2.cmpgtu(i32, i32) #1
 define i32 @cmplt(i32 %i) #0 {
 entry:
   %i.addr = alloca i32, align 4
-  store i32 %i, i32* %i.addr, align 4
-  %0 = load i32, i32* %i.addr, align 4
+  store i32 %i, ptr %i.addr, align 4
+  %0 = load i32, ptr %i.addr, align 4
   %1 = call i32 @llvm.hexagon.C2.cmplt(i32 %0, i32 4)
   ret i32 %1
 }
@@ -60,8 +60,8 @@ declare i32 @llvm.hexagon.C2.cmplt(i32, i32) #1
 define i32 @cmpltu(i32 %i) #0 {
 entry:
   %i.addr = alloca i32, align 4
-  store i32 %i, i32* %i.addr, align 4
-  %0 = load i32, i32* %i.addr, align 4
+  store i32 %i, ptr %i.addr, align 4
+  %0 = load i32, ptr %i.addr, align 4
   %1 = call i32 @llvm.hexagon.C2.cmpltu(i32 %0, i32 5)
   ret i32 %1
 }
@@ -74,8 +74,8 @@ declare i32 @llvm.hexagon.C2.cmpltu(i32, i32) #1
 define i32 @cmpeqi(i32 %i) #0 {
 entry:
   %i.addr = alloca i32, align 4
-  store i32 %i, i32* %i.addr, align 4
-  %0 = load i32, i32* %i.addr, align 4
+  store i32 %i, ptr %i.addr, align 4
+  %0 = load i32, ptr %i.addr, align 4
   %1 = call i32 @llvm.hexagon.C2.cmpeqi(i32 %0, i32 10)
   ret i32 %1
 }
@@ -88,8 +88,8 @@ declare i32 @llvm.hexagon.C2.cmpeqi(i32, i32) #1
 define i32 @cmpgti(i32 %i) #0 {
 entry:
   %i.addr = alloca i32, align 4
-  store i32 %i, i32* %i.addr, align 4
-  %0 = load i32, i32* %i.addr, align 4
+  store i32 %i, ptr %i.addr, align 4
+  %0 = load i32, ptr %i.addr, align 4
   %1 = call i32 @llvm.hexagon.C2.cmpgti(i32 %0, i32 20)
   ret i32 %1
 }
@@ -102,8 +102,8 @@ declare i32 @llvm.hexagon.C2.cmpgti(i32, i32) #1
 define i32 @cmpgtui(i32 %i) #0 {
 entry:
   %i.addr = alloca i32, align 4
-  store i32 %i, i32* %i.addr, align 4
-  %0 = load i32, i32* %i.addr, align 4
+  store i32 %i, ptr %i.addr, align 4
+  %0 = load i32, ptr %i.addr, align 4
   %1 = call i32 @llvm.hexagon.C2.cmpgtui(i32 %0, i32 40)
   ret i32 %1
 }
@@ -116,8 +116,8 @@ declare i32 @llvm.hexagon.C2.cmpgtui(i32, i32) #1
 define i32 @cmpgei(i32 %i) #0 {
 entry:
   %i.addr = alloca i32, align 4
-  store i32 %i, i32* %i.addr, align 4
-  %0 = load i32, i32* %i.addr, align 4
+  store i32 %i, ptr %i.addr, align 4
+  %0 = load i32, ptr %i.addr, align 4
   %1 = call i32 @llvm.hexagon.C2.cmpgei(i32 %0, i32 3)
   ret i32 %1
 }
@@ -130,8 +130,8 @@ declare i32 @llvm.hexagon.C2.cmpgei(i32, i32) #1
 define i32 @cmpgeu(i32 %i) #0 {
 entry:
   %i.addr = alloca i32, align 4
-  store i32 %i, i32* %i.addr, align 4
-  %0 = load i32, i32* %i.addr, align 4
+  store i32 %i, ptr %i.addr, align 4
+  %0 = load i32, ptr %i.addr, align 4
   %1 = call i32 @llvm.hexagon.C2.cmpgeui(i32 %0, i32 3)
   ret i32 %1
 }
@@ -144,8 +144,8 @@ declare i32 @llvm.hexagon.C2.cmpgeui(i32, i32) #1
 define i32 @cmpgeu0(i32 %i) #0 {
 entry:
   %i.addr = alloca i32, align 4
-  store i32 %i, i32* %i.addr, align 4
-  %0 = load i32, i32* %i.addr, align 4
+  store i32 %i, ptr %i.addr, align 4
+  %0 = load i32, ptr %i.addr, align 4
   %1 = call i32 @llvm.hexagon.C2.cmpgeui(i32 %0, i32 0)
   ret i32 %1
 }

diff  --git a/llvm/test/CodeGen/Hexagon/cmp_pred2.ll b/llvm/test/CodeGen/Hexagon/cmp_pred2.ll
index 182e5e0447ca7..93afbc9c2e542 100644
--- a/llvm/test/CodeGen/Hexagon/cmp_pred2.ll
+++ b/llvm/test/CodeGen/Hexagon/cmp_pred2.ll
@@ -11,12 +11,12 @@ entry:
   br i1 %cmp, label %if.then, label %entry.if.end_crit_edge
 
 entry.if.end_crit_edge:
-  %.pre = load i32, i32* @c, align 4
+  %.pre = load i32, ptr @c, align 4
   br label %if.end
 
 if.then:
   %sub = add nsw i32 %a, -10
-  store i32 %sub, i32* @c, align 4
+  store i32 %sub, ptr @c, align 4
   br label %if.end
 
 if.end:
@@ -32,12 +32,12 @@ entry:
   br i1 %cmp, label %entry.if.end_crit_edge, label %if.then
 
 entry.if.end_crit_edge:
-  %.pre = load i32, i32* @c, align 4
+  %.pre = load i32, ptr @c, align 4
   br label %if.end
 
 if.then:
   %sub = add nsw i32 %a, -10
-  store i32 %sub, i32* @c, align 4
+  store i32 %sub, ptr @c, align 4
   br label %if.end
 
 if.end:
@@ -53,12 +53,12 @@ entry:
   br i1 %cmp, label %entry.if.end_crit_edge, label %if.then
 
 entry.if.end_crit_edge:
-  %.pre = load i32, i32* @c, align 4
+  %.pre = load i32, ptr @c, align 4
   br label %if.end
 
 if.then:
   %sub = add i32 %a, -10
-  store i32 %sub, i32* @c, align 4
+  store i32 %sub, ptr @c, align 4
   br label %if.end
 
 if.end:
@@ -73,12 +73,12 @@ entry:
   br i1 %cmp, label %if.then, label %entry.if.end_crit_edge
 
 entry.if.end_crit_edge:
-  %.pre = load i32, i32* @c, align 4
+  %.pre = load i32, ptr @c, align 4
   br label %if.end
 
 if.then:
   %sub = add i32 %a, -10
-  store i32 %sub, i32* @c, align 4
+  store i32 %sub, ptr @c, align 4
   br label %if.end
 
 if.end:

diff  --git a/llvm/test/CodeGen/Hexagon/cmpb-dec-imm.ll b/llvm/test/CodeGen/Hexagon/cmpb-dec-imm.ll
index d3b48e6b294e3..49eab94a0768a 100644
--- a/llvm/test/CodeGen/Hexagon/cmpb-dec-imm.ll
+++ b/llvm/test/CodeGen/Hexagon/cmpb-dec-imm.ll
@@ -19,7 +19,7 @@ b2:
 
 b6:                                               ; preds = %b2
   %v7 = trunc i32 %a0 to i8
-  store i8 %v7, i8* @glob, align 1
+  store i8 %v7, ptr @glob, align 1
   br label %b8
 
 b8:                                               ; preds = %b6, %b2

diff  --git a/llvm/test/CodeGen/Hexagon/cmpb-eq.ll b/llvm/test/CodeGen/Hexagon/cmpb-eq.ll
index f0953294acbc7..faf2057a9f55f 100644
--- a/llvm/test/CodeGen/Hexagon/cmpb-eq.ll
+++ b/llvm/test/CodeGen/Hexagon/cmpb-eq.ll
@@ -6,10 +6,10 @@ target triple = "hexagon"
 
 %struct.wms_address_s = type { i32, i32, i32, i32, i8, [48 x i8] }
 
-define zeroext i8 @qmi_wmsi_bin_to_addr(i8* %str, i8 zeroext %len, %struct.wms_address_s* %addr) nounwind optsize {
+define zeroext i8 @qmi_wmsi_bin_to_addr(ptr %str, i8 zeroext %len, ptr %addr) nounwind optsize {
 entry:
-  %cmp = icmp eq i8* %str, null
-  %cmp2 = icmp eq %struct.wms_address_s* %addr, null
+  %cmp = icmp eq ptr %str, null
+  %cmp2 = icmp eq ptr %addr, null
   %or.cond = or i1 %cmp, %cmp2
   br i1 %or.cond, label %if.then12, label %if.then
 
@@ -27,9 +27,9 @@ for.body.lr.ph:                                   ; preds = %if.then
 for.body:                                         ; preds = %for.body.lr.ph, %if.end21
   %indvars.iv = phi i32 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %if.end21 ]
   %dec630 = phi i8 [ %dec626, %for.body.lr.ph ], [ %dec6, %if.end21 ]
-  %str.pn = phi i8* [ %str, %for.body.lr.ph ], [ %str.addr.029, %if.end21 ]
-  %str.addr.029 = getelementptr inbounds i8, i8* %str.pn, i32 1
-  %0 = load i8, i8* %str.addr.029, align 1, !tbaa !0
+  %str.pn = phi ptr [ %str, %for.body.lr.ph ], [ %str.addr.029, %if.end21 ]
+  %str.addr.029 = getelementptr inbounds i8, ptr %str.pn, i32 1
+  %0 = load i8, ptr %str.addr.029, align 1, !tbaa !0
   %cmp10 = icmp ugt i8 %0, -49
   br i1 %cmp10, label %if.then12.loopexit, label %if.end21
 
@@ -41,8 +41,8 @@ if.then12:                                        ; preds = %if.then12.loopexit,
 
 if.end21:                                         ; preds = %for.body
   %shr24 = lshr i8 %0, 4
-  %arrayidx = getelementptr inbounds %struct.wms_address_s, %struct.wms_address_s* %addr, i32 0, i32 5, i32 %indvars.iv
-  store i8 %shr24, i8* %arrayidx, align 1, !tbaa !0
+  %arrayidx = getelementptr inbounds %struct.wms_address_s, ptr %addr, i32 0, i32 5, i32 %indvars.iv
+  store i8 %shr24, ptr %arrayidx, align 1, !tbaa !0
   %dec6 = add i8 %dec630, -1
   %tobool = icmp eq i8 %dec630, 0
   %indvars.iv.next = add i32 %indvars.iv, 1

diff  --git a/llvm/test/CodeGen/Hexagon/cmpb_gtu.ll b/llvm/test/CodeGen/Hexagon/cmpb_gtu.ll
index 3f749550a6a05..4dcc7162022e8 100644
--- a/llvm/test/CodeGen/Hexagon/cmpb_gtu.ll
+++ b/llvm/test/CodeGen/Hexagon/cmpb_gtu.ll
@@ -3,17 +3,17 @@
 
 target triple = "hexagon"
 
-%s.0 = type { void (i8)*, void (i8)*, void (i8)*, void (i8)* }
-%s.1 = type { i8 (i8)*, void (i8)* }
-%s.2 = type { i8 (i8, %s.3*)*, i8 (i8)*, i8 (i8)*, i8 (i8)*, i8 (i8)*, i8 (i8)*, i8 (i16)*, i8 (i8)*, i8 (i16)*, i8 (i8)* }
+%s.0 = type { ptr, ptr, ptr, ptr }
+%s.1 = type { ptr, ptr }
+%s.2 = type { ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
 %s.3 = type { %s.4, [2 x %s.5], i8, %s.7, %s.19, i8, %s.8, i16, [6 x %s.14], %s.17, %s.18, %s.19 }
-%s.4 = type { i16, i8, i8* }
+%s.4 = type { i16, i8, ptr }
 %s.5 = type { i16, i8, i8, i8, i8, i8, i8, i8, i16, i8, i8, i8, i16, %s.6 }
 %s.6 = type { i8, i16, i16, i8, i8 }
 %s.7 = type { i8, i8, i8, i8, i64, i64 }
-%s.8 = type { i16, %s.9, i32, %s.10*, i8, i8 }
+%s.8 = type { i16, %s.9, i32, ptr, i8, i8 }
 %s.9 = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
-%s.10 = type { i8, [14 x i8], [14 x %s.11*] }
+%s.10 = type { i8, [14 x i8], [14 x ptr] }
 %s.11 = type { i8, i8, i8, i8, i8, i16, i8, i8, i8, i8, i8, i8, i8, i8, %s.12, %s.13 }
 %s.12 = type { i16, i8, i8, i8, i16, i8, i8 }
 %s.13 = type { i8, i8, i8, i8, i8, i8 }
@@ -24,7 +24,7 @@ target triple = "hexagon"
 %s.18 = type { i8, i8, i32 }
 %s.19 = type { i8, i8, i8, i8 }
 %s.22 = type { %s.23, %s.24 }
-%s.23 = type { i8, i8, i8, i8, i8, i8, i8, %s.0*, %s.1*, %s.2*, i8 }
+%s.23 = type { i8, i8, i8, i8, i8, i8, i8, ptr, ptr, ptr, i8 }
 %s.24 = type { i16, i16, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i16, i8, i8 }
 %s.25 = type { %s.26 }
 %s.26 = type { i8, i8, i32 }
@@ -68,14 +68,14 @@ declare void @f6()
 ; Function Attrs: nounwind
 define void @f7() #0 {
 b0:
-  %v0 = load i8, i8* getelementptr inbounds (%s.25, %s.25* @g20, i32 0, i32 0, i32 1), align 1, !tbaa !0
+  %v0 = load i8, ptr getelementptr inbounds (%s.25, ptr @g20, i32 0, i32 0, i32 1), align 1, !tbaa !0
   %v1 = icmp eq i8 %v0, 1
   br label %b1
 
 b1:                                               ; preds = %b5, %b0
   %v2 = phi i32 [ 0, %b0 ], [ %v14, %b5 ]
-  %v3 = getelementptr inbounds [6 x %s.22], [6 x %s.22]* @g19, i32 0, i32 %v2, i32 1, i32 4
-  %v4 = load i8, i8* %v3, align 2, !tbaa !0
+  %v3 = getelementptr inbounds [6 x %s.22], ptr @g19, i32 0, i32 %v2, i32 1, i32 4
+  %v4 = load i8, ptr %v3, align 2, !tbaa !0
   %v5 = icmp eq i8 %v4, 1
   br i1 %v5, label %b2, label %b5
 
@@ -83,19 +83,19 @@ b2:                                               ; preds = %b1
   br i1 %v1, label %b3, label %b4
 
 b3:                                               ; preds = %b2
-  %v6 = getelementptr inbounds [6 x %s.22], [6 x %s.22]* @g19, i32 0, i32 %v2, i32 1, i32 6
-  %v7 = load i8, i8* %v6, align 4, !tbaa !0
+  %v6 = getelementptr inbounds [6 x %s.22], ptr @g19, i32 0, i32 %v2, i32 1, i32 6
+  %v7 = load i8, ptr %v6, align 4, !tbaa !0
   %v8 = add i8 %v7, -2
   %v9 = icmp ult i8 %v8, 44
   br i1 %v9, label %b5, label %b4
 
 b4:                                               ; preds = %b3, %b2
   %v10 = shl i32 1, %v2
-  %v11 = load i32, i32* getelementptr inbounds (%s.25, %s.25* @g20, i32 0, i32 0, i32 2), align 4, !tbaa !3
+  %v11 = load i32, ptr getelementptr inbounds (%s.25, ptr @g20, i32 0, i32 0, i32 2), align 4, !tbaa !3
   %v12 = or i32 %v11, %v10
-  store i32 %v12, i32* getelementptr inbounds (%s.25, %s.25* @g20, i32 0, i32 0, i32 2), align 4, !tbaa !3
-  %v13 = getelementptr inbounds [6 x %s.22], [6 x %s.22]* @g19, i32 0, i32 %v2, i32 1, i32 13
-  store i8 1, i8* %v13, align 4, !tbaa !0
+  store i32 %v12, ptr getelementptr inbounds (%s.25, ptr @g20, i32 0, i32 0, i32 2), align 4, !tbaa !3
+  %v13 = getelementptr inbounds [6 x %s.22], ptr @g19, i32 0, i32 %v2, i32 1, i32 13
+  store i8 1, ptr %v13, align 4, !tbaa !0
   br label %b5
 
 b5:                                               ; preds = %b4, %b3, %b1

diff  --git a/llvm/test/CodeGen/Hexagon/cmpb_pred.ll b/llvm/test/CodeGen/Hexagon/cmpb_pred.ll
index b8c969035c5c5..39defe335c640 100644
--- a/llvm/test/CodeGen/Hexagon/cmpb_pred.ll
+++ b/llvm/test/CodeGen/Hexagon/cmpb_pred.ll
@@ -16,7 +16,7 @@ entry:
 define i32 @Func_3b(i32) nounwind readonly {
 entry:
 ; CHECK-NOT: mux
-  %1 = load i8, i8* @Enum_global, align 1
+  %1 = load i8, ptr @Enum_global, align 1
   %2 = trunc i32 %0 to i8
   %cmp = icmp ne i8 %1, %2
   %selv = zext i1 %cmp to i32
@@ -35,7 +35,7 @@ entry:
 define i32 @Func_3d(i32) nounwind readonly {
 entry:
 ; CHECK-NOT: mux
-  %1 = load i8, i8* @Enum_global, align 1
+  %1 = load i8, ptr @Enum_global, align 1
   %2 = trunc i32 %0 to i8
   %cmp = icmp eq i8 %1, %2
   %selv = zext i1 %cmp to i32
@@ -45,7 +45,7 @@ entry:
 define i32 @Func_3e(i32) nounwind readonly {
 entry:
 ; CHECK-NOT: mux
-  %1 = load i8, i8* @Enum_global, align 1
+  %1 = load i8, ptr @Enum_global, align 1
   %2 = trunc i32 %0 to i8
   %cmp = icmp eq i8 %1, %2
   %selv = zext i1 %cmp to i32

diff  --git a/llvm/test/CodeGen/Hexagon/cmpbeq.ll b/llvm/test/CodeGen/Hexagon/cmpbeq.ll
index 645161ffdd08b..7cf51b3d08819 100644
--- a/llvm/test/CodeGen/Hexagon/cmpbeq.ll
+++ b/llvm/test/CodeGen/Hexagon/cmpbeq.ll
@@ -16,7 +16,7 @@ b0:
 
 b1:                                               ; preds = %b0
   %v3 = trunc i32 %a0 to i8
-  store i8 %v3, i8* @g0, align 1
+  store i8 %v3, ptr @g0, align 1
   br label %b2
 
 b2:                                               ; preds = %b1, %b0

diff  --git a/llvm/test/CodeGen/Hexagon/cmph-gtu.ll b/llvm/test/CodeGen/Hexagon/cmph-gtu.ll
index f5feb7bc6fb15..419eee963f3f7 100644
--- a/llvm/test/CodeGen/Hexagon/cmph-gtu.ll
+++ b/llvm/test/CodeGen/Hexagon/cmph-gtu.ll
@@ -15,7 +15,7 @@ b2:
 
 b6:                                               ; preds = %b2
   %v7 = trunc i32 %a0 to i8
-  store i8 %v7, i8* @glob, align 1
+  store i8 %v7, ptr @glob, align 1
   br label %b8
 
 b8:                                               ; preds = %b6, %b2
@@ -35,7 +35,7 @@ b2:
 
 b6:                                               ; preds = %b2
   %v7 = trunc i32 %a0 to i8
-  store i8 %v7, i8* @glob, align 1
+  store i8 %v7, ptr @glob, align 1
   br label %b8
 
 b8:                                               ; preds = %b6, %b2

diff  --git a/llvm/test/CodeGen/Hexagon/coalesce_tfri.ll b/llvm/test/CodeGen/Hexagon/coalesce_tfri.ll
index ae89beabcdaff..f83668f9c4a33 100644
--- a/llvm/test/CodeGen/Hexagon/coalesce_tfri.ll
+++ b/llvm/test/CodeGen/Hexagon/coalesce_tfri.ll
@@ -6,7 +6,7 @@ target triple = "hexagon"
 @g1 = external global i32, align 4
 @g2 = external hidden unnamed_addr constant [49 x i8], align 8
 @g3 = external hidden unnamed_addr constant [76 x i8], align 8
- at g4 = external unnamed_addr constant { i8*, i8* }
+ at g4 = external unnamed_addr constant { ptr, ptr }
 @g5 = external hidden unnamed_addr constant [36 x i8], align 8
 
 declare void @f0()
@@ -18,10 +18,10 @@ declare i32 @f2(i32)
 declare void @f3()
 
 ; Function Attrs: nounwind
-declare void ()* @f4(void ()*) #0
+declare ptr @f4(ptr) #0
 
 ; Function Attrs: nounwind
-declare void ()* @f5(void ()*) #0
+declare ptr @f5(ptr) #0
 
 ; CHECK: f6:
 ; CHECK-DAG: call f4
@@ -36,47 +36,47 @@ declare void ()* @f5(void ()*) #0
 ; CHECK-DAG: call f10
 ; CHECK-DAG: r0 = #4
 ; CHECK-DAG: r{{[0-9]+}} = ##g1
-define i32 @f6() personality i8* bitcast (i32 (...)* @f11 to i8*) {
+define i32 @f6() personality ptr @f11 {
 b0:
   tail call void @f7()
-  %v0 = tail call void ()* @f4(void ()* @f3) #0
-  %v1 = tail call void ()* @f5(void ()* @f0) #0
-  tail call void (i8*, ...) @f8(i8* getelementptr inbounds ([49 x i8], [49 x i8]* @g2, i32 0, i32 0))
+  %v0 = tail call ptr @f4(ptr @f3) #0
+  %v1 = tail call ptr @f5(ptr @f0) #0
+  tail call void (ptr, ...) @f8(ptr @g2)
   tail call void @f9()
-  tail call void (i8*, ...) @f8(i8* getelementptr inbounds ([76 x i8], [76 x i8]* @g3, i32 0, i32 0))
-  %v2 = tail call i8* @f10(i32 4) #0
-  %v3 = load i32, i32* @g1, align 4, !tbaa !0
+  tail call void (ptr, ...) @f8(ptr @g3)
+  %v2 = tail call ptr @f10(i32 4) #0
+  %v3 = load i32, ptr @g1, align 4, !tbaa !0
   %v4 = add nsw i32 %v3, 1
-  store i32 %v4, i32* @g1, align 4, !tbaa !0
-  invoke void @f12(i8* %v2, i8* bitcast ({ i8*, i8* }* @g4 to i8*), i8* null) #1
+  store i32 %v4, ptr @g1, align 4, !tbaa !0
+  invoke void @f12(ptr %v2, ptr @g4, ptr null) #1
           to label %b7 unwind label %b1
 
 b1:                                               ; preds = %b0
-  %v5 = landingpad { i8*, i32 }
-          catch i8* null
-  %v6 = extractvalue { i8*, i32 } %v5, 0
-  %v7 = tail call i8* @f13(i8* %v6) #0
-  store i32 0, i32* @g1, align 4, !tbaa !0
+  %v5 = landingpad { ptr, i32 }
+          catch ptr null
+  %v6 = extractvalue { ptr, i32 } %v5, 0
+  %v7 = tail call ptr @f13(ptr %v6) #0
+  store i32 0, ptr @g1, align 4, !tbaa !0
   invoke void @f14() #1
           to label %b7 unwind label %b2
 
 b2:                                               ; preds = %b1
-  %v8 = landingpad { i8*, i32 }
-          catch i8* null
+  %v8 = landingpad { ptr, i32 }
+          catch ptr null
   invoke void @f15()
           to label %b3 unwind label %b6
 
 b3:                                               ; preds = %b2
-  %v9 = extractvalue { i8*, i32 } %v8, 0
-  %v10 = tail call i8* @f13(i8* %v9) #0
+  %v9 = extractvalue { ptr, i32 } %v8, 0
+  %v10 = tail call ptr @f13(ptr %v9) #0
   tail call void @f15()
-  %v11 = load i32, i32* @g1, align 4, !tbaa !0
+  %v11 = load i32, ptr @g1, align 4, !tbaa !0
   %v12 = icmp eq i32 %v11, 0
   br i1 %v12, label %b5, label %b4
 
 b4:                                               ; preds = %b3
-  tail call void (i8*, ...) @f8(i8* getelementptr inbounds ([36 x i8], [36 x i8]* @g5, i32 0, i32 0))
-  store i32 1, i32* @g0, align 4, !tbaa !0
+  tail call void (ptr, ...) @f8(ptr @g5)
+  store i32 1, ptr @g0, align 4, !tbaa !0
   br label %b5
 
 b5:                                               ; preds = %b4, %b3
@@ -85,8 +85,8 @@ b5:                                               ; preds = %b4, %b3
   ret i32 %v14
 
 b6:                                               ; preds = %b2
-  %v15 = landingpad { i8*, i32 }
-          catch i8* null
+  %v15 = landingpad { ptr, i32 }
+          catch ptr null
   tail call void @f16() #2
   unreachable
 
@@ -96,17 +96,17 @@ b7:                                               ; preds = %b1, %b0
 
 declare void @f7()
 
-declare void @f8(i8*, ...)
+declare void @f8(ptr, ...)
 
 declare void @f9()
 
-declare i8* @f10(i32)
+declare ptr @f10(i32)
 
 declare i32 @f11(...)
 
-declare void @f12(i8*, i8*, i8*)
+declare void @f12(ptr, ptr, ptr)
 
-declare i8* @f13(i8*)
+declare ptr @f13(ptr)
 
 declare void @f14()
 

diff  --git a/llvm/test/CodeGen/Hexagon/coalescing-hvx-across-calls.ll b/llvm/test/CodeGen/Hexagon/coalescing-hvx-across-calls.ll
index 6e963a2b26070..ae9eb48c9a787 100644
--- a/llvm/test/CodeGen/Hexagon/coalescing-hvx-across-calls.ll
+++ b/llvm/test/CodeGen/Hexagon/coalescing-hvx-across-calls.ll
@@ -8,16 +8,16 @@ target triple = "hexagon"
 
 %struct.descr = type opaque
 
-define inreg <64 x i32> @danny(%struct.descr* %desc, i32 %xy0, i32 %xy1) #0 {
+define inreg <64 x i32> @danny(ptr %desc, i32 %xy0, i32 %xy1) #0 {
 entry:
-  %call = tail call inreg <32 x i32> @sammy(%struct.descr* %desc, i32 %xy0) #3
-  %call1 = tail call inreg <32 x i32> @kirby(%struct.descr* %desc, i32 %xy1) #3
+  %call = tail call inreg <32 x i32> @sammy(ptr %desc, i32 %xy0) #3
+  %call1 = tail call inreg <32 x i32> @kirby(ptr %desc, i32 %xy1) #3
   %0 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> %call1, <32 x i32> %call)
   ret <64 x i32> %0
 }
 
-declare inreg <32 x i32> @sammy(%struct.descr*, i32) #1
-declare inreg <32 x i32> @kirby(%struct.descr*, i32) #1
+declare inreg <32 x i32> @sammy(ptr, i32) #1
+declare inreg <32 x i32> @kirby(ptr, i32) #1
 declare <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32>, <32 x i32>) #2
 
 attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx-length128b,+hvxv60" }

diff  --git a/llvm/test/CodeGen/Hexagon/combine-imm-ext.ll b/llvm/test/CodeGen/Hexagon/combine-imm-ext.ll
index b604ca1f3c32e..1ee6d55287c55 100644
--- a/llvm/test/CodeGen/Hexagon/combine-imm-ext.ll
+++ b/llvm/test/CodeGen/Hexagon/combine-imm-ext.ll
@@ -3,11 +3,11 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define i32 @f0(i32* %a0, i32* %a1) #0 {
+define i32 @f0(ptr %a0, ptr %a1) #0 {
 b0:
 ; We want to see a #-22 in combine, not ##-22.
 ; CHECK: combine(#5,#-22)
-  %v0 = tail call i32 bitcast (i32 (...)* @f1 to i32 (i32*, i32*, i32, i32)*)(i32* %a0, i32* %a1, i32 -22, i32 5) #0
+  %v0 = tail call i32 @f1(ptr %a0, ptr %a1, i32 -22, i32 5) #0
   ret i32 %v0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/combine-imm-ext2.ll b/llvm/test/CodeGen/Hexagon/combine-imm-ext2.ll
index dd0bd2a96d6d4..1fe89dc175106 100644
--- a/llvm/test/CodeGen/Hexagon/combine-imm-ext2.ll
+++ b/llvm/test/CodeGen/Hexagon/combine-imm-ext2.ll
@@ -3,11 +3,11 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define i32 @f0(i32* %a0, i32* %a1) #0 {
+define i32 @f0(ptr %a0, ptr %a1) #0 {
 b0:
 ; We want to see a ##24576 in combine, not #24576.
 ; CHECK: combine(#5,##24576)
-  %v0 = tail call i32 bitcast (i32 (...)* @f1 to i32 (i32*, i32*, i16, i16)*)(i32* %a0, i32* %a1, i16 24576, i16 5) #0
+  %v0 = tail call i32 @f1(ptr %a0, ptr %a1, i16 24576, i16 5) #0
   ret i32 %v0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/combine.ll b/llvm/test/CodeGen/Hexagon/combine.ll
index f0f9d6507b078..1069771d08a55 100644
--- a/llvm/test/CodeGen/Hexagon/combine.ll
+++ b/llvm/test/CodeGen/Hexagon/combine.ll
@@ -6,11 +6,11 @@
 
 define void @foo() nounwind {
 entry:
-  %0 = load i32, i32* @j, align 4
-  %1 = load i64, i64* @k, align 8
+  %0 = load i32, ptr @j, align 4
+  %1 = load i64, ptr @k, align 8
   %conv = trunc i64 %1 to i32
   %2 = call i64 @llvm.hexagon.A2.combinew(i32 %0, i32 %conv)
-  store i64 %2, i64* @k, align 8
+  store i64 %2, ptr @k, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/combine_ir.ll b/llvm/test/CodeGen/Hexagon/combine_ir.ll
index 00fce0a05de65..73d1eaef33699 100644
--- a/llvm/test/CodeGen/Hexagon/combine_ir.ll
+++ b/llvm/test/CodeGen/Hexagon/combine_ir.ll
@@ -5,12 +5,12 @@ declare void @bar(i64)
 ; CHECK-LABEL: halfword:
 ; CHECK: combine(#0
 
-define void @halfword(i16* nocapture %a) nounwind {
+define void @halfword(ptr nocapture %a) nounwind {
 entry:
-  %0 = load i16, i16* %a, align 2
+  %0 = load i16, ptr %a, align 2
   %1 = zext i16 %0 to i64
-  %add.ptr = getelementptr inbounds i16, i16* %a, i32 1
-  %2 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %a, i32 1
+  %2 = load i16, ptr %add.ptr, align 2
   %3 = zext i16 %2 to i64
   %4 = shl nuw nsw i64 %3, 16
   %ins = or i64 %4, %1
@@ -21,12 +21,12 @@ entry:
 ; CHECK-LABEL: byte:
 ; CHECK: combine(#0
 
-define void @byte(i8* nocapture %a) nounwind {
+define void @byte(ptr nocapture %a) nounwind {
 entry:
-  %0 = load i8, i8* %a, align 1
+  %0 = load i8, ptr %a, align 1
   %1 = zext i8 %0 to i64
-  %add.ptr = getelementptr inbounds i8, i8* %a, i32 1
-  %2 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %a, i32 1
+  %2 = load i8, ptr %add.ptr, align 1
   %3 = zext i8 %2 to i64
   %4 = shl nuw nsw i64 %3, 8
   %ins = or i64 %4, %1

diff  --git a/llvm/test/CodeGen/Hexagon/combiner-lts.ll b/llvm/test/CodeGen/Hexagon/combiner-lts.ll
index 46c96d5dc1a8e..2731149abcc43 100644
--- a/llvm/test/CodeGen/Hexagon/combiner-lts.ll
+++ b/llvm/test/CodeGen/Hexagon/combiner-lts.ll
@@ -11,7 +11,7 @@ target triple = "hexagon"
 %s.0 = type { [3 x i32] }
 
 ; Function Attrs: nounwind
-define void @f0(i32 %a0, i32 %a1, %s.0* nocapture %a2, %s.0* nocapture %a3) #0 {
+define void @f0(i32 %a0, i32 %a1, ptr nocapture %a2, ptr nocapture %a3) #0 {
 b0:
 ; Pick one store that happens as a result.  This isn't the best, but a regular
 ; expression for a register name matches some unrelated load.
@@ -19,17 +19,14 @@ b0:
 ; CHECK: = memw(r3+#8)
 ; CHECK-NOT: memw(r3+#8) =
 ; CHECK: %bb.
-  %v0 = bitcast %s.0* %a2 to i8*
-  %v1 = bitcast %s.0* %a3 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %v0, i8* align 4 %v1, i32 12, i1 false)
-  %v2 = bitcast %s.0* %a2 to i96*
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %a2, ptr align 4 %a3, i32 12, i1 false)
   %v3 = zext i32 %a0 to i96
-  %v4 = load i96, i96* %v2, align 4
+  %v4 = load i96, ptr %a2, align 4
   %v5 = shl nuw nsw i96 %v3, 48
   %v6 = and i96 %v5, 281474976710656
   %v7 = and i96 %v4, -281474976710657
   %v8 = or i96 %v7, %v6
-  store i96 %v8, i96* %v2, align 4
+  store i96 %v8, ptr %a2, align 4
   %v9 = icmp eq i32 %a1, 2147483647
   br i1 %v9, label %b1, label %b2
 
@@ -38,16 +35,15 @@ b1:                                               ; preds = %b0
   br label %b3
 
 b2:                                               ; preds = %b0
-  %v11 = bitcast %s.0* %a3 to i96*
-  %v12 = load i96, i96* %v11, align 4
+  %v12 = load i96, ptr %a3, align 4
   %v13 = trunc i96 %v12 to i32
   %v14 = add i32 %v13, %a1
   %v15 = zext i32 %v14 to i96
   %v16 = and i96 %v15, 4194303
   %v17 = and i96 %v8, -4194304
   %v18 = or i96 %v16, %v17
-  store i96 %v18, i96* %v2, align 4
-  %v19 = load i96, i96* %v11, align 4
+  store i96 %v18, ptr %a2, align 4
+  %v19 = load i96, ptr %a3, align 4
   %v20 = and i96 %v19, 12582912
   %v21 = and i96 %v18, -12582913
   %v22 = or i96 %v21, %v20
@@ -55,12 +51,12 @@ b2:                                               ; preds = %b0
 
 b3:                                               ; preds = %b2, %b1
   %v23 = phi i96 [ %v22, %b2 ], [ %v10, %b1 ]
-  store i96 %v23, i96* %v2, align 4
+  store i96 %v23, ptr %a2, align 4
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #1
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1) #1
 
 attributes #0 = { nounwind }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/common-gep-basic.ll b/llvm/test/CodeGen/Hexagon/common-gep-basic.ll
index 165d8921c97e8..adf2d58ab9964 100644
--- a/llvm/test/CodeGen/Hexagon/common-gep-basic.ll
+++ b/llvm/test/CodeGen/Hexagon/common-gep-basic.ll
@@ -18,20 +18,20 @@ entry:
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  %arrayidx1 = getelementptr inbounds [100 x %struct.s_t], [100 x %struct.s_t]* @g, i32 0, i32 %x, i32 0, i32 1, i32 2
-  tail call void @bar(i32* %arrayidx1) #0
+  %arrayidx1 = getelementptr inbounds [100 x %struct.s_t], ptr @g, i32 0, i32 %x, i32 0, i32 1, i32 2
+  tail call void @bar(ptr %arrayidx1) #0
   br label %if.end
 
 if.else:                                          ; preds = %entry
-  %arrayidx5 = getelementptr inbounds [100 x %struct.s_t], [100 x %struct.s_t]* @g, i32 0, i32 %x, i32 0, i32 1, i32 3
-  tail call void @bar(i32* %arrayidx5) #0
+  %arrayidx5 = getelementptr inbounds [100 x %struct.s_t], ptr @g, i32 0, i32 %x, i32 0, i32 1, i32 3
+  tail call void @bar(ptr %arrayidx5) #0
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
   ret void
 }
 
-declare void @bar(i32*) #0
+declare void @bar(ptr) #0
 
 attributes #0 = { nounwind }
 

diff  --git a/llvm/test/CodeGen/Hexagon/common-gep-icm.ll b/llvm/test/CodeGen/Hexagon/common-gep-icm.ll
index bc5719dfe1d0e..871df7f149a69 100644
--- a/llvm/test/CodeGen/Hexagon/common-gep-icm.ll
+++ b/llvm/test/CodeGen/Hexagon/common-gep-icm.ll
@@ -29,8 +29,8 @@ entry:
 while.body:
   %count = phi i32 [ 0, %entry ], [ %next, %while.end ]
   %idx = phi i32 [ 0, %entry ], [ %15, %while.end ]
-  %0 = load i32, i32* @B1, align 4
-  %1 = load i32, i32* @B2, align 8
+  %0 = load i32, ptr @B1, align 4
+  %1 = load i32, ptr @B2, align 8
   %2 = and i32 %1, %0
   br label %while.body13
 
@@ -40,21 +40,21 @@ while.body13:                                     ; preds = %while.body, %if.end
   %m = phi i32 [ %6, %if.end ], [ %2, %while.body ]
   %5 = tail call i32 @llvm.hexagon.S2.cl0(i32 %m)
   %6 = tail call i32 @llvm.hexagon.S2.setbit.r(i32 %m, i32 %5)
-  %cgep85 = getelementptr [10 x %struct.2], [10 x %struct.2]* inttoptr (i32 -121502345 to [10 x %struct.2]*), i32 0, i32 %idx
-  %cgep90 = getelementptr %struct.2, %struct.2* %cgep85, i32 0, i32 12, i32 %5
-  %7 = load i32, i32* %cgep90, align 4
+  %cgep85 = getelementptr [10 x %struct.2], ptr inttoptr (i32 -121502345 to ptr), i32 0, i32 %idx
+  %cgep90 = getelementptr %struct.2, ptr %cgep85, i32 0, i32 12, i32 %5
+  %7 = load i32, ptr %cgep90, align 4
   %8 = tail call i64 @llvm.hexagon.M2.vmpy2s.s0(i32 %7, i32 %7)
-  %cgep91 = getelementptr %struct.2, %struct.2* %cgep85, i32 0, i32 13, i32 %5
-  %9 = load i32, i32* %cgep91, align 4
+  %cgep91 = getelementptr %struct.2, ptr %cgep85, i32 0, i32 13, i32 %5
+  %9 = load i32, ptr %cgep91, align 4
   %10 = tail call i64 @llvm.hexagon.M2.vmac2s.s0(i64 %8, i32 %9, i32 %9)
-  %11 = load i8, i8* @C1, align 1
+  %11 = load i8, ptr @C1, align 1
   %and24 = and i8 %11, 1
   %cmp = icmp eq i8 %and24, 0
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %while.body13
   %12 = tail call i64 @llvm.hexagon.A2.vaddws(i64 %3, i64 %10)
-  store i64 %12, i64* @A1, align 8
+  store i64 %12, ptr @A1, align 8
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %while.body13
@@ -71,6 +71,6 @@ while.end:
   br i1 %cc, label %end, label %while.body
 
 end:
-  store i64 %10, i64* @A2, align 8
+  store i64 %10, ptr @A2, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/common-global-addr.ll b/llvm/test/CodeGen/Hexagon/common-global-addr.ll
index a744b53c85246..ffd93a9afe6c4 100644
--- a/llvm/test/CodeGen/Hexagon/common-global-addr.ll
+++ b/llvm/test/CodeGen/Hexagon/common-global-addr.ll
@@ -8,9 +8,9 @@ define zeroext i32 @f0() #0 {
 b0:
 ; CHECK: ##g0
 ; CHECK-NOT: ##g0
-  %v0 = load i32, i32* @g0, align 1
+  %v0 = load i32, ptr @g0, align 1
   %v1 = mul nsw i32 100, %v0
-  store i32 %v1, i32* @g0, align 1
+  store i32 %v1, ptr @g0, align 1
   ret i32 %v1
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/concat-vectors-legalize.ll b/llvm/test/CodeGen/Hexagon/concat-vectors-legalize.ll
index f9802405e4b9e..6f73b47f9a487 100644
--- a/llvm/test/CodeGen/Hexagon/concat-vectors-legalize.ll
+++ b/llvm/test/CodeGen/Hexagon/concat-vectors-legalize.ll
@@ -3,27 +3,27 @@
 
 target triple = "hexagon-unknown--elf"
 
-%s.8 = type { i8*, i32, i32, i32, i32, %s.9*, %s.9*, %s.9* }
+%s.8 = type { ptr, i32, i32, i32, i32, ptr, ptr, ptr }
 %s.9 = type { %s.10 }
 %s.10 = type { i64 }
-%s.4 = type { i64, i8*, [4 x i32], [4 x i32], [4 x i32], i32, i8, i8, [6 x i8] }
+%s.4 = type { i64, ptr, [4 x i32], [4 x i32], [4 x i32], i32, i8, i8, [6 x i8] }
 
 @g0 = private constant [6 x i8] c"input\00", align 32
 @g1 = private constant [11 x i8] c"gaussian11\00", align 32
- at g2 = private constant [2 x %s.8] [%s.8 { i8* getelementptr inbounds ([6 x i8], [6 x i8]* @g0, i32 0, i32 0), i32 1, i32 2, i32 1, i32 8, %s.9* null, %s.9* null, %s.9* null }, %s.8 { i8* getelementptr inbounds ([11 x i8], [11 x i8]* @g1, i32 0, i32 0), i32 2, i32 2, i32 1, i32 8, %s.9* null, %s.9* null, %s.9* null }]
+ at g2 = private constant [2 x %s.8] [%s.8 { ptr @g0, i32 1, i32 2, i32 1, i32 8, ptr null, ptr null, ptr null }, %s.8 { ptr @g1, i32 2, i32 2, i32 1, i32 8, ptr null, ptr null, ptr null }]
 @g3 = private constant [53 x i8] c"hexagon-32-os_unknown-no_asserts-no_bounds_query-hvx\00", align 32
 
 ; Function Attrs: nounwind
-declare i8* @f0(i8*, i32) #0
+declare ptr @f0(ptr, i32) #0
 
 ; Function Attrs: nounwind
-declare void @f1(i8*, i8*) #0
+declare void @f1(ptr, ptr) #0
 
 ; Function Attrs: nounwind
-declare noalias i8* @f2(i8*, i32) #0
+declare noalias ptr @f2(ptr, i32) #0
 
 ; Function Attrs: nounwind
-declare void @f3(i8*, i8*) #0
+declare void @f3(ptr, ptr) #0
 
 ; Function Attrs: nounwind
 declare void @f4() #0
@@ -32,32 +32,32 @@ declare void @f4() #0
 declare void @f5() #0
 
 ; Function Attrs: nounwind
-define i32 @f6(%s.4* noalias nocapture readonly %a0, %s.4* noalias nocapture readonly %a1) #0 {
+define i32 @f6(ptr noalias nocapture readonly %a0, ptr noalias nocapture readonly %a1) #0 {
 b0:
-  %v0 = getelementptr inbounds %s.4, %s.4* %a0, i32 0, i32 1
-  %v1 = load i8*, i8** %v0
-  %v2 = getelementptr inbounds %s.4, %s.4* %a0, i32 0, i32 2, i32 0
-  %v3 = load i32, i32* %v2
-  %v4 = getelementptr inbounds %s.4, %s.4* %a0, i32 0, i32 2, i32 1
-  %v5 = load i32, i32* %v4
-  %v6 = getelementptr inbounds %s.4, %s.4* %a0, i32 0, i32 3, i32 1
-  %v7 = load i32, i32* %v6
-  %v8 = getelementptr inbounds %s.4, %s.4* %a0, i32 0, i32 4, i32 0
-  %v9 = load i32, i32* %v8
-  %v10 = getelementptr inbounds %s.4, %s.4* %a0, i32 0, i32 4, i32 1
-  %v11 = load i32, i32* %v10
-  %v12 = getelementptr inbounds %s.4, %s.4* %a1, i32 0, i32 1
-  %v13 = load i8*, i8** %v12
-  %v14 = getelementptr inbounds %s.4, %s.4* %a1, i32 0, i32 2, i32 0
-  %v15 = load i32, i32* %v14
-  %v16 = getelementptr inbounds %s.4, %s.4* %a1, i32 0, i32 2, i32 1
-  %v17 = load i32, i32* %v16
-  %v18 = getelementptr inbounds %s.4, %s.4* %a1, i32 0, i32 3, i32 1
-  %v19 = load i32, i32* %v18
-  %v20 = getelementptr inbounds %s.4, %s.4* %a1, i32 0, i32 4, i32 0
-  %v21 = load i32, i32* %v20
-  %v22 = getelementptr inbounds %s.4, %s.4* %a1, i32 0, i32 4, i32 1
-  %v23 = load i32, i32* %v22
+  %v0 = getelementptr inbounds %s.4, ptr %a0, i32 0, i32 1
+  %v1 = load ptr, ptr %v0
+  %v2 = getelementptr inbounds %s.4, ptr %a0, i32 0, i32 2, i32 0
+  %v3 = load i32, ptr %v2
+  %v4 = getelementptr inbounds %s.4, ptr %a0, i32 0, i32 2, i32 1
+  %v5 = load i32, ptr %v4
+  %v6 = getelementptr inbounds %s.4, ptr %a0, i32 0, i32 3, i32 1
+  %v7 = load i32, ptr %v6
+  %v8 = getelementptr inbounds %s.4, ptr %a0, i32 0, i32 4, i32 0
+  %v9 = load i32, ptr %v8
+  %v10 = getelementptr inbounds %s.4, ptr %a0, i32 0, i32 4, i32 1
+  %v11 = load i32, ptr %v10
+  %v12 = getelementptr inbounds %s.4, ptr %a1, i32 0, i32 1
+  %v13 = load ptr, ptr %v12
+  %v14 = getelementptr inbounds %s.4, ptr %a1, i32 0, i32 2, i32 0
+  %v15 = load i32, ptr %v14
+  %v16 = getelementptr inbounds %s.4, ptr %a1, i32 0, i32 2, i32 1
+  %v17 = load i32, ptr %v16
+  %v18 = getelementptr inbounds %s.4, ptr %a1, i32 0, i32 3, i32 1
+  %v19 = load i32, ptr %v18
+  %v20 = getelementptr inbounds %s.4, ptr %a1, i32 0, i32 4, i32 0
+  %v21 = load i32, ptr %v20
+  %v22 = getelementptr inbounds %s.4, ptr %a1, i32 0, i32 4, i32 1
+  %v23 = load i32, ptr %v22
   %v24 = add nsw i32 %v21, %v15
   %v25 = add nsw i32 %v24, -64
   %v26 = icmp slt i32 %v21, %v25
@@ -82,7 +82,7 @@ b0:
   %v45 = sext i32 %v44 to i64
   %v46 = mul nsw i64 %v45, %v42
   %v47 = trunc i64 %v46 to i32
-  %v48 = tail call i8* @f2(i8* null, i32 %v47)
+  %v48 = tail call ptr @f2(ptr null, i32 %v47)
   %v49 = add nsw i32 %v23, -1
   %v50 = add i32 %v23, %v17
   %v51 = icmp sgt i32 %v23, %v50
@@ -123,8 +123,8 @@ b3:                                               ; preds = %b3, %b2
   %v77 = add i32 %v68, %v9
   %v78 = sub i32 %v71, %v77
   %v79 = add i32 %v78, %v76
-  %v80 = getelementptr inbounds i8, i8* %v1, i32 %v79
-  %v81 = load i8, i8* %v80, align 1, !tbaa !4
+  %v80 = getelementptr inbounds i8, ptr %v1, i32 %v79
+  %v81 = load i8, ptr %v80, align 1, !tbaa !4
   %v82 = icmp sle i32 %v64, %v52
   %v83 = icmp sle i32 %v58, %v67
   %v84 = icmp slt i32 %v67, %v9
@@ -138,8 +138,8 @@ b3:                                               ; preds = %b3, %b2
   %v92 = sub i32 1, %v27
   %v93 = add i32 %v92, %v91
   %v94 = add i32 %v93, %v67
-  %v95 = getelementptr inbounds i8, i8* %v48, i32 %v94
-  store i8 %v88, i8* %v95, align 1, !tbaa !7
+  %v95 = getelementptr inbounds i8, ptr %v48, i32 %v94
+  store i8 %v88, ptr %v95, align 1, !tbaa !7
   %v96 = add nsw i32 %v67, 1
   %v97 = icmp eq i32 %v96, %v57
   br i1 %v97, label %b7, label %b3
@@ -156,8 +156,8 @@ b5:                                               ; preds = %b5, %b4
   %v103 = icmp slt i32 %v102, %v9
   %v104 = select i1 %v103, i32 %v9, i32 %v102
   %v105 = sub i32 %v104, %v9
-  %v106 = getelementptr inbounds i8, i8* %v1, i32 %v105
-  %v107 = load i8, i8* %v106, align 1, !tbaa !4
+  %v106 = getelementptr inbounds i8, ptr %v1, i32 %v105
+  %v107 = load i8, ptr %v106, align 1, !tbaa !4
   %v108 = icmp sle i32 %v64, %v52
   %v109 = icmp slt i32 %v52, %v11
   %v110 = icmp sle i32 %v58, %v99
@@ -172,8 +172,8 @@ b5:                                               ; preds = %b5, %b4
   %v119 = sub i32 1, %v27
   %v120 = add i32 %v119, %v118
   %v121 = add i32 %v120, %v99
-  %v122 = getelementptr inbounds i8, i8* %v48, i32 %v121
-  store i8 %v115, i8* %v122, align 1, !tbaa !7
+  %v122 = getelementptr inbounds i8, ptr %v48, i32 %v121
+  store i8 %v115, ptr %v122, align 1, !tbaa !7
   %v123 = add nsw i32 %v99, 1
   %v124 = icmp eq i32 %v123, %v57
   br i1 %v124, label %b7, label %b5
@@ -190,8 +190,8 @@ b6:                                               ; preds = %b6, %b4
   %v133 = add i32 %v126, %v9
   %v134 = sub i32 %v127, %v133
   %v135 = add i32 %v134, %v132
-  %v136 = getelementptr inbounds i8, i8* %v1, i32 %v135
-  %v137 = load i8, i8* %v136, align 1, !tbaa !4
+  %v136 = getelementptr inbounds i8, ptr %v1, i32 %v135
+  %v137 = load i8, ptr %v136, align 1, !tbaa !4
   %v138 = icmp sle i32 %v64, %v52
   %v139 = icmp slt i32 %v52, %v11
   %v140 = icmp sle i32 %v58, %v125
@@ -206,8 +206,8 @@ b6:                                               ; preds = %b6, %b4
   %v149 = sub i32 1, %v27
   %v150 = add i32 %v149, %v148
   %v151 = add i32 %v150, %v125
-  %v152 = getelementptr inbounds i8, i8* %v48, i32 %v151
-  store i8 %v145, i8* %v152, align 1, !tbaa !7
+  %v152 = getelementptr inbounds i8, ptr %v48, i32 %v151
+  store i8 %v145, ptr %v152, align 1, !tbaa !7
   %v153 = add nsw i32 %v125, 1
   %v154 = icmp eq i32 %v153, %v57
   br i1 %v154, label %b7, label %b6
@@ -229,8 +229,8 @@ b8:                                               ; preds = %b8, %b7
   %v165 = add i32 %v157, %v9
   %v166 = sub i32 %v164, %v165
   %v167 = add i32 %v166, %v156
-  %v168 = getelementptr inbounds i8, i8* %v1, i32 %v167
-  %v169 = load i8, i8* %v168, align 1, !tbaa !4
+  %v168 = getelementptr inbounds i8, ptr %v1, i32 %v167
+  %v169 = load i8, ptr %v168, align 1, !tbaa !4
   %v170 = icmp sle i32 %v158, %v52
   %v171 = icmp slt i32 %v52, %v11
   %v172 = or i1 %v171, %v170
@@ -241,8 +241,8 @@ b8:                                               ; preds = %b8, %b7
   %v177 = sub i32 1, %v27
   %v178 = add i32 %v177, %v176
   %v179 = add i32 %v178, %v156
-  %v180 = getelementptr inbounds i8, i8* %v48, i32 %v179
-  store i8 %v173, i8* %v180, align 1, !tbaa !7
+  %v180 = getelementptr inbounds i8, ptr %v48, i32 %v179
+  store i8 %v173, ptr %v180, align 1, !tbaa !7
   %v181 = add nsw i32 %v156, 1
   %v182 = icmp eq i32 %v181, %v62
   br i1 %v182, label %b9, label %b8
@@ -269,8 +269,8 @@ b10:                                              ; preds = %b10, %b9
   %v198 = add i32 %v185, %v9
   %v199 = sub i32 %v192, %v198
   %v200 = add i32 %v199, %v197
-  %v201 = getelementptr inbounds i8, i8* %v1, i32 %v200
-  %v202 = load i8, i8* %v201, align 1, !tbaa !4
+  %v201 = getelementptr inbounds i8, ptr %v1, i32 %v200
+  %v202 = load i8, ptr %v201, align 1, !tbaa !4
   %v203 = icmp sle i32 %v186, %v52
   %v204 = icmp slt i32 %v52, %v11
   %v205 = icmp sle i32 %v58, %v184
@@ -285,8 +285,8 @@ b10:                                              ; preds = %b10, %b9
   %v214 = sub i32 1, %v27
   %v215 = add i32 %v214, %v213
   %v216 = add i32 %v215, %v184
-  %v217 = getelementptr inbounds i8, i8* %v48, i32 %v216
-  store i8 %v210, i8* %v217, align 1, !tbaa !7
+  %v217 = getelementptr inbounds i8, ptr %v48, i32 %v216
+  store i8 %v210, ptr %v217, align 1, !tbaa !7
   %v218 = add nsw i32 %v184, 1
   %v219 = icmp eq i32 %v218, %v24
   br i1 %v219, label %b11, label %b10
@@ -302,7 +302,7 @@ b12:                                              ; preds = %b11, %b0
   %v224 = shl nsw i64 %v42, 2
   %v225 = mul i64 %v224, %v223
   %v226 = trunc i64 %v225 to i32
-  %v227 = tail call i8* @f2(i8* null, i32 %v226)
+  %v227 = tail call ptr @f2(ptr null, i32 %v226)
   br i1 %v51, label %b14, label %b13, !prof !3
 
 b13:                                              ; preds = %b19, %b12
@@ -314,11 +314,11 @@ b13:                                              ; preds = %b19, %b12
   br i1 %v232, label %b16, label %b17, !prof !9
 
 b14:                                              ; preds = %b19, %b12
-  %v233 = icmp eq i8* %v48, null
+  %v233 = icmp eq ptr %v48, null
   br i1 %v233, label %b20, label %b15
 
 b15:                                              ; preds = %b14
-  tail call void @f3(i8* null, i8* %v48) #2
+  tail call void @f3(ptr null, ptr %v48) #2
   br label %b20
 
 b16:                                              ; preds = %b16, %b13
@@ -330,18 +330,16 @@ b16:                                              ; preds = %b16, %b13
   %v239 = sub i32 %v21, %v27
   %v240 = add i32 %v239, %v238
   %v241 = add nsw i32 %v240, %v237
-  %v242 = getelementptr inbounds i8, i8* %v48, i32 %v241
-  %v243 = bitcast i8* %v242 to <16 x i32>*
-  %v244 = load <16 x i32>, <16 x i32>* %v243, align 1, !tbaa !7
+  %v242 = getelementptr inbounds i8, ptr %v48, i32 %v241
+  %v244 = load <16 x i32>, ptr %v242, align 1, !tbaa !7
   %v245 = tail call <32 x i32> @llvm.hexagon.V6.vzb(<16 x i32> %v244)
   %v246 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v245)
   %v247 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v245)
   %v248 = tail call <32 x i32> @llvm.hexagon.V6.vzh(<16 x i32> %v247)
   %v249 = tail call <32 x i32> @llvm.hexagon.V6.vzh(<16 x i32> %v246)
   %v250 = add nsw i32 %v241, 1
-  %v251 = getelementptr inbounds i8, i8* %v48, i32 %v250
-  %v252 = bitcast i8* %v251 to <16 x i32>*
-  %v253 = load <16 x i32>, <16 x i32>* %v252, align 1, !tbaa !7
+  %v251 = getelementptr inbounds i8, ptr %v48, i32 %v250
+  %v253 = load <16 x i32>, ptr %v251, align 1, !tbaa !7
   %v254 = tail call <32 x i32> @llvm.hexagon.V6.vzb(<16 x i32> %v253)
   %v255 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v254)
   %v256 = tail call <32 x i32> @llvm.hexagon.V6.vzh(<16 x i32> %v255)
@@ -362,25 +360,20 @@ b16:                                              ; preds = %b16, %b13
   %v271 = shufflevector <32 x i32> %v269, <32 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %v272 = mul nsw i32 %v236, %v222
   %v273 = add nsw i32 %v240, %v272
-  %v274 = bitcast i8* %v227 to i32*
-  %v275 = getelementptr inbounds i32, i32* %v274, i32 %v273
-  %v276 = bitcast i32* %v275 to <16 x i32>*
-  store <16 x i32> %v271, <16 x i32>* %v276, align 4, !tbaa !10
+  %v275 = getelementptr inbounds i32, ptr %v227, i32 %v273
+  store <16 x i32> %v271, ptr %v275, align 4, !tbaa !10
   %v277 = shufflevector <32 x i32> %v269, <32 x i32> undef, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %v278 = add nsw i32 %v273, 16
-  %v279 = getelementptr inbounds i32, i32* %v274, i32 %v278
-  %v280 = bitcast i32* %v279 to <16 x i32>*
-  store <16 x i32> %v277, <16 x i32>* %v280, align 4, !tbaa !10
+  %v279 = getelementptr inbounds i32, ptr %v227, i32 %v278
+  store <16 x i32> %v277, ptr %v279, align 4, !tbaa !10
   %v281 = shufflevector <32 x i32> %v270, <32 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %v282 = add nsw i32 %v273, 32
-  %v283 = getelementptr inbounds i32, i32* %v274, i32 %v282
-  %v284 = bitcast i32* %v283 to <16 x i32>*
-  store <16 x i32> %v281, <16 x i32>* %v284, align 4, !tbaa !10
+  %v283 = getelementptr inbounds i32, ptr %v227, i32 %v282
+  store <16 x i32> %v281, ptr %v283, align 4, !tbaa !10
   %v285 = shufflevector <32 x i32> %v270, <32 x i32> undef, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %v286 = add nsw i32 %v273, 48
-  %v287 = getelementptr inbounds i32, i32* %v274, i32 %v286
-  %v288 = bitcast i32* %v287 to <16 x i32>*
-  store <16 x i32> %v285, <16 x i32>* %v288, align 4, !tbaa !10
+  %v287 = getelementptr inbounds i32, ptr %v227, i32 %v286
+  store <16 x i32> %v285, ptr %v287, align 4, !tbaa !10
   %v289 = add nuw nsw i32 %v234, 1
   %v290 = icmp eq i32 %v289, %v231
   br i1 %v290, label %b17, label %b16
@@ -399,18 +392,16 @@ b18:                                              ; preds = %b18, %b17
   %v298 = sub nsw i32 %v24, %v27
   %v299 = add nsw i32 %v297, %v298
   %v300 = add nsw i32 %v299, -64
-  %v301 = getelementptr inbounds i8, i8* %v48, i32 %v300
-  %v302 = bitcast i8* %v301 to <16 x i32>*
-  %v303 = load <16 x i32>, <16 x i32>* %v302, align 1, !tbaa !7
+  %v301 = getelementptr inbounds i8, ptr %v48, i32 %v300
+  %v303 = load <16 x i32>, ptr %v301, align 1, !tbaa !7
   %v304 = tail call <32 x i32> @llvm.hexagon.V6.vzb(<16 x i32> %v303)
   %v305 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v304)
   %v306 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v304)
   %v307 = tail call <32 x i32> @llvm.hexagon.V6.vzh(<16 x i32> %v306)
   %v308 = tail call <32 x i32> @llvm.hexagon.V6.vzh(<16 x i32> %v305)
   %v309 = add nsw i32 %v299, -63
-  %v310 = getelementptr inbounds i8, i8* %v48, i32 %v309
-  %v311 = bitcast i8* %v310 to <16 x i32>*
-  %v312 = load <16 x i32>, <16 x i32>* %v311, align 1, !tbaa !7
+  %v310 = getelementptr inbounds i8, ptr %v48, i32 %v309
+  %v312 = load <16 x i32>, ptr %v310, align 1, !tbaa !7
   %v313 = tail call <32 x i32> @llvm.hexagon.V6.vzb(<16 x i32> %v312)
   %v314 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v313)
   %v315 = tail call <32 x i32> @llvm.hexagon.V6.vzh(<16 x i32> %v314)
@@ -432,25 +423,20 @@ b18:                                              ; preds = %b18, %b17
   %v331 = mul nsw i32 %v296, %v222
   %v332 = add nsw i32 %v331, %v298
   %v333 = add nsw i32 %v332, -64
-  %v334 = bitcast i8* %v227 to i32*
-  %v335 = getelementptr inbounds i32, i32* %v334, i32 %v333
-  %v336 = bitcast i32* %v335 to <16 x i32>*
-  store <16 x i32> %v330, <16 x i32>* %v336, align 4, !tbaa !10
+  %v335 = getelementptr inbounds i32, ptr %v227, i32 %v333
+  store <16 x i32> %v330, ptr %v335, align 4, !tbaa !10
   %v337 = shufflevector <32 x i32> %v328, <32 x i32> undef, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %v338 = add nsw i32 %v332, -48
-  %v339 = getelementptr inbounds i32, i32* %v334, i32 %v338
-  %v340 = bitcast i32* %v339 to <16 x i32>*
-  store <16 x i32> %v337, <16 x i32>* %v340, align 4, !tbaa !10
+  %v339 = getelementptr inbounds i32, ptr %v227, i32 %v338
+  store <16 x i32> %v337, ptr %v339, align 4, !tbaa !10
   %v341 = shufflevector <32 x i32> %v329, <32 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %v342 = add nsw i32 %v332, -32
-  %v343 = getelementptr inbounds i32, i32* %v334, i32 %v342
-  %v344 = bitcast i32* %v343 to <16 x i32>*
-  store <16 x i32> %v341, <16 x i32>* %v344, align 4, !tbaa !10
+  %v343 = getelementptr inbounds i32, ptr %v227, i32 %v342
+  store <16 x i32> %v341, ptr %v343, align 4, !tbaa !10
   %v345 = shufflevector <32 x i32> %v329, <32 x i32> undef, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %v346 = add nsw i32 %v332, -16
-  %v347 = getelementptr inbounds i32, i32* %v334, i32 %v346
-  %v348 = bitcast i32* %v347 to <16 x i32>*
-  store <16 x i32> %v345, <16 x i32>* %v348, align 4, !tbaa !10
+  %v347 = getelementptr inbounds i32, ptr %v227, i32 %v346
+  store <16 x i32> %v345, ptr %v347, align 4, !tbaa !10
   %v349 = add nuw nsw i32 %v294, 1
   %v350 = icmp eq i32 %v349, %v292
   br i1 %v350, label %b19, label %b18
@@ -479,25 +465,20 @@ b22:                                              ; preds = %b25, %b22
   %v362 = add nsw i32 %v361, %v21
   %v363 = sub nsw i32 %v362, %v27
   %v364 = add nsw i32 %v363, %v360
-  %v365 = bitcast i8* %v227 to i32*
-  %v366 = getelementptr inbounds i32, i32* %v365, i32 %v364
-  %v367 = bitcast i32* %v366 to <16 x i32>*
-  %v368 = load <16 x i32>, <16 x i32>* %v367, align 4, !tbaa !10
+  %v366 = getelementptr inbounds i32, ptr %v227, i32 %v364
+  %v368 = load <16 x i32>, ptr %v366, align 4, !tbaa !10
   %v369 = add nsw i32 %v364, 16
-  %v370 = getelementptr inbounds i32, i32* %v365, i32 %v369
-  %v371 = bitcast i32* %v370 to <16 x i32>*
-  %v372 = load <16 x i32>, <16 x i32>* %v371, align 4, !tbaa !10
+  %v370 = getelementptr inbounds i32, ptr %v227, i32 %v369
+  %v372 = load <16 x i32>, ptr %v370, align 4, !tbaa !10
   %v373 = shufflevector <16 x i32> %v368, <16 x i32> %v372, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %v374 = add nsw i32 %v359, 1
   %v375 = mul nsw i32 %v374, %v222
   %v376 = add nsw i32 %v363, %v375
-  %v377 = getelementptr inbounds i32, i32* %v365, i32 %v376
-  %v378 = bitcast i32* %v377 to <16 x i32>*
-  %v379 = load <16 x i32>, <16 x i32>* %v378, align 4, !tbaa !10
+  %v377 = getelementptr inbounds i32, ptr %v227, i32 %v376
+  %v379 = load <16 x i32>, ptr %v377, align 4, !tbaa !10
   %v380 = add nsw i32 %v376, 16
-  %v381 = getelementptr inbounds i32, i32* %v365, i32 %v380
-  %v382 = bitcast i32* %v381 to <16 x i32>*
-  %v383 = load <16 x i32>, <16 x i32>* %v382, align 4, !tbaa !10
+  %v381 = getelementptr inbounds i32, ptr %v227, i32 %v380
+  %v383 = load <16 x i32>, ptr %v381, align 4, !tbaa !10
   %v384 = shufflevector <16 x i32> %v379, <16 x i32> %v383, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %v385 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v384)
   %v386 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v384)
@@ -511,22 +492,18 @@ b22:                                              ; preds = %b25, %b22
   %v394 = tail call <16 x i32> @llvm.hexagon.V6.vlsrw(<16 x i32> %v393, i32 20)
   %v395 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v394, <16 x i32> %v392)
   %v396 = add nsw i32 %v364, 32
-  %v397 = getelementptr inbounds i32, i32* %v365, i32 %v396
-  %v398 = bitcast i32* %v397 to <16 x i32>*
-  %v399 = load <16 x i32>, <16 x i32>* %v398, align 4, !tbaa !10
+  %v397 = getelementptr inbounds i32, ptr %v227, i32 %v396
+  %v399 = load <16 x i32>, ptr %v397, align 4, !tbaa !10
   %v400 = add nsw i32 %v364, 48
-  %v401 = getelementptr inbounds i32, i32* %v365, i32 %v400
-  %v402 = bitcast i32* %v401 to <16 x i32>*
-  %v403 = load <16 x i32>, <16 x i32>* %v402, align 4, !tbaa !10
+  %v401 = getelementptr inbounds i32, ptr %v227, i32 %v400
+  %v403 = load <16 x i32>, ptr %v401, align 4, !tbaa !10
   %v404 = shufflevector <16 x i32> %v399, <16 x i32> %v403, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %v405 = add nsw i32 %v376, 32
-  %v406 = getelementptr inbounds i32, i32* %v365, i32 %v405
-  %v407 = bitcast i32* %v406 to <16 x i32>*
-  %v408 = load <16 x i32>, <16 x i32>* %v407, align 4, !tbaa !10
+  %v406 = getelementptr inbounds i32, ptr %v227, i32 %v405
+  %v408 = load <16 x i32>, ptr %v406, align 4, !tbaa !10
   %v409 = add nsw i32 %v376, 48
-  %v410 = getelementptr inbounds i32, i32* %v365, i32 %v409
-  %v411 = bitcast i32* %v410 to <16 x i32>*
-  %v412 = load <16 x i32>, <16 x i32>* %v411, align 4, !tbaa !10
+  %v410 = getelementptr inbounds i32, ptr %v227, i32 %v409
+  %v412 = load <16 x i32>, ptr %v410, align 4, !tbaa !10
   %v413 = shufflevector <16 x i32> %v408, <16 x i32> %v412, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %v414 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v413)
   %v415 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v413)
@@ -554,9 +531,8 @@ b22:                                              ; preds = %b25, %b22
   %v437 = add i32 %v435, %v21
   %v438 = sub i32 %v436, %v437
   %v439 = add i32 %v438, %v362
-  %v440 = getelementptr inbounds i8, i8* %v13, i32 %v439
-  %v441 = bitcast i8* %v440 to <16 x i32>*
-  store <16 x i32> %v434, <16 x i32>* %v441, align 1, !tbaa !12
+  %v440 = getelementptr inbounds i8, ptr %v13, i32 %v439
+  store <16 x i32> %v434, ptr %v440, align 1, !tbaa !12
   %v442 = add nuw nsw i32 %v358, 1
   %v443 = icmp eq i32 %v442, %v356
   br i1 %v443, label %b26, label %b22
@@ -566,23 +542,19 @@ b23:                                              ; preds = %b26, %b23
   %v445 = sub nsw i32 %v24, %v27
   %v446 = add nsw i32 %v360, %v445
   %v447 = add nsw i32 %v446, -64
-  %v448 = getelementptr inbounds i32, i32* %v365, i32 %v447
-  %v449 = bitcast i32* %v448 to <16 x i32>*
-  %v450 = load <16 x i32>, <16 x i32>* %v449, align 4, !tbaa !10
+  %v448 = getelementptr inbounds i32, ptr %v227, i32 %v447
+  %v450 = load <16 x i32>, ptr %v448, align 4, !tbaa !10
   %v451 = add nsw i32 %v446, -48
-  %v452 = getelementptr inbounds i32, i32* %v365, i32 %v451
-  %v453 = bitcast i32* %v452 to <16 x i32>*
-  %v454 = load <16 x i32>, <16 x i32>* %v453, align 4, !tbaa !10
+  %v452 = getelementptr inbounds i32, ptr %v227, i32 %v451
+  %v454 = load <16 x i32>, ptr %v452, align 4, !tbaa !10
   %v455 = shufflevector <16 x i32> %v450, <16 x i32> %v454, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %v456 = add nsw i32 %v375, %v445
   %v457 = add nsw i32 %v456, -64
-  %v458 = getelementptr inbounds i32, i32* %v365, i32 %v457
-  %v459 = bitcast i32* %v458 to <16 x i32>*
-  %v460 = load <16 x i32>, <16 x i32>* %v459, align 4, !tbaa !10
+  %v458 = getelementptr inbounds i32, ptr %v227, i32 %v457
+  %v460 = load <16 x i32>, ptr %v458, align 4, !tbaa !10
   %v461 = add nsw i32 %v456, -48
-  %v462 = getelementptr inbounds i32, i32* %v365, i32 %v461
-  %v463 = bitcast i32* %v462 to <16 x i32>*
-  %v464 = load <16 x i32>, <16 x i32>* %v463, align 4, !tbaa !10
+  %v462 = getelementptr inbounds i32, ptr %v227, i32 %v461
+  %v464 = load <16 x i32>, ptr %v462, align 4, !tbaa !10
   %v465 = shufflevector <16 x i32> %v460, <16 x i32> %v464, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %v466 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v465)
   %v467 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v465)
@@ -596,22 +568,18 @@ b23:                                              ; preds = %b26, %b23
   %v475 = tail call <16 x i32> @llvm.hexagon.V6.vlsrw(<16 x i32> %v474, i32 20)
   %v476 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v475, <16 x i32> %v473)
   %v477 = add nsw i32 %v446, -32
-  %v478 = getelementptr inbounds i32, i32* %v365, i32 %v477
-  %v479 = bitcast i32* %v478 to <16 x i32>*
-  %v480 = load <16 x i32>, <16 x i32>* %v479, align 4, !tbaa !10
+  %v478 = getelementptr inbounds i32, ptr %v227, i32 %v477
+  %v480 = load <16 x i32>, ptr %v478, align 4, !tbaa !10
   %v481 = add nsw i32 %v446, -16
-  %v482 = getelementptr inbounds i32, i32* %v365, i32 %v481
-  %v483 = bitcast i32* %v482 to <16 x i32>*
-  %v484 = load <16 x i32>, <16 x i32>* %v483, align 4, !tbaa !10
+  %v482 = getelementptr inbounds i32, ptr %v227, i32 %v481
+  %v484 = load <16 x i32>, ptr %v482, align 4, !tbaa !10
   %v485 = shufflevector <16 x i32> %v480, <16 x i32> %v484, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %v486 = add nsw i32 %v456, -32
-  %v487 = getelementptr inbounds i32, i32* %v365, i32 %v486
-  %v488 = bitcast i32* %v487 to <16 x i32>*
-  %v489 = load <16 x i32>, <16 x i32>* %v488, align 4, !tbaa !10
+  %v487 = getelementptr inbounds i32, ptr %v227, i32 %v486
+  %v489 = load <16 x i32>, ptr %v487, align 4, !tbaa !10
   %v490 = add nsw i32 %v456, -16
-  %v491 = getelementptr inbounds i32, i32* %v365, i32 %v490
-  %v492 = bitcast i32* %v491 to <16 x i32>*
-  %v493 = load <16 x i32>, <16 x i32>* %v492, align 4, !tbaa !10
+  %v491 = getelementptr inbounds i32, ptr %v227, i32 %v490
+  %v493 = load <16 x i32>, ptr %v491, align 4, !tbaa !10
   %v494 = shufflevector <16 x i32> %v489, <16 x i32> %v493, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %v495 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v494)
   %v496 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v494)
@@ -637,9 +605,8 @@ b23:                                              ; preds = %b26, %b23
   %v516 = add i32 %v15, -64
   %v517 = sub i32 %v516, %v435
   %v518 = add i32 %v517, %v436
-  %v519 = getelementptr inbounds i8, i8* %v13, i32 %v518
-  %v520 = bitcast i8* %v519 to <16 x i32>*
-  store <16 x i32> %v515, <16 x i32>* %v520, align 1, !tbaa !12
+  %v519 = getelementptr inbounds i8, ptr %v13, i32 %v518
+  store <16 x i32> %v515, ptr %v519, align 1, !tbaa !12
   %v521 = add nuw nsw i32 %v444, 1
   %v522 = icmp eq i32 %v521, %v527
   br i1 %v522, label %b24, label %b23
@@ -672,26 +639,21 @@ b28:                                              ; preds = %b29, %b28
   %v535 = sub nsw i32 %v24, %v27
   %v536 = add nsw i32 %v534, %v535
   %v537 = add nsw i32 %v536, -64
-  %v538 = bitcast i8* %v227 to i32*
-  %v539 = getelementptr inbounds i32, i32* %v538, i32 %v537
-  %v540 = bitcast i32* %v539 to <16 x i32>*
-  %v541 = load <16 x i32>, <16 x i32>* %v540, align 4, !tbaa !10
+  %v539 = getelementptr inbounds i32, ptr %v227, i32 %v537
+  %v541 = load <16 x i32>, ptr %v539, align 4, !tbaa !10
   %v542 = add nsw i32 %v536, -48
-  %v543 = getelementptr inbounds i32, i32* %v538, i32 %v542
-  %v544 = bitcast i32* %v543 to <16 x i32>*
-  %v545 = load <16 x i32>, <16 x i32>* %v544, align 4, !tbaa !10
+  %v543 = getelementptr inbounds i32, ptr %v227, i32 %v542
+  %v545 = load <16 x i32>, ptr %v543, align 4, !tbaa !10
   %v546 = shufflevector <16 x i32> %v541, <16 x i32> %v545, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %v547 = add nsw i32 %v533, 1
   %v548 = mul nsw i32 %v547, %v222
   %v549 = add nsw i32 %v548, %v535
   %v550 = add nsw i32 %v549, -64
-  %v551 = getelementptr inbounds i32, i32* %v538, i32 %v550
-  %v552 = bitcast i32* %v551 to <16 x i32>*
-  %v553 = load <16 x i32>, <16 x i32>* %v552, align 4, !tbaa !10
+  %v551 = getelementptr inbounds i32, ptr %v227, i32 %v550
+  %v553 = load <16 x i32>, ptr %v551, align 4, !tbaa !10
   %v554 = add nsw i32 %v549, -48
-  %v555 = getelementptr inbounds i32, i32* %v538, i32 %v554
-  %v556 = bitcast i32* %v555 to <16 x i32>*
-  %v557 = load <16 x i32>, <16 x i32>* %v556, align 4, !tbaa !10
+  %v555 = getelementptr inbounds i32, ptr %v227, i32 %v554
+  %v557 = load <16 x i32>, ptr %v555, align 4, !tbaa !10
   %v558 = shufflevector <16 x i32> %v553, <16 x i32> %v557, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %v559 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v558)
   %v560 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v558)
@@ -705,22 +667,18 @@ b28:                                              ; preds = %b29, %b28
   %v568 = tail call <16 x i32> @llvm.hexagon.V6.vlsrw(<16 x i32> %v567, i32 20)
   %v569 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v568, <16 x i32> %v566)
   %v570 = add nsw i32 %v536, -32
-  %v571 = getelementptr inbounds i32, i32* %v538, i32 %v570
-  %v572 = bitcast i32* %v571 to <16 x i32>*
-  %v573 = load <16 x i32>, <16 x i32>* %v572, align 4, !tbaa !10
+  %v571 = getelementptr inbounds i32, ptr %v227, i32 %v570
+  %v573 = load <16 x i32>, ptr %v571, align 4, !tbaa !10
   %v574 = add nsw i32 %v536, -16
-  %v575 = getelementptr inbounds i32, i32* %v538, i32 %v574
-  %v576 = bitcast i32* %v575 to <16 x i32>*
-  %v577 = load <16 x i32>, <16 x i32>* %v576, align 4, !tbaa !10
+  %v575 = getelementptr inbounds i32, ptr %v227, i32 %v574
+  %v577 = load <16 x i32>, ptr %v575, align 4, !tbaa !10
   %v578 = shufflevector <16 x i32> %v573, <16 x i32> %v577, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %v579 = add nsw i32 %v549, -32
-  %v580 = getelementptr inbounds i32, i32* %v538, i32 %v579
-  %v581 = bitcast i32* %v580 to <16 x i32>*
-  %v582 = load <16 x i32>, <16 x i32>* %v581, align 4, !tbaa !10
+  %v580 = getelementptr inbounds i32, ptr %v227, i32 %v579
+  %v582 = load <16 x i32>, ptr %v580, align 4, !tbaa !10
   %v583 = add nsw i32 %v549, -16
-  %v584 = getelementptr inbounds i32, i32* %v538, i32 %v583
-  %v585 = bitcast i32* %v584 to <16 x i32>*
-  %v586 = load <16 x i32>, <16 x i32>* %v585, align 4, !tbaa !10
+  %v584 = getelementptr inbounds i32, ptr %v227, i32 %v583
+  %v586 = load <16 x i32>, ptr %v584, align 4, !tbaa !10
   %v587 = shufflevector <16 x i32> %v582, <16 x i32> %v586, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %v588 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v587)
   %v589 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v587)
@@ -748,9 +706,8 @@ b28:                                              ; preds = %b29, %b28
   %v611 = add i32 %v15, -64
   %v612 = sub i32 %v611, %v609
   %v613 = add i32 %v612, %v610
-  %v614 = getelementptr inbounds i8, i8* %v13, i32 %v613
-  %v615 = bitcast i8* %v614 to <16 x i32>*
-  store <16 x i32> %v608, <16 x i32>* %v615, align 1, !tbaa !12
+  %v614 = getelementptr inbounds i8, ptr %v13, i32 %v613
+  store <16 x i32> %v608, ptr %v614, align 1, !tbaa !12
   %v616 = add nuw nsw i32 %v532, 1
   %v617 = icmp eq i32 %v616, %v530
   br i1 %v617, label %b30, label %b28
@@ -765,11 +722,11 @@ b30:                                              ; preds = %b28
   br i1 %v620, label %b32, label %b29
 
 b31:                                              ; preds = %b27, %b20
-  %v621 = icmp eq i8* %v227, null
+  %v621 = icmp eq ptr %v227, null
   br i1 %v621, label %b33, label %b32
 
 b32:                                              ; preds = %b31, %b30, %b24
-  tail call void @f3(i8* null, i8* %v227) #2
+  tail call void @f3(ptr null, ptr %v227) #2
   br label %b33
 
 b33:                                              ; preds = %b32, %b31

diff  --git a/llvm/test/CodeGen/Hexagon/const-combine.ll b/llvm/test/CodeGen/Hexagon/const-combine.ll
index 90756f1c407eb..2a0a8a28f4860 100644
--- a/llvm/test/CodeGen/Hexagon/const-combine.ll
+++ b/llvm/test/CodeGen/Hexagon/const-combine.ll
@@ -4,41 +4,41 @@
 target triple = "hexagon"
 
 %s.1 = type { %s.2 }
-%s.2 = type { i32, i8* }
+%s.2 = type { i32, ptr }
 
 @g0 = internal constant [61 x i8] c"............................................................\00", align 4
- at g1 = internal constant %s.1 { %s.2 { i32 8, i8* getelementptr inbounds ([61 x i8], [61 x i8]* @g0, i32 0, i32 0) } }, align 4
+ at g1 = internal constant %s.1 { %s.2 { i32 8, ptr @g0 } }, align 4
 
 define void @f0(i32 %a0) local_unnamed_addr #0 {
 b0:
-  %v0 = alloca i8*, align 4
-  store i8* null, i8** %v0, align 4, !tbaa !0
-  call void @f1(i32 88, i16 zeroext 4917, i8** nonnull %v0) #0
-  %v1 = load i8*, i8** %v0, align 4, !tbaa !0
-  %v2 = icmp eq i8* %v1, null
+  %v0 = alloca ptr, align 4
+  store ptr null, ptr %v0, align 4, !tbaa !0
+  call void @f1(i32 88, i16 zeroext 4917, ptr nonnull %v0) #0
+  %v1 = load ptr, ptr %v0, align 4, !tbaa !0
+  %v2 = icmp eq ptr %v1, null
   br i1 %v2, label %b1, label %b2
 
 b1:                                               ; preds = %b0
-  call void @f2(%s.1* nonnull @g1) #0
+  call void @f2(ptr nonnull @g1) #0
   br label %b3
 
 b2:                                               ; preds = %b0
-  %v3 = call i32 @f3(i8 zeroext 22, i8* null, i8* nonnull %v1, i16 zeroext 88) #0
-  %v4 = load i8*, i8** %v0, align 4, !tbaa !0
-  call void @f4(i8* %v4, i32 88) #0
+  %v3 = call i32 @f3(i8 zeroext 22, ptr null, ptr nonnull %v1, i16 zeroext 88) #0
+  %v4 = load ptr, ptr %v0, align 4, !tbaa !0
+  call void @f4(ptr %v4, i32 88) #0
   br label %b3
 
 b3:                                               ; preds = %b2, %b1
   ret void
 }
 
-declare void @f1(i32, i16 zeroext, i8**) local_unnamed_addr
+declare void @f1(i32, i16 zeroext, ptr) local_unnamed_addr
 
-declare void @f2(%s.1*) local_unnamed_addr
+declare void @f2(ptr) local_unnamed_addr
 
-declare i32 @f3(i8 zeroext, i8*, i8*, i16 zeroext) local_unnamed_addr
+declare i32 @f3(i8 zeroext, ptr, ptr, i16 zeroext) local_unnamed_addr
 
-declare void @f4(i8*, i32) local_unnamed_addr
+declare void @f4(ptr, i32) local_unnamed_addr
 
 attributes #0 = { nounwind optsize }
 

diff  --git a/llvm/test/CodeGen/Hexagon/constext-call.ll b/llvm/test/CodeGen/Hexagon/constext-call.ll
index b24384d82f2ac..5de40208530f8 100644
--- a/llvm/test/CodeGen/Hexagon/constext-call.ll
+++ b/llvm/test/CodeGen/Hexagon/constext-call.ll
@@ -16,12 +16,12 @@
 @g0 = external global i32
 
 ; Function Attrs: noinline nounwind
-define i32 @f0(i32 %a0, i32* nocapture %a1) #0 {
+define i32 @f0(i32 %a0, ptr nocapture %a1) #0 {
 b0:
   %v0 = tail call i32 @f1(i32 %a0)
   %v1 = icmp eq i32 %v0, 0
   %v2 = select i1 %v1, i32 3, i32 %a0
-  store i32 %v2, i32* %a1, align 4
+  store i32 %v2, ptr %a1, align 4
   switch i32 %a0, label %b5 [
     i32 0, label %b1
     i32 1, label %b2
@@ -30,22 +30,22 @@ b0:
   ]
 
 b1:                                               ; preds = %b0
-  store i32 0, i32* %a1, align 4
+  store i32 0, ptr %a1, align 4
   br label %b5
 
 b2:                                               ; preds = %b0
-  %v3 = load i32, i32* @g0, align 4
+  %v3 = load i32, ptr @g0, align 4
   %v4 = icmp sgt i32 %v3, 100
   %v5 = select i1 %v4, i32 0, i32 3
-  store i32 %v5, i32* %a1, align 4
+  store i32 %v5, ptr %a1, align 4
   br label %b5
 
 b3:                                               ; preds = %b0
-  store i32 1, i32* %a1, align 4
+  store i32 1, ptr %a1, align 4
   br label %b5
 
 b4:                                               ; preds = %b0
-  store i32 2, i32* %a1, align 4
+  store i32 2, ptr %a1, align 4
   br label %b5
 
 b5:                                               ; preds = %b4, %b3, %b2, %b1, %b0

diff  --git a/llvm/test/CodeGen/Hexagon/constext-immstore.ll b/llvm/test/CodeGen/Hexagon/constext-immstore.ll
index 398ab1a05bc86..1a035ea406c14 100644
--- a/llvm/test/CodeGen/Hexagon/constext-immstore.ll
+++ b/llvm/test/CodeGen/Hexagon/constext-immstore.ll
@@ -1,162 +1,162 @@
 ; RUN: llc -march=hexagon < %s
 ; REQUIRES: asserts
 
-%s.0 = type { i8, i8, i8*, i8, i32, %s.0*, %s.0* }
-%s.1 = type { %s.1*, %s.2, %s.0*, %s.2 }
+%s.0 = type { i8, i8, ptr, i8, i32, ptr, ptr }
+%s.1 = type { ptr, %s.2, ptr, %s.2 }
 %s.2 = type { i8, %s.3, i8 }
-%s.3 = type { %s.4* }
-%s.4 = type { [65 x i8], i16, %s.4*, %s.4* }
+%s.3 = type { ptr }
+%s.4 = type { [65 x i8], i16, ptr, ptr }
 
 @g0 = private unnamed_addr constant [4 x i8] c"and\00", align 1
 @g1 = private unnamed_addr constant [3 x i8] c"or\00", align 1
 @g2 = private unnamed_addr constant [8 x i8] c"implies\00", align 1
 @g3 = private unnamed_addr constant [3 x i8] c"if\00", align 1
- at g4 = global [4 x %s.0] [%s.0 { i8 1, i8 38, i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i8 1, i32 8, %s.0* null, %s.0* null }, %s.0 { i8 2, i8 124, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @g1, i32 0, i32 0), i8 1, i32 7, %s.0* null, %s.0* null }, %s.0 { i8 3, i8 62, i8* getelementptr inbounds ([8 x i8], [8 x i8]* @g2, i32 0, i32 0), i8 1, i32 1, %s.0* null, %s.0* null }, %s.0 { i8 4, i8 60, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @g3, i32 0, i32 0), i8 1, i32 1, %s.0* null, %s.0* null }], align 8
+ at g4 = global [4 x %s.0] [%s.0 { i8 1, i8 38, ptr @g0, i8 1, i32 8, ptr null, ptr null }, %s.0 { i8 2, i8 124, ptr @g1, i8 1, i32 7, ptr null, ptr null }, %s.0 { i8 3, i8 62, ptr @g2, i8 1, i32 1, ptr null, ptr null }, %s.0 { i8 4, i8 60, ptr @g3, i8 1, i32 1, ptr null, ptr null }], align 8
 @g5 = internal global [64 x i8] zeroinitializer, align 8
- at g6 = internal unnamed_addr global %s.0* null, align 4
+ at g6 = internal unnamed_addr global ptr null, align 4
 
 ; Function Attrs: nounwind
-define %s.1* @f0() #0 {
+define ptr @f0() #0 {
 b0:
-  %v0 = tail call %s.1* @f1(%s.1* null) #0
+  %v0 = tail call ptr @f1(ptr null) #0
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
-  %v1 = tail call zeroext i8 @f2(i8* getelementptr inbounds ([64 x i8], [64 x i8]* @g5, i32 0, i32 0)) #0
+  %v1 = tail call zeroext i8 @f2(ptr @g5) #0
   switch i8 %v1, label %b1 [
     i8 8, label %b2
     i8 6, label %b2
   ]
 
 b2:                                               ; preds = %b1, %b1
-  ret %s.1* %v0
+  ret ptr %v0
 }
 
-declare %s.1* @f1(%s.1*) #0
+declare ptr @f1(ptr) #0
 
-declare zeroext i8 @f2(i8*) #0
+declare zeroext i8 @f2(ptr) #0
 
 ; Function Attrs: nounwind
 define void @f3() #0 {
 b0:
-  store %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 0), %s.0** @g6, align 4
-  store %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 0), %s.0** getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 0, i32 5), align 8
-  store %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 0), %s.0** getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 0, i32 6), align 4
-  %v0 = load i8*, i8** getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 1, i32 2), align 4
+  store ptr @g4, ptr @g6, align 4
+  store ptr @g4, ptr getelementptr inbounds ([4 x %s.0], ptr @g4, i32 0, i32 0, i32 5), align 8
+  store ptr @g4, ptr getelementptr inbounds ([4 x %s.0], ptr @g4, i32 0, i32 0, i32 6), align 4
+  %v0 = load ptr, ptr getelementptr inbounds ([4 x %s.0], ptr @g4, i32 0, i32 1, i32 2), align 4
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
-  %v1 = phi %s.0* [ getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 0), %b0 ], [ %v9, %b1 ]
-  %v2 = getelementptr inbounds %s.0, %s.0* %v1, i32 0, i32 2
-  %v3 = load i8*, i8** %v2, align 4
-  %v4 = tail call i32 @f4(i8* %v0, i8* %v3) #0
+  %v1 = phi ptr [ @g4, %b0 ], [ %v9, %b1 ]
+  %v2 = getelementptr inbounds %s.0, ptr %v1, i32 0, i32 2
+  %v3 = load ptr, ptr %v2, align 4
+  %v4 = tail call i32 @f4(ptr %v0, ptr %v3) #0
   %v5 = icmp sgt i32 %v4, 0
-  %v6 = getelementptr inbounds %s.0, %s.0* %v1, i32 0, i32 5
-  %v7 = getelementptr inbounds %s.0, %s.0* %v1, i32 0, i32 6
-  %v8 = select i1 %v5, %s.0** %v6, %s.0** %v7
-  %v9 = load %s.0*, %s.0** %v8, align 4
-  %v10 = icmp eq %s.0* %v9, null
+  %v6 = getelementptr inbounds %s.0, ptr %v1, i32 0, i32 5
+  %v7 = getelementptr inbounds %s.0, ptr %v1, i32 0, i32 6
+  %v8 = select i1 %v5, ptr %v6, ptr %v7
+  %v9 = load ptr, ptr %v8, align 4
+  %v10 = icmp eq ptr %v9, null
   br i1 %v10, label %b2, label %b1
 
 b2:                                               ; preds = %b1
   %v11 = phi i32 [ %v4, %b1 ]
-  %v12 = phi %s.0* [ %v1, %b1 ]
+  %v12 = phi ptr [ %v1, %b1 ]
   %v13 = icmp sgt i32 %v11, 0
   br i1 %v13, label %b3, label %b4
 
 b3:                                               ; preds = %b2
-  %v14 = getelementptr inbounds %s.0, %s.0* %v12, i32 0, i32 5
-  store %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 1), %s.0** %v14, align 4
+  %v14 = getelementptr inbounds %s.0, ptr %v12, i32 0, i32 5
+  store ptr getelementptr inbounds ([4 x %s.0], ptr @g4, i32 0, i32 1), ptr %v14, align 4
   br label %b4
 
 b4:                                               ; preds = %b3, %b2
-  %v15 = getelementptr inbounds %s.0, %s.0* %v12, i32 0, i32 6
-  store %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 1), %s.0** %v15, align 4
-  %v16 = load %s.0*, %s.0** @g6, align 4
-  %v17 = icmp eq %s.0* %v16, null
+  %v15 = getelementptr inbounds %s.0, ptr %v12, i32 0, i32 6
+  store ptr getelementptr inbounds ([4 x %s.0], ptr @g4, i32 0, i32 1), ptr %v15, align 4
+  %v16 = load ptr, ptr @g6, align 4
+  %v17 = icmp eq ptr %v16, null
   br i1 %v17, label %b8, label %b5
 
 b5:                                               ; preds = %b4
-  %v18 = load i8*, i8** getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 2, i32 2), align 4
+  %v18 = load ptr, ptr getelementptr inbounds ([4 x %s.0], ptr @g4, i32 0, i32 2, i32 2), align 4
   br label %b6
 
 b6:                                               ; preds = %b6, %b5
-  %v19 = phi %s.0* [ %v16, %b5 ], [ %v27, %b6 ]
-  %v20 = getelementptr inbounds %s.0, %s.0* %v19, i32 0, i32 2
-  %v21 = load i8*, i8** %v20, align 4
-  %v22 = tail call i32 @f4(i8* %v18, i8* %v21) #0
+  %v19 = phi ptr [ %v16, %b5 ], [ %v27, %b6 ]
+  %v20 = getelementptr inbounds %s.0, ptr %v19, i32 0, i32 2
+  %v21 = load ptr, ptr %v20, align 4
+  %v22 = tail call i32 @f4(ptr %v18, ptr %v21) #0
   %v23 = icmp sgt i32 %v22, 0
-  %v24 = getelementptr inbounds %s.0, %s.0* %v19, i32 0, i32 5
-  %v25 = getelementptr inbounds %s.0, %s.0* %v19, i32 0, i32 6
-  %v26 = select i1 %v23, %s.0** %v24, %s.0** %v25
-  %v27 = load %s.0*, %s.0** %v26, align 4
-  %v28 = icmp eq %s.0* %v27, null
+  %v24 = getelementptr inbounds %s.0, ptr %v19, i32 0, i32 5
+  %v25 = getelementptr inbounds %s.0, ptr %v19, i32 0, i32 6
+  %v26 = select i1 %v23, ptr %v24, ptr %v25
+  %v27 = load ptr, ptr %v26, align 4
+  %v28 = icmp eq ptr %v27, null
   br i1 %v28, label %b7, label %b6
 
 b7:                                               ; preds = %b6
   %v29 = phi i32 [ %v22, %b6 ]
-  %v30 = phi %s.0* [ %v19, %b6 ]
+  %v30 = phi ptr [ %v19, %b6 ]
   br label %b8
 
 b8:                                               ; preds = %b7, %b4
   %v31 = phi i32 [ %v11, %b4 ], [ %v29, %b7 ]
-  %v32 = phi %s.0* [ null, %b4 ], [ %v30, %b7 ]
+  %v32 = phi ptr [ null, %b4 ], [ %v30, %b7 ]
   %v33 = icmp sgt i32 %v31, 0
   br i1 %v33, label %b9, label %b10
 
 b9:                                               ; preds = %b8
-  %v34 = getelementptr inbounds %s.0, %s.0* %v32, i32 0, i32 5
-  store %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 2), %s.0** %v34, align 4
+  %v34 = getelementptr inbounds %s.0, ptr %v32, i32 0, i32 5
+  store ptr getelementptr inbounds ([4 x %s.0], ptr @g4, i32 0, i32 2), ptr %v34, align 4
   br label %b10
 
 b10:                                              ; preds = %b9, %b8
-  %v35 = getelementptr inbounds %s.0, %s.0* %v32, i32 0, i32 6
-  store %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 2), %s.0** %v35, align 4
-  %v36 = load %s.0*, %s.0** @g6, align 4
-  %v37 = icmp eq %s.0* %v36, null
+  %v35 = getelementptr inbounds %s.0, ptr %v32, i32 0, i32 6
+  store ptr getelementptr inbounds ([4 x %s.0], ptr @g4, i32 0, i32 2), ptr %v35, align 4
+  %v36 = load ptr, ptr @g6, align 4
+  %v37 = icmp eq ptr %v36, null
   br i1 %v37, label %b14, label %b11
 
 b11:                                              ; preds = %b10
-  %v38 = load i8*, i8** getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 3, i32 2), align 4
+  %v38 = load ptr, ptr getelementptr inbounds ([4 x %s.0], ptr @g4, i32 0, i32 3, i32 2), align 4
   br label %b12
 
 b12:                                              ; preds = %b12, %b11
-  %v39 = phi %s.0* [ %v36, %b11 ], [ %v47, %b12 ]
-  %v40 = getelementptr inbounds %s.0, %s.0* %v39, i32 0, i32 2
-  %v41 = load i8*, i8** %v40, align 4
-  %v42 = tail call i32 @f4(i8* %v38, i8* %v41) #0
+  %v39 = phi ptr [ %v36, %b11 ], [ %v47, %b12 ]
+  %v40 = getelementptr inbounds %s.0, ptr %v39, i32 0, i32 2
+  %v41 = load ptr, ptr %v40, align 4
+  %v42 = tail call i32 @f4(ptr %v38, ptr %v41) #0
   %v43 = icmp sgt i32 %v42, 0
-  %v44 = getelementptr inbounds %s.0, %s.0* %v39, i32 0, i32 5
-  %v45 = getelementptr inbounds %s.0, %s.0* %v39, i32 0, i32 6
-  %v46 = select i1 %v43, %s.0** %v44, %s.0** %v45
-  %v47 = load %s.0*, %s.0** %v46, align 4
-  %v48 = icmp eq %s.0* %v47, null
+  %v44 = getelementptr inbounds %s.0, ptr %v39, i32 0, i32 5
+  %v45 = getelementptr inbounds %s.0, ptr %v39, i32 0, i32 6
+  %v46 = select i1 %v43, ptr %v44, ptr %v45
+  %v47 = load ptr, ptr %v46, align 4
+  %v48 = icmp eq ptr %v47, null
   br i1 %v48, label %b13, label %b12
 
 b13:                                              ; preds = %b12
   %v49 = phi i32 [ %v42, %b12 ]
-  %v50 = phi %s.0* [ %v39, %b12 ]
+  %v50 = phi ptr [ %v39, %b12 ]
   br label %b14
 
 b14:                                              ; preds = %b13, %b10
   %v51 = phi i32 [ %v31, %b10 ], [ %v49, %b13 ]
-  %v52 = phi %s.0* [ null, %b10 ], [ %v50, %b13 ]
+  %v52 = phi ptr [ null, %b10 ], [ %v50, %b13 ]
   %v53 = icmp sgt i32 %v51, 0
   br i1 %v53, label %b15, label %b16
 
 b15:                                              ; preds = %b14
-  %v54 = getelementptr inbounds %s.0, %s.0* %v52, i32 0, i32 5
-  store %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 3), %s.0** %v54, align 4
+  %v54 = getelementptr inbounds %s.0, ptr %v52, i32 0, i32 5
+  store ptr getelementptr inbounds ([4 x %s.0], ptr @g4, i32 0, i32 3), ptr %v54, align 4
   br label %b16
 
 b16:                                              ; preds = %b15, %b14
-  %v55 = getelementptr inbounds %s.0, %s.0* %v52, i32 0, i32 6
-  store %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 3), %s.0** %v55, align 4
+  %v55 = getelementptr inbounds %s.0, ptr %v52, i32 0, i32 6
+  store ptr getelementptr inbounds ([4 x %s.0], ptr @g4, i32 0, i32 3), ptr %v55, align 4
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare i32 @f4(i8* nocapture, i8* nocapture) #1
+declare i32 @f4(ptr nocapture, ptr nocapture) #1
 
 attributes #0 = { nounwind }
 attributes #1 = { nounwind readonly }

diff  --git a/llvm/test/CodeGen/Hexagon/constext-replace.ll b/llvm/test/CodeGen/Hexagon/constext-replace.ll
index 2d51b8ab0b9f3..5a34af363848b 100644
--- a/llvm/test/CodeGen/Hexagon/constext-replace.ll
+++ b/llvm/test/CodeGen/Hexagon/constext-replace.ll
@@ -22,27 +22,27 @@ target triple = "hexagon-unknown-linux-gnu"
 @g1 = external global [13595 x i32], align 8
 @g2 = external global [13595 x i32], align 8
 
-define i32 @f0(i32 %a0, i32* nocapture %a1) {
+define i32 @f0(i32 %a0, ptr nocapture %a1) {
 b0:
-  %v0 = load i32, i32* %a1, align 4
-  %v1 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g1, i32 0, i32 %v0
-  %v2 = load i32, i32* %v1, align 4
+  %v0 = load i32, ptr %a1, align 4
+  %v1 = getelementptr inbounds [13595 x i32], ptr @g1, i32 0, i32 %v0
+  %v2 = load i32, ptr %v1, align 4
   %v3 = icmp sgt i32 %v2, %a0
   br i1 %v3, label %b1, label %b2
 
 b1:                                               ; preds = %b0
-  %v4 = load i32, i32* @g0, align 4
-  store i32 %v4, i32* %a1, align 4
-  %v5 = load i32, i32* @g0, align 4
-  %v6 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g2, i32 0, i32 %v5
-  %v7 = load i32, i32* %v6, align 4
-  store i32 %v7, i32* @g0, align 4
-  %v8 = load i32, i32* %a1, align 4
-  %v9 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g2, i32 0, i32 %v8
-  store i32 %v0, i32* %v9, align 4
-  %v10 = load i32, i32* %a1, align 4
-  %v11 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g1, i32 0, i32 %v10
-  store i32 %a0, i32* %v11, align 4
+  %v4 = load i32, ptr @g0, align 4
+  store i32 %v4, ptr %a1, align 4
+  %v5 = load i32, ptr @g0, align 4
+  %v6 = getelementptr inbounds [13595 x i32], ptr @g2, i32 0, i32 %v5
+  %v7 = load i32, ptr %v6, align 4
+  store i32 %v7, ptr @g0, align 4
+  %v8 = load i32, ptr %a1, align 4
+  %v9 = getelementptr inbounds [13595 x i32], ptr @g2, i32 0, i32 %v8
+  store i32 %v0, ptr %v9, align 4
+  %v10 = load i32, ptr %a1, align 4
+  %v11 = getelementptr inbounds [13595 x i32], ptr @g1, i32 0, i32 %v10
+  store i32 %a0, ptr %v11, align 4
   br label %b16
 
 b2:                                               ; preds = %b0
@@ -54,10 +54,10 @@ b3:                                               ; preds = %b2
 
 b4:                                               ; preds = %b13, %b3
   %v13 = phi i32 [ %v45, %b13 ], [ %v0, %b3 ]
-  %v14 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g2, i32 0, i32 %v13
-  %v15 = load i32, i32* %v14, align 4
-  %v16 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g1, i32 0, i32 %v15
-  %v17 = load i32, i32* %v16, align 4
+  %v14 = getelementptr inbounds [13595 x i32], ptr @g2, i32 0, i32 %v13
+  %v15 = load i32, ptr %v14, align 4
+  %v16 = getelementptr inbounds [13595 x i32], ptr @g1, i32 0, i32 %v15
+  %v17 = load i32, ptr %v16, align 4
   %v18 = icmp slt i32 %v17, %a0
   br i1 %v18, label %b7, label %b5
 
@@ -66,21 +66,21 @@ b5:                                               ; preds = %b4
   br i1 %v19, label %b16, label %b6
 
 b6:                                               ; preds = %b5
-  %v20 = load i32, i32* @g0, align 4
-  store i32 %v20, i32* %v14, align 4
-  %v21 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g1, i32 0, i32 %v20
-  store i32 %a0, i32* %v21, align 4
-  %v22 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g2, i32 0, i32 %v20
-  %v23 = load i32, i32* %v22, align 4
-  store i32 %v23, i32* @g0, align 4
-  store i32 %v15, i32* %v22, align 4
+  %v20 = load i32, ptr @g0, align 4
+  store i32 %v20, ptr %v14, align 4
+  %v21 = getelementptr inbounds [13595 x i32], ptr @g1, i32 0, i32 %v20
+  store i32 %a0, ptr %v21, align 4
+  %v22 = getelementptr inbounds [13595 x i32], ptr @g2, i32 0, i32 %v20
+  %v23 = load i32, ptr %v22, align 4
+  store i32 %v23, ptr @g0, align 4
+  store i32 %v15, ptr %v22, align 4
   br label %b16
 
 b7:                                               ; preds = %b4
-  %v24 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g2, i32 0, i32 %v15
-  %v25 = load i32, i32* %v24, align 4
-  %v26 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g1, i32 0, i32 %v25
-  %v27 = load i32, i32* %v26, align 4
+  %v24 = getelementptr inbounds [13595 x i32], ptr @g2, i32 0, i32 %v15
+  %v25 = load i32, ptr %v24, align 4
+  %v26 = getelementptr inbounds [13595 x i32], ptr @g1, i32 0, i32 %v25
+  %v27 = load i32, ptr %v26, align 4
   %v28 = icmp slt i32 %v27, %a0
   br i1 %v28, label %b10, label %b8
 
@@ -89,21 +89,21 @@ b8:                                               ; preds = %b7
   br i1 %v29, label %b16, label %b9
 
 b9:                                               ; preds = %b8
-  %v30 = load i32, i32* @g0, align 4
-  store i32 %v30, i32* %v24, align 4
-  %v31 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g1, i32 0, i32 %v30
-  store i32 %a0, i32* %v31, align 4
-  %v32 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g2, i32 0, i32 %v30
-  %v33 = load i32, i32* %v32, align 4
-  store i32 %v33, i32* @g0, align 4
-  store i32 %v25, i32* %v32, align 4
+  %v30 = load i32, ptr @g0, align 4
+  store i32 %v30, ptr %v24, align 4
+  %v31 = getelementptr inbounds [13595 x i32], ptr @g1, i32 0, i32 %v30
+  store i32 %a0, ptr %v31, align 4
+  %v32 = getelementptr inbounds [13595 x i32], ptr @g2, i32 0, i32 %v30
+  %v33 = load i32, ptr %v32, align 4
+  store i32 %v33, ptr @g0, align 4
+  store i32 %v25, ptr %v32, align 4
   br label %b16
 
 b10:                                              ; preds = %b7
-  %v34 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g2, i32 0, i32 %v25
-  %v35 = load i32, i32* %v34, align 4
-  %v36 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g1, i32 0, i32 %v35
-  %v37 = load i32, i32* %v36, align 4
+  %v34 = getelementptr inbounds [13595 x i32], ptr @g2, i32 0, i32 %v25
+  %v35 = load i32, ptr %v34, align 4
+  %v36 = getelementptr inbounds [13595 x i32], ptr @g1, i32 0, i32 %v35
+  %v37 = load i32, ptr %v36, align 4
   %v38 = icmp slt i32 %v37, %a0
   br i1 %v38, label %b13, label %b11
 
@@ -112,21 +112,21 @@ b11:                                              ; preds = %b10
   br i1 %v39, label %b16, label %b12
 
 b12:                                              ; preds = %b11
-  %v40 = load i32, i32* @g0, align 4
-  store i32 %v40, i32* %v34, align 4
-  %v41 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g1, i32 0, i32 %v40
-  store i32 %a0, i32* %v41, align 4
-  %v42 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g2, i32 0, i32 %v40
-  %v43 = load i32, i32* %v42, align 4
-  store i32 %v43, i32* @g0, align 4
-  store i32 %v35, i32* %v42, align 4
+  %v40 = load i32, ptr @g0, align 4
+  store i32 %v40, ptr %v34, align 4
+  %v41 = getelementptr inbounds [13595 x i32], ptr @g1, i32 0, i32 %v40
+  store i32 %a0, ptr %v41, align 4
+  %v42 = getelementptr inbounds [13595 x i32], ptr @g2, i32 0, i32 %v40
+  %v43 = load i32, ptr %v42, align 4
+  store i32 %v43, ptr @g0, align 4
+  store i32 %v35, ptr %v42, align 4
   br label %b16
 
 b13:                                              ; preds = %b10
-  %v44 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g2, i32 0, i32 %v35
-  %v45 = load i32, i32* %v44, align 4
-  %v46 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g1, i32 0, i32 %v45
-  %v47 = load i32, i32* %v46, align 4
+  %v44 = getelementptr inbounds [13595 x i32], ptr @g2, i32 0, i32 %v35
+  %v45 = load i32, ptr %v44, align 4
+  %v46 = getelementptr inbounds [13595 x i32], ptr @g1, i32 0, i32 %v45
+  %v47 = load i32, ptr %v46, align 4
   %v48 = icmp slt i32 %v47, %a0
   br i1 %v48, label %b4, label %b14
 
@@ -135,14 +135,14 @@ b14:                                              ; preds = %b13
   br i1 %v49, label %b16, label %b15
 
 b15:                                              ; preds = %b14
-  %v50 = load i32, i32* @g0, align 4
-  store i32 %v50, i32* %v44, align 4
-  %v51 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g1, i32 0, i32 %v50
-  store i32 %a0, i32* %v51, align 4
-  %v52 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g2, i32 0, i32 %v50
-  %v53 = load i32, i32* %v52, align 4
-  store i32 %v53, i32* @g0, align 4
-  store i32 %v45, i32* %v52, align 4
+  %v50 = load i32, ptr @g0, align 4
+  store i32 %v50, ptr %v44, align 4
+  %v51 = getelementptr inbounds [13595 x i32], ptr @g1, i32 0, i32 %v50
+  store i32 %a0, ptr %v51, align 4
+  %v52 = getelementptr inbounds [13595 x i32], ptr @g2, i32 0, i32 %v50
+  %v53 = load i32, ptr %v52, align 4
+  store i32 %v53, ptr @g0, align 4
+  store i32 %v45, ptr %v52, align 4
   br label %b16
 
 b16:                                              ; preds = %b15, %b14, %b12, %b11, %b9, %b8, %b6, %b5, %b2, %b1

diff  --git a/llvm/test/CodeGen/Hexagon/constp-combine-neg.ll b/llvm/test/CodeGen/Hexagon/constp-combine-neg.ll
index c0cf7a53d18b5..0d74e1db6097b 100644
--- a/llvm/test/CodeGen/Hexagon/constp-combine-neg.ll
+++ b/llvm/test/CodeGen/Hexagon/constp-combine-neg.ll
@@ -7,15 +7,11 @@
 define i32 @main() #0 {
 entry:
   %l = alloca [7 x i32], align 8
-  %p_arrayidx45 = bitcast [7 x i32]* %l to i32*
-  %vector_ptr = bitcast [7 x i32]* %l to <2 x i32>*
-  store <2 x i32> <i32 3, i32 -2>, <2 x i32>* %vector_ptr, align 8
-  %p_arrayidx.1 = getelementptr [7 x i32], [7 x i32]* %l, i32 0, i32 2
-  %vector_ptr.1 = bitcast i32* %p_arrayidx.1 to <2 x i32>*
-  store <2 x i32> <i32 -4, i32 6>, <2 x i32>* %vector_ptr.1, align 8
-  %p_arrayidx.2 = getelementptr [7 x i32], [7 x i32]* %l, i32 0, i32 4
-  %vector_ptr.2 = bitcast i32* %p_arrayidx.2 to <2 x i32>*
-  store <2 x i32> <i32 -8, i32 -10>, <2 x i32>* %vector_ptr.2, align 8
+  store <2 x i32> <i32 3, i32 -2>, ptr %l, align 8
+  %p_arrayidx.1 = getelementptr [7 x i32], ptr %l, i32 0, i32 2
+  store <2 x i32> <i32 -4, i32 6>, ptr %p_arrayidx.1, align 8
+  %p_arrayidx.2 = getelementptr [7 x i32], ptr %l, i32 0, i32 4
+  store <2 x i32> <i32 -8, i32 -10>, ptr %p_arrayidx.2, align 8
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/constp-extract.ll b/llvm/test/CodeGen/Hexagon/constp-extract.ll
index e7b6f959d16c9..cffa4856d8682 100644
--- a/llvm/test/CodeGen/Hexagon/constp-extract.ll
+++ b/llvm/test/CodeGen/Hexagon/constp-extract.ll
@@ -13,13 +13,13 @@ entry:
   %0 = call i32 @llvm.hexagon.S2.extractu(i32 703696, i32 16, i32 4)
 ; CHECK: 43981
 ; CHECK-NOT: extractu
-  store i32 %0, i32* @x, align 4
+  store i32 %0, ptr @x, align 4
   ; extract(0x000ABCD0, 16, 4)
   ; should evaluate to 0xFFFFABCD (dec 4294945741 or -21555)
   %1 = call i32 @llvm.hexagon.S4.extract(i32 703696, i32 16, i32 4)
 ; CHECK: -21555
 ; CHECK-NOT: extract
-  store i32 %1, i32* @y, align 4
+  store i32 %1, ptr @y, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/convert-to-dot-old.ll b/llvm/test/CodeGen/Hexagon/convert-to-dot-old.ll
index c4e67f3db6108..f1c398d3a7add 100644
--- a/llvm/test/CodeGen/Hexagon/convert-to-dot-old.ll
+++ b/llvm/test/CodeGen/Hexagon/convert-to-dot-old.ll
@@ -11,7 +11,7 @@
 
 target triple = "hexagon"
 
-define void @fred(i16* nocapture %a0, i16* nocapture %a1, i16* nocapture %a2, i16 signext %a3, i16* %a4, i16 signext %a5, i16 signext %a6, i16 signext %a7, i32 %a8, i16 signext %a9, i16 signext %a10) local_unnamed_addr #0 {
+define void @fred(ptr nocapture %a0, ptr nocapture %a1, ptr nocapture %a2, i16 signext %a3, ptr %a4, i16 signext %a5, i16 signext %a6, i16 signext %a7, i32 %a8, i16 signext %a9, i16 signext %a10) local_unnamed_addr #0 {
 b11:
   %v12 = sext i16 %a5 to i32
   %v13 = tail call i32 @llvm.hexagon.A2.sxth(i32 %v12)
@@ -37,7 +37,7 @@ b11:
   %v33 = select i1 %v32, i32 %v30, i32 undef
   %v34 = trunc i32 %v33 to i16
   %v35 = trunc i32 %v24 to i16
-  call void @foo(i16* nonnull undef, i32* nonnull undef, i16* %a4, i16 signext %v35, i16 signext %v34, i16 signext 2) #4
+  call void @foo(ptr nonnull undef, ptr nonnull undef, ptr %a4, i16 signext %v35, i16 signext %v34, i16 signext 2) #4
   %v36 = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v18, i32 undef)
   %v37 = call i32 @llvm.hexagon.A2.asrh(i32 %v36)
   %v38 = call i32 @llvm.hexagon.A2.sub(i32 %v13, i32 undef)
@@ -47,23 +47,23 @@ b11:
   %v42 = call i32 @llvm.hexagon.A2.sath(i32 %v41)
   %v43 = select i1 undef, i32 %v42, i32 %v37
   %v44 = trunc i32 %v43 to i16
-  call void @foo(i16* nonnull undef, i32* nonnull undef, i16* %a4, i16 signext undef, i16 signext %v44, i16 signext 2) #4
+  call void @foo(ptr nonnull undef, ptr nonnull undef, ptr %a4, i16 signext undef, i16 signext %v44, i16 signext 2) #4
   %v45 = call i32 @llvm.hexagon.A2.sath(i32 undef)
   %v46 = select i1 undef, i32 undef, i32 %v45
   %v47 = trunc i32 %v46 to i16
-  call void @foo(i16* nonnull undef, i32* nonnull undef, i16* %a4, i16 signext %v47, i16 signext undef, i16 signext 2) #4
+  call void @foo(ptr nonnull undef, ptr nonnull undef, ptr %a4, i16 signext %v47, i16 signext undef, i16 signext 2) #4
   %v48 = call i32 @llvm.hexagon.A2.sub(i32 undef, i32 %v15)
   %v49 = call i32 @llvm.hexagon.A2.sath(i32 %v48)
   %v50 = trunc i32 %v49 to i16
-  store i16 %v50, i16* undef, align 2
-  store i16 %a3, i16* %a0, align 2
+  store i16 %v50, ptr undef, align 2
+  store i16 %a3, ptr %a0, align 2
   %v51 = sext i16 %a10 to i32
   %v52 = call i32 @llvm.hexagon.A2.sxth(i32 %v51)
   %v53 = call i32 @llvm.hexagon.A2.add(i32 undef, i32 %v52)
   %v54 = call i32 @llvm.hexagon.A2.sath(i32 %v53)
   %v55 = trunc i32 %v54 to i16
-  store i16 %v55, i16* %a1, align 2
-  store i16 %a7, i16* %a2, align 2
+  store i16 %v55, ptr %a1, align 2
+  store i16 %a7, ptr %a2, align 2
   %v56 = sext i16 %a9 to i32
   %v57 = call i32 @llvm.hexagon.A2.sxth(i32 %v56)
   br i1 undef, label %b58, label %b62
@@ -72,7 +72,7 @@ b58:                                              ; preds = %b11
   %v59 = call i32 @llvm.hexagon.A2.add(i32 %v57, i32 %v52)
   %v60 = call i32 @llvm.hexagon.A2.sath(i32 %v59)
   %v61 = trunc i32 %v60 to i16
-  store i16 %v61, i16* %a1, align 2
+  store i16 %v61, ptr %a1, align 2
   br label %b63
 
 b62:                                              ; preds = %b11
@@ -87,7 +87,7 @@ b66:                                              ; preds = %b63
   br i1 undef, label %b67, label %b68
 
 b67:                                              ; preds = %b66, %b63
-  store i16 0, i16* %a2, align 2
+  store i16 0, ptr %a2, align 2
   br label %b68
 
 b68:                                              ; preds = %b67, %b66
@@ -101,7 +101,7 @@ declare i32 @llvm.hexagon.A2.sub(i32, i32) #2
 declare i32 @llvm.hexagon.A2.asrh(i32) #2
 declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32) #2
 declare i32 @llvm.hexagon.A2.aslh(i32) #2
-declare void @foo(i16*, i32*, i16*, i16 signext, i16 signext, i16 signext) local_unnamed_addr #3
+declare void @foo(ptr, ptr, ptr, i16 signext, i16 signext, i16 signext) local_unnamed_addr #3
 
 attributes #0 = { nounwind optsize "target-cpu"="hexagonv55" "target-features"="-hvx,-long-calls" }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/convert_const_i1_to_i8.ll b/llvm/test/CodeGen/Hexagon/convert_const_i1_to_i8.ll
index 9246b026b6648..05b182178599d 100644
--- a/llvm/test/CodeGen/Hexagon/convert_const_i1_to_i8.ll
+++ b/llvm/test/CodeGen/Hexagon/convert_const_i1_to_i8.ll
@@ -2,12 +2,12 @@
 ; CHECK-NOT: .space {{[0-9][0-9][0-9][0-9]}}
 ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}})
 
-define void @convert_const_i1_to_i8(<32 x i32>* %a0) #0 {
+define void @convert_const_i1_to_i8(ptr %a0) #0 {
 entry:
-  %v0 = load <32 x i32>, <32 x i32>* %a0, align 128
+  %v0 = load <32 x i32>, ptr %a0, align 128
   %v1 = tail call <32 x i32> @llvm.hexagon.V6.vrdelta.128B(<32 x i32> %v0, <32 x i32> undef)
   %v2 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> <i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false>, <32 x i32> undef, <32 x i32> %v1)
-  store <32 x i32> %v2, <32 x i32>* %a0, align 128
+  store <32 x i32> %v2, ptr %a0, align 128
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/convertdptoint.ll b/llvm/test/CodeGen/Hexagon/convertdptoint.ll
index adf76e5dc82eb..670fd9f228144 100644
--- a/llvm/test/CodeGen/Hexagon/convertdptoint.ll
+++ b/llvm/test/CodeGen/Hexagon/convertdptoint.ll
@@ -11,16 +11,16 @@ entry:
   %a = alloca double, align 8
   %b = alloca double, align 8
   %c = alloca double, align 8
-  store i32 0, i32* %retval
-  store volatile double 1.540000e+01, double* %a, align 8
-  store volatile double 9.100000e+00, double* %b, align 8
-  %0 = load volatile double, double* %a, align 8
-  %1 = load volatile double, double* %b, align 8
+  store i32 0, ptr %retval
+  store volatile double 1.540000e+01, ptr %a, align 8
+  store volatile double 9.100000e+00, ptr %b, align 8
+  %0 = load volatile double, ptr %a, align 8
+  %1 = load volatile double, ptr %b, align 8
   %add = fadd double %0, %1
-  store double %add, double* %c, align 8
-  %2 = load double, double* %c, align 8
+  store double %add, ptr %c, align 8
+  %2 = load double, ptr %c, align 8
   %conv = fptosi double %2 to i32
-  store i32 %conv, i32* %i, align 4
-  %3 = load i32, i32* %i, align 4
+  store i32 %conv, ptr %i, align 4
+  %3 = load i32, ptr %i, align 4
   ret i32 %3
 }

diff  --git a/llvm/test/CodeGen/Hexagon/convertdptoll.ll b/llvm/test/CodeGen/Hexagon/convertdptoll.ll
index 6b5bf56a248bc..e0c104d3b273f 100644
--- a/llvm/test/CodeGen/Hexagon/convertdptoll.ll
+++ b/llvm/test/CodeGen/Hexagon/convertdptoll.ll
@@ -11,17 +11,17 @@ entry:
   %a = alloca double, align 8
   %b = alloca double, align 8
   %c = alloca double, align 8
-  store i32 0, i32* %retval
-  store double 1.540000e+01, double* %a, align 8
-  store double 9.100000e+00, double* %b, align 8
-  %0 = load double, double* %a, align 8
-  %1 = load double, double* %b, align 8
+  store i32 0, ptr %retval
+  store double 1.540000e+01, ptr %a, align 8
+  store double 9.100000e+00, ptr %b, align 8
+  %0 = load double, ptr %a, align 8
+  %1 = load double, ptr %b, align 8
   %add = fadd double %0, %1
-  store volatile double %add, double* %c, align 8
-  %2 = load volatile double, double* %c, align 8
+  store volatile double %add, ptr %c, align 8
+  %2 = load volatile double, ptr %c, align 8
   %conv = fptosi double %2 to i64
-  store i64 %conv, i64* %i, align 8
-  %3 = load i64, i64* %i, align 8
+  store i64 %conv, ptr %i, align 8
+  %3 = load i64, ptr %i, align 8
   %conv1 = trunc i64 %3 to i32
   ret i32 %conv1
 }

diff  --git a/llvm/test/CodeGen/Hexagon/convertsptoint.ll b/llvm/test/CodeGen/Hexagon/convertsptoint.ll
index 939b3b06a8c79..e5dbcc299cd8b 100644
--- a/llvm/test/CodeGen/Hexagon/convertsptoint.ll
+++ b/llvm/test/CodeGen/Hexagon/convertsptoint.ll
@@ -11,16 +11,16 @@ entry:
   %a = alloca float, align 4
   %b = alloca float, align 4
   %c = alloca float, align 4
-  store i32 0, i32* %retval
-  store float 0x402ECCCCC0000000, float* %a, align 4
-  store float 0x4022333340000000, float* %b, align 4
-  %0 = load float, float* %a, align 4
-  %1 = load float, float* %b, align 4
+  store i32 0, ptr %retval
+  store float 0x402ECCCCC0000000, ptr %a, align 4
+  store float 0x4022333340000000, ptr %b, align 4
+  %0 = load float, ptr %a, align 4
+  %1 = load float, ptr %b, align 4
   %add = fadd float %0, %1
-  store volatile float %add, float* %c, align 4
-  %2 = load volatile float, float* %c, align 4
+  store volatile float %add, ptr %c, align 4
+  %2 = load volatile float, ptr %c, align 4
   %conv = fptosi float %2 to i32
-  store i32 %conv, i32* %i, align 4
-  %3 = load i32, i32* %i, align 4
+  store i32 %conv, ptr %i, align 4
+  %3 = load i32, ptr %i, align 4
   ret i32 %3
 }

diff  --git a/llvm/test/CodeGen/Hexagon/convertsptoll.ll b/llvm/test/CodeGen/Hexagon/convertsptoll.ll
index f540397ccf5e5..dc5b71bfcda09 100644
--- a/llvm/test/CodeGen/Hexagon/convertsptoll.ll
+++ b/llvm/test/CodeGen/Hexagon/convertsptoll.ll
@@ -11,17 +11,17 @@ entry:
   %a = alloca float, align 4
   %b = alloca float, align 4
   %c = alloca float, align 4
-  store i32 0, i32* %retval
-  store float 0x402ECCCCC0000000, float* %a, align 4
-  store float 0x4022333340000000, float* %b, align 4
-  %0 = load float, float* %a, align 4
-  %1 = load float, float* %b, align 4
+  store i32 0, ptr %retval
+  store float 0x402ECCCCC0000000, ptr %a, align 4
+  store float 0x4022333340000000, ptr %b, align 4
+  %0 = load float, ptr %a, align 4
+  %1 = load float, ptr %b, align 4
   %add = fadd float %0, %1
-  store volatile float %add, float* %c, align 4
-  %2 = load volatile float, float* %c, align 4
+  store volatile float %add, ptr %c, align 4
+  %2 = load volatile float, ptr %c, align 4
   %conv = fptosi float %2 to i64
-  store i64 %conv, i64* %i, align 8
-  %3 = load i64, i64* %i, align 8
+  store i64 %conv, ptr %i, align 8
+  %3 = load i64, ptr %i, align 8
   %conv1 = trunc i64 %3 to i32
   ret i32 %conv1
 }

diff  --git a/llvm/test/CodeGen/Hexagon/copy-to-combine-dbg.ll b/llvm/test/CodeGen/Hexagon/copy-to-combine-dbg.ll
index 837e97d79651f..62aea4a3d7127 100644
--- a/llvm/test/CodeGen/Hexagon/copy-to-combine-dbg.ll
+++ b/llvm/test/CodeGen/Hexagon/copy-to-combine-dbg.ll
@@ -5,7 +5,7 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define i32 @f0(i32 %a0, i8* (i32, i8*)* %a1) local_unnamed_addr #0 !dbg !5 {
+define i32 @f0(i32 %a0, ptr %a1) local_unnamed_addr #0 !dbg !5 {
 b0:
   br label %b1
 
@@ -15,7 +15,7 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b3, %b1
   %v0 = phi i32 [ 0, %b1 ], [ %v2, %b3 ]
-  %v1 = tail call i8* %a1(i32 12, i8* null) #0
+  %v1 = tail call ptr %a1(i32 12, ptr null) #0
   br label %b3
 
 b3:                                               ; preds = %b2

diff  --git a/llvm/test/CodeGen/Hexagon/csr-func-usedef.ll b/llvm/test/CodeGen/Hexagon/csr-func-usedef.ll
index a9f81b9f521ac..042c9da52539a 100644
--- a/llvm/test/CodeGen/Hexagon/csr-func-usedef.ll
+++ b/llvm/test/CodeGen/Hexagon/csr-func-usedef.ll
@@ -3,10 +3,10 @@
 
 target triple = "hexagon"
 
-declare i8* @llvm.hexagon.circ.ldb(i8*, i8*, i32, i32) #1
-declare i8* @llvm.hexagon.circ.stb(i8*, i32, i32, i32) #1
+declare ptr @llvm.hexagon.circ.ldb(ptr, ptr, i32, i32) #1
+declare ptr @llvm.hexagon.circ.stb(ptr, i32, i32, i32) #1
 
-define zeroext i8 @circular_loop_test10(i8* %A, i8* %B, i32 %x, i32 %y, i32 %z, i32 %w) #0 {
+define zeroext i8 @circular_loop_test10(ptr %A, ptr %B, i32 %x, i32 %y, i32 %z, i32 %w) #0 {
 entry:
   %element_load0 = alloca i8, align 1
   %element_load2 = alloca i8, align 1
@@ -18,27 +18,27 @@ entry:
   br label %for.body
 
 for.body:                                         ; preds = %for.body, %entry
-  %p0.082 = phi i8* [ %A, %entry ], [ undef, %for.body ]
+  %p0.082 = phi ptr [ %A, %entry ], [ undef, %for.body ]
   %element_load.080 = phi i32 [ 0, %entry ], [ %add18, %for.body ]
-  %p1.079 = phi i8* [ %B, %entry ], [ %1, %for.body ]
-  %p2.078 = phi i8* [ undef, %entry ], [ %3, %for.body ]
-  %p3.077 = phi i8* [ undef, %entry ], [ %4, %for.body ]
-  %0 = call i8* @llvm.hexagon.circ.ldb(i8* %p0.082, i8* nonnull %element_load0, i32 %or, i32 2)
-  %1 = call i8* @llvm.hexagon.circ.ldb(i8* %p1.079, i8* nonnull null, i32 0, i32 1)
-  %2 = call i8* @llvm.hexagon.circ.ldb(i8* %p2.078, i8* nonnull %element_load2, i32 %or5, i32 3)
-  %3 = call i8* @llvm.hexagon.circ.ldb(i8* %2, i8* nonnull %element_load5, i32 %or5, i32 1)
-  %4 = call i8* @llvm.hexagon.circ.ldb(i8* %p3.077, i8* nonnull %element_load3, i32 %or7, i32 1)
-  %5 = load i8, i8* null, align 1
+  %p1.079 = phi ptr [ %B, %entry ], [ %1, %for.body ]
+  %p2.078 = phi ptr [ undef, %entry ], [ %3, %for.body ]
+  %p3.077 = phi ptr [ undef, %entry ], [ %4, %for.body ]
+  %0 = call ptr @llvm.hexagon.circ.ldb(ptr %p0.082, ptr nonnull %element_load0, i32 %or, i32 2)
+  %1 = call ptr @llvm.hexagon.circ.ldb(ptr %p1.079, ptr nonnull null, i32 0, i32 1)
+  %2 = call ptr @llvm.hexagon.circ.ldb(ptr %p2.078, ptr nonnull %element_load2, i32 %or5, i32 3)
+  %3 = call ptr @llvm.hexagon.circ.ldb(ptr %2, ptr nonnull %element_load5, i32 %or5, i32 1)
+  %4 = call ptr @llvm.hexagon.circ.ldb(ptr %p3.077, ptr nonnull %element_load3, i32 %or7, i32 1)
+  %5 = load i8, ptr null, align 1
   %conv = zext i8 %5 to i32
-  %6 = load i8, i8* %element_load2, align 1
+  %6 = load i8, ptr %element_load2, align 1
   %conv8 = zext i8 %6 to i32
-  %7 = load i8, i8* %element_load3, align 1
+  %7 = load i8, ptr %element_load3, align 1
   %conv9 = zext i8 %7 to i32
-  %8 = load i8, i8* undef, align 1
+  %8 = load i8, ptr undef, align 1
   %conv11 = zext i8 %8 to i32
-  %9 = load i8, i8* %element_load5, align 1
+  %9 = load i8, ptr %element_load5, align 1
   %conv13 = zext i8 %9 to i32
-  %10 = load i8, i8* %element_load0, align 1
+  %10 = load i8, ptr %element_load0, align 1
   %conv15 = zext i8 %10 to i32
   %conv17 = and i32 %element_load.080, 255
   %add = add nuw nsw i32 %conv, %conv17
@@ -51,13 +51,12 @@ for.body:                                         ; preds = %for.body, %entry
   br i1 %exitcond84, label %for.body23, label %for.body
 
 for.body23:                                       ; preds = %for.body23, %for.body
-  %11 = call i8* @llvm.hexagon.circ.stb(i8* undef, i32 undef, i32 %or, i32 3)
+  %11 = call ptr @llvm.hexagon.circ.stb(ptr undef, i32 undef, i32 %or, i32 3)
   br i1 undef, label %for.body34, label %for.body23
 
 for.body34:                                       ; preds = %for.body34, %for.body23
   %element_load.173 = phi i32 [ %add38, %for.body34 ], [ %add18, %for.body23 ]
-  %arrayidx35 = getelementptr inbounds i8, i8* %B, i32 0
-  %12 = load i8, i8* %arrayidx35, align 1
+  %12 = load i8, ptr %B, align 1
   %conv36 = zext i8 %12 to i32
   %conv37 = and i32 %element_load.173, 255
   %add38 = add nuw nsw i32 %conv36, %conv37

diff  --git a/llvm/test/CodeGen/Hexagon/ctor.ll b/llvm/test/CodeGen/Hexagon/ctor.ll
index 2e2fc519118fe..81e659dae4187 100644
--- a/llvm/test/CodeGen/Hexagon/ctor.ll
+++ b/llvm/test/CodeGen/Hexagon/ctor.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -march=hexagon < %s  | FileCheck -check-prefix=INITARRAY %s
 ; RUN: llc -march=hexagon < %s  -use-ctors | FileCheck -check-prefix=CTOR %s
 
- at llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @_GLOBAL__sub_I_P10066.ii, i8* null }]
+ at llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @_GLOBAL__sub_I_P10066.ii, ptr null }]
 define internal void @_GLOBAL__sub_I_P10066.ii() {
 entry:
   ret void

diff  --git a/llvm/test/CodeGen/Hexagon/dadd.ll b/llvm/test/CodeGen/Hexagon/dadd.ll
index 3068f499d12df..d34de23ccde7c 100644
--- a/llvm/test/CodeGen/Hexagon/dadd.ll
+++ b/llvm/test/CodeGen/Hexagon/dadd.ll
@@ -9,11 +9,11 @@ entry:
   %a = alloca double, align 8
   %b = alloca double, align 8
   %c = alloca double, align 8
-  store volatile double 1.540000e+01, double* %a, align 8
-  store volatile double 9.100000e+00, double* %b, align 8
-  %0 = load volatile double, double* %a, align 8
-  %1 = load volatile double, double* %b, align 8
+  store volatile double 1.540000e+01, ptr %a, align 8
+  store volatile double 9.100000e+00, ptr %b, align 8
+  %0 = load volatile double, ptr %a, align 8
+  %1 = load volatile double, ptr %b, align 8
   %add = fadd double %0, %1
-  store double %add, double* %c, align 8
+  store double %add, ptr %c, align 8
   ret i32 0
 }

diff  --git a/llvm/test/CodeGen/Hexagon/dag-indexed.ll b/llvm/test/CodeGen/Hexagon/dag-indexed.ll
index 7e3b9416561b2..71d1b9f9f0a8a 100644
--- a/llvm/test/CodeGen/Hexagon/dag-indexed.ll
+++ b/llvm/test/CodeGen/Hexagon/dag-indexed.ll
@@ -6,7 +6,7 @@
 ; in DAGCombiner is unable to convert indexed stores.
 
 ; Function Attrs: nounwind
-define void @f0(i32 %a0, i8* %a1, i8* %a2) #0 {
+define void @f0(i32 %a0, ptr %a1, ptr %a2) #0 {
 b0:
   switch i32 %a0, label %b5 [
     i32 67830273, label %b1
@@ -23,14 +23,11 @@ b3:                                               ; preds = %b0
   br i1 undef, label %b4, label %b5
 
 b4:                                               ; preds = %b3
-  %v0 = bitcast i8* %a2 to i32*
-  store i32 0, i32* %v0, align 1, !tbaa !0
-  %v1 = getelementptr inbounds i8, i8* %a1, i32 4
-  %v2 = bitcast i8* %v1 to i32*
-  %v3 = load i32, i32* %v2, align 4, !tbaa !5
-  %v4 = getelementptr inbounds i8, i8* %a2, i32 4
-  %v5 = bitcast i8* %v4 to i32*
-  store i32 %v3, i32* %v5, align 1, !tbaa !5
+  store i32 0, ptr %a2, align 1, !tbaa !0
+  %v1 = getelementptr inbounds i8, ptr %a1, i32 4
+  %v3 = load i32, ptr %v1, align 4, !tbaa !5
+  %v4 = getelementptr inbounds i8, ptr %a2, i32 4
+  store i32 %v3, ptr %v4, align 1, !tbaa !5
   br label %b5
 
 b5:                                               ; preds = %b4, %b3, %b2, %b1, %b0

diff  --git a/llvm/test/CodeGen/Hexagon/dccleana.ll b/llvm/test/CodeGen/Hexagon/dccleana.ll
index 633ac69fd60d2..85cb6d2106142 100644
--- a/llvm/test/CodeGen/Hexagon/dccleana.ll
+++ b/llvm/test/CodeGen/Hexagon/dccleana.ll
@@ -4,12 +4,12 @@
 ; CHECK: dccleana
 
 ; Function Attrs: nounwind
-declare void @llvm.hexagon.Y2.dccleana(i8*) #0
+declare void @llvm.hexagon.Y2.dccleana(ptr) #0
 
-define i32 @f0(i8* %a0) {
+define i32 @f0(ptr %a0) {
 b0:
-  tail call void @llvm.hexagon.Y2.dccleana(i8* %a0)
-  %v0 = load i8, i8* %a0
+  tail call void @llvm.hexagon.Y2.dccleana(ptr %a0)
+  %v0 = load i8, ptr %a0
   %v1 = zext i8 %v0 to i32
   ret i32 %v1
 }

diff  --git a/llvm/test/CodeGen/Hexagon/dead-store-stack.ll b/llvm/test/CodeGen/Hexagon/dead-store-stack.ll
index a539be440f291..23c80b0e4aa57 100644
--- a/llvm/test/CodeGen/Hexagon/dead-store-stack.ll
+++ b/llvm/test/CodeGen/Hexagon/dead-store-stack.ll
@@ -8,10 +8,10 @@
 define void @ParseFunc() local_unnamed_addr #0 {
 entry:
   %dataVar = alloca i32, align 4
-  %0 = load i32, i32* %dataVar, align 4
+  %0 = load i32, ptr %dataVar, align 4
   %and = and i32 %0, 65535
-  store i32 %and, i32* %dataVar, align 4
-  %.pr = load i32, i32* %dataVar, align 4
+  store i32 %and, ptr %dataVar, align 4
+  %.pr = load i32, ptr %dataVar, align 4
   switch i32 %.pr, label %sw.epilog [
     i32 4, label %sw.bb
     i32 5, label %sw.bb
@@ -37,7 +37,7 @@ ParseFuncNext.exit.i:
   br i1 %cmp1.i, label %if.then.i, label %if.else10.i
 
 if.then.i:
-  call void (i8*, i32, i8*, ...) @snprintf(i8* undef, i32 undef, i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str.3, i32 0, i32 0), i32 undef) #2
+  call void (ptr, i32, ptr, ...) @snprintf(ptr undef, i32 undef, ptr @.str.3, i32 undef) #2
   br label %if.end27.i
 
 if.else10.i:
@@ -56,12 +56,12 @@ sw.bb41:
   unreachable
 
 sw.bb42:
-  %1 = load i32, i32* undef, align 4
+  %1 = load i32, ptr undef, align 4
   %shr.i = lshr i32 %1, 16
   br label %while.cond.i.i
 
 while.cond.i.i:
-  %2 = load i8, i8* undef, align 1
+  %2 = load i8, ptr undef, align 1
   switch i8 %2, label %if.then4.i [
     i8 48, label %land.end.i.i
     i8 120, label %land.end.i.i
@@ -104,7 +104,7 @@ sw.bb16.i:
   unreachable
 
 sw.epilog.i:
-  call void (i8*, i32, i8*, ...) @snprintf(i8* undef, i32 undef, i8* nonnull undef, i32 undef) #2
+  call void (ptr, i32, ptr, ...) @snprintf(ptr undef, i32 undef, ptr nonnull undef, i32 undef) #2
   br label %land.rhs.i126
 
 sw.bb43:
@@ -124,7 +124,7 @@ sw.epilog:
 }
 
 ; Function Attrs: nounwind
-declare void @snprintf(i8* nocapture, i32, i8* nocapture readonly, ...) local_unnamed_addr #1
+declare void @snprintf(ptr nocapture, i32, ptr nocapture readonly, ...) local_unnamed_addr #1
 
 attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv62" "target-features"="+hvx,+hvx-length64b" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv62" "target-features"="+hvx,+hvx-length64b" "unsafe-fp-math"="false" "use-soft-float"="false" }

diff  --git a/llvm/test/CodeGen/Hexagon/dealloc-store.ll b/llvm/test/CodeGen/Hexagon/dealloc-store.ll
index 0f23aedbd73ed..1ac1464839f8d 100644
--- a/llvm/test/CodeGen/Hexagon/dealloc-store.ll
+++ b/llvm/test/CodeGen/Hexagon/dealloc-store.ll
@@ -2,24 +2,24 @@
 
 target triple = "hexagon"
 
-%s.0 = type <{ i8*, i8*, i16, i8, i8, i8 }>
-%s.1 = type { %s.2, [14 x %s.6*], [14 x i8], [6 x i8], [4 x %s.4], [4 x %s.8], [4 x %s.8], [14 x %s.10], %s.6*, %s.6* }
+%s.0 = type <{ ptr, ptr, i16, i8, i8, i8 }>
+%s.1 = type { %s.2, [14 x ptr], [14 x i8], [6 x i8], [4 x %s.4], [4 x %s.8], [4 x %s.8], [14 x %s.10], ptr, ptr }
 %s.2 = type { [4 x %s.3], i16, i32, i32, i32, i32 }
 %s.3 = type { i8, i8, i8, i8 }
 %s.4 = type { i8, i32, [16 x %s.5], %s.6, i8, [7 x i8] }
-%s.5 = type { void (i8*)*, i8*, i32 }
-%s.6 = type { %s.7*, i32, %s.7*, i32, i32, i32, %s.7*, %s.7*, i32, i8, i32*, i32, i32, i32*, i32*, i32, i8, i32*, i32, %s.5*, i32, i32, i32, void (%s.6*)*, i32, i8 }
+%s.5 = type { ptr, ptr, i32 }
+%s.6 = type { ptr, i32, ptr, i32, i32, i32, ptr, ptr, i32, i8, ptr, i32, i32, ptr, ptr, i32, i8, ptr, i32, ptr, i32, i32, i32, ptr, i32, i8 }
 %s.7 = type { i32, i16, i16 }
 %s.8 = type { %s.9 }
-%s.9 = type { i8*, i32, i32 }
+%s.9 = type { ptr, i32, i32 }
 %s.10 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
 
- at g0 = internal constant %s.0 <{ i8* getelementptr inbounds ([125 x i8], [125 x i8]* @g1, i32 0, i32 0), i8* getelementptr inbounds ([82 x i8], [82 x i8]* @g2, i32 0, i32 0), i16 1694, i8 4, i8 0, i8 0 }>, section ".rodata.diag", align 1
+ at g0 = internal constant %s.0 <{ ptr @g1, ptr @g2, i16 1694, i8 4, i8 0, i8 0 }>, section ".rodata.diag", align 1
 @g1 = private unnamed_addr constant [125 x i8] c"............................................................................................................................\00", align 8
 @g2 = private unnamed_addr constant [82 x i8] c"Assertion (..............................................................) failed\00", align 8
 @g3 = external global %s.1
 
-define void @f0(%s.6* %a0, i8 zeroext %a1) {
+define void @f0(ptr %a0, i8 zeroext %a1) {
 ;  look for a dealloc_return in a packet with nothing else.
 ;
 ; CHECK: memw(r1+#0) = r0
@@ -33,7 +33,7 @@ b0:
   br i1 %v1, label %b1, label %b2, !prof !0
 
 b1:                                               ; preds = %b0
-  tail call void @f1(%s.0* @g0) #1
+  tail call void @f1(ptr @g0) #1
   unreachable
 
 b2:                                               ; preds = %b0
@@ -41,11 +41,11 @@ b2:                                               ; preds = %b0
   br i1 %v2, label %b3, label %b4
 
 b3:                                               ; preds = %b2
-  store %s.6* %a0, %s.6** getelementptr inbounds (%s.1, %s.1* @g3, i32 0, i32 8), align 4, !tbaa !1
+  store ptr %a0, ptr getelementptr inbounds (%s.1, ptr @g3, i32 0, i32 8), align 4, !tbaa !1
   br label %b5
 
 b4:                                               ; preds = %b2
-  store %s.6* %a0, %s.6** getelementptr inbounds (%s.1, %s.1* @g3, i32 0, i32 9), align 4, !tbaa !1
+  store ptr %a0, ptr getelementptr inbounds (%s.1, ptr @g3, i32 0, i32 9), align 4, !tbaa !1
   br label %b5
 
 b5:                                               ; preds = %b4, %b3
@@ -53,7 +53,7 @@ b5:                                               ; preds = %b4, %b3
 }
 
 ; Function Attrs: noreturn
-declare void @f1(%s.0*) #1
+declare void @f1(ptr) #1
 
 attributes #0 = { nounwind "target-cpu"="hexagonv60" }
 attributes #1 = { noreturn }

diff  --git a/llvm/test/CodeGen/Hexagon/dealloc_return.ll b/llvm/test/CodeGen/Hexagon/dealloc_return.ll
index e750f37df7850..2b5e64ca8b0ea 100644
--- a/llvm/test/CodeGen/Hexagon/dealloc_return.ll
+++ b/llvm/test/CodeGen/Hexagon/dealloc_return.ll
@@ -12,15 +12,15 @@
 define i32 @f0() #0 {
 b0:
   %v0 = alloca i32, align 4
-  %v1 = load i32, i32* @g0, align 4
-  store i32 %v1, i32* %v0, align 4
-  %v2 = load i32, i32* %v0, align 4
-  %v3 = load i32, i32* @g1, align 4
+  %v1 = load i32, ptr @g0, align 4
+  store i32 %v1, ptr %v0, align 4
+  %v2 = load i32, ptr %v0, align 4
+  %v3 = load i32, ptr @g1, align 4
   %v4 = mul nsw i32 %v2, %v3
-  %v5 = load i32, i32* @g2, align 4
+  %v5 = load i32, ptr @g2, align 4
   %v6 = add nsw i32 %v4, %v5
-  store i32 %v6, i32* %v0, align 4
-  %v7 = load i32, i32* %v0, align 4
+  store i32 %v6, ptr %v0, align 4
+  %v7 = load i32, ptr %v0, align 4
   ret i32 %v7
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/debug-line_table_start.ll b/llvm/test/CodeGen/Hexagon/debug-line_table_start.ll
index 46ffeb198a6a4..0cc1515ae2437 100644
--- a/llvm/test/CodeGen/Hexagon/debug-line_table_start.ll
+++ b/llvm/test/CodeGen/Hexagon/debug-line_table_start.ll
@@ -11,7 +11,7 @@
 define i32 @f0() #0 !dbg !5 {
 b0:
   %v0 = alloca i32, align 4
-  store i32 0, i32* %v0, align 4
+  store i32 0, ptr %v0, align 4
   ret i32 0, !dbg !9
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/debug-prologue.ll b/llvm/test/CodeGen/Hexagon/debug-prologue.ll
index 1220c76449280..94bcf488ed107 100644
--- a/llvm/test/CodeGen/Hexagon/debug-prologue.ll
+++ b/llvm/test/CodeGen/Hexagon/debug-prologue.ll
@@ -11,29 +11,29 @@ b0:
   %v0 = alloca i32, align 4
   %v1 = alloca i32, align 4
   %v2 = alloca i32, align 4
-  store i32 %a0, i32* %v1, align 4
-  call void @llvm.dbg.declare(metadata i32* %v1, metadata !9, metadata !DIExpression()), !dbg !10
-  call void @llvm.dbg.declare(metadata i32* %v2, metadata !11, metadata !DIExpression()), !dbg !12
-  %v3 = load i32, i32* %v1, align 4, !dbg !13
+  store i32 %a0, ptr %v1, align 4
+  call void @llvm.dbg.declare(metadata ptr %v1, metadata !9, metadata !DIExpression()), !dbg !10
+  call void @llvm.dbg.declare(metadata ptr %v2, metadata !11, metadata !DIExpression()), !dbg !12
+  %v3 = load i32, ptr %v1, align 4, !dbg !13
   %v4 = icmp sgt i32 %v3, 1, !dbg !15
   br i1 %v4, label %b1, label %b2, !dbg !16
 
 b1:                                               ; preds = %b0
-  %v5 = load i32, i32* %v1, align 4, !dbg !17
-  %v6 = load i32, i32* %v1, align 4, !dbg !18
+  %v5 = load i32, ptr %v1, align 4, !dbg !17
+  %v6 = load i32, ptr %v1, align 4, !dbg !18
   %v7 = sub nsw i32 %v6, 1, !dbg !19
   %v8 = call i32 @f0(i32 %v7), !dbg !20
   %v9 = mul nsw i32 %v5, %v8, !dbg !21
-  store i32 %v9, i32* %v0, align 4, !dbg !22
+  store i32 %v9, ptr %v0, align 4, !dbg !22
   br label %b3, !dbg !22
 
 b2:                                               ; preds = %b0
-  %v10 = load i32, i32* %v1, align 4, !dbg !23
-  store i32 %v10, i32* %v0, align 4, !dbg !24
+  %v10 = load i32, ptr %v1, align 4, !dbg !23
+  store i32 %v10, ptr %v0, align 4, !dbg !24
   br label %b3, !dbg !24
 
 b3:                                               ; preds = %b2, %b1
-  %v11 = load i32, i32* %v0, align 4, !dbg !25
+  %v11 = load i32, ptr %v0, align 4, !dbg !25
   ret i32 %v11, !dbg !25
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/def-undef-deps.ll b/llvm/test/CodeGen/Hexagon/def-undef-deps.ll
index 7984d92046040..5d4b409e5f424 100644
--- a/llvm/test/CodeGen/Hexagon/def-undef-deps.ll
+++ b/llvm/test/CodeGen/Hexagon/def-undef-deps.ll
@@ -9,7 +9,7 @@
 ; that happens after register allocation. The undef flags need to be cleared
 ; earlier to avoid this issue.
 
-%0 = type <{ i8*, i8*, i16, i8, i8, i8 }>
+%0 = type <{ ptr, ptr, i16, i8, i8, i8 }>
 %1 = type { %2, %5, [3 x %3] }
 %2 = type { %3, %4, i16, i16 }
 %3 = type { i32, i32, i8, i8 }
@@ -23,15 +23,15 @@
 ; Function Attrs: nounwind readnone
 declare i32 @llvm.hexagon.M2.mpy.up(i32, i32) #1
 
-declare void @f0(%0*, i32, i32, i32, i32, i32)
+declare void @f0(ptr, i32, i32, i32, i32, i32)
 
-define void @f1(i8 zeroext %a0, %1* nocapture %a1, i8 zeroext %a2, i8 zeroext %a3) #0 {
+define void @f1(i8 zeroext %a0, ptr nocapture %a1, i8 zeroext %a2, i8 zeroext %a3) #0 {
 b0:
-  %v0 = getelementptr inbounds %1, %1* %a1, i32 0, i32 1, i32 9
-  %v1 = load i8, i8* %v0, align 1
+  %v0 = getelementptr inbounds %1, ptr %a1, i32 0, i32 1, i32 9
+  %v1 = load i8, ptr %v0, align 1
   %v2 = zext i8 %v1 to i32
-  %v3 = getelementptr inbounds %1, %1* %a1, i32 0, i32 2, i32 %v2
-  %v4 = tail call %6* @f2(i32 undef, i8 zeroext 0)
+  %v3 = getelementptr inbounds %1, ptr %a1, i32 0, i32 2, i32 %v2
+  %v4 = tail call ptr @f2(i32 undef, i8 zeroext 0)
   br i1 undef, label %b1, label %b5
 
 b1:                                               ; preds = %b0
@@ -42,13 +42,12 @@ b1:                                               ; preds = %b0
   %v9 = add nuw nsw i64 %v8, %v7
   %v10 = lshr i64 %v9, 5
   %v11 = trunc i64 %v10 to i32
-  store i32 %v11, i32* undef, align 4
+  store i32 %v11, ptr undef, align 4
   br i1 undef, label %b3, label %b2
 
 b2:                                               ; preds = %b1
-  %v12 = getelementptr inbounds %3, %3* %v3, i32 0, i32 0
-  store i32 0, i32* %v12, align 4
-  tail call void @f0(%0* @g0, i32 undef, i32 0, i32 undef, i32 undef, i32 undef)
+  store i32 0, ptr %v3, align 4
+  tail call void @f0(ptr @g0, i32 undef, i32 0, i32 undef, i32 undef, i32 undef)
   br label %b4
 
 b3:                                               ; preds = %b1
@@ -67,7 +66,7 @@ b7:                                               ; preds = %b5
   unreachable
 }
 
-declare %6* @f2(i32, i8 zeroext)
+declare ptr @f2(i32, i8 zeroext)
 
 attributes #0 = { nounwind "target-cpu"="hexagonv55" }
 attributes #1 = { nounwind readnone }

diff  --git a/llvm/test/CodeGen/Hexagon/default-align.ll b/llvm/test/CodeGen/Hexagon/default-align.ll
index 55753907688ca..f320e5c591ded 100644
--- a/llvm/test/CodeGen/Hexagon/default-align.ll
+++ b/llvm/test/CodeGen/Hexagon/default-align.ll
@@ -8,40 +8,38 @@ target triple = "hexagon-unknown--elf"
 define void @f0() #0 {
 b0:
   %v0 = alloca [64 x float], align 16
-  %v1 = bitcast [64 x float]* %v0 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 256, i8* %v1) #1
-  %v2 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 8
-  store float 0.000000e+00, float* %v2, align 16, !tbaa !0
-  %v3 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 0
-  store float 0.000000e+00, float* %v3, align 16, !tbaa !0
-  %v4 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 9
-  store float 0.000000e+00, float* %v4, align 4, !tbaa !0
-  %v5 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 1
-  store float 0.000000e+00, float* %v5, align 4, !tbaa !0
-  %v6 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 10
-  store float 0.000000e+00, float* %v6, align 8, !tbaa !0
-  %v7 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 2
-  store float 0.000000e+00, float* %v7, align 8, !tbaa !0
-  %v8 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 11
-  store float 1.000000e+00, float* %v8, align 4, !tbaa !0
-  %v9 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 3
-  store float 1.000000e+00, float* %v9, align 4, !tbaa !0
-  %v10 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 12
-  store float 0.000000e+00, float* %v10, align 16, !tbaa !0
-  %v11 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 4
-  store float 0.000000e+00, float* %v11, align 16, !tbaa !0
-  call void @f1(float* %v3) #2
-  call void @llvm.lifetime.end.p0i8(i64 256, i8* %v1) #1
+  call void @llvm.lifetime.start.p0(i64 256, ptr %v0) #1
+  %v2 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 8
+  store float 0.000000e+00, ptr %v2, align 16, !tbaa !0
+  store float 0.000000e+00, ptr %v0, align 16, !tbaa !0
+  %v4 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 9
+  store float 0.000000e+00, ptr %v4, align 4, !tbaa !0
+  %v5 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 1
+  store float 0.000000e+00, ptr %v5, align 4, !tbaa !0
+  %v6 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 10
+  store float 0.000000e+00, ptr %v6, align 8, !tbaa !0
+  %v7 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 2
+  store float 0.000000e+00, ptr %v7, align 8, !tbaa !0
+  %v8 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 11
+  store float 1.000000e+00, ptr %v8, align 4, !tbaa !0
+  %v9 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 3
+  store float 1.000000e+00, ptr %v9, align 4, !tbaa !0
+  %v10 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 12
+  store float 0.000000e+00, ptr %v10, align 16, !tbaa !0
+  %v11 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 4
+  store float 0.000000e+00, ptr %v11, align 16, !tbaa !0
+  call void @f1(ptr %v0) #2
+  call void @llvm.lifetime.end.p0(i64 256, ptr %v0) #1
   ret void
 }
 
-declare void @f1(float*) #0
+declare void @f1(ptr) #0
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
 
 attributes #0 = { nounwind "target-cpu"="hexagonv55" }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/deflate.ll b/llvm/test/CodeGen/Hexagon/deflate.ll
index dd70fe8bce6a3..6a925bce52f9d 100644
--- a/llvm/test/CodeGen/Hexagon/deflate.ll
+++ b/llvm/test/CodeGen/Hexagon/deflate.ll
@@ -18,13 +18,12 @@ b1:                                               ; preds = %b2
 b2:                                               ; preds = %b2, %b0
   %v0 = phi i32 [ 0, %b0 ], [ %v1, %b2 ]
   %v1 = add nsw i32 %v0, 4
-  %v2 = getelementptr [0 x i16], [0 x i16]* @g0, i32 0, i32 %v0
-  %v3 = bitcast i16* %v2 to <4 x i16>*
-  %v4 = load <4 x i16>, <4 x i16>* %v3, align 2
+  %v2 = getelementptr [0 x i16], ptr @g0, i32 0, i32 %v0
+  %v4 = load <4 x i16>, ptr %v2, align 2
   %v5 = icmp slt <4 x i16> %v4, zeroinitializer
   %v6 = xor <4 x i16> %v4, <i16 -32768, i16 -32768, i16 -32768, i16 -32768>
   %v7 = select <4 x i1> %v5, <4 x i16> %v6, <4 x i16> zeroinitializer
-  store <4 x i16> %v7, <4 x i16>* %v3, align 2
+  store <4 x i16> %v7, ptr %v2, align 2
   %v8 = icmp slt i32 %v1, 32768
   br i1 %v8, label %b2, label %b1
 }

diff  --git a/llvm/test/CodeGen/Hexagon/dhry.ll b/llvm/test/CodeGen/Hexagon/dhry.ll
index 8fd4a5e4f801f..076ac18865816 100644
--- a/llvm/test/CodeGen/Hexagon/dhry.ll
+++ b/llvm/test/CodeGen/Hexagon/dhry.ll
@@ -2,7 +2,7 @@
 ; CHECK: combine(#11,#10)
 
 ; Function Attrs: nounwind
-define void @f0(i32* nocapture %a0, i32* nocapture %a1) #0 {
+define void @f0(ptr nocapture %a0, ptr nocapture %a1) #0 {
 b0:
   br label %b2
 
@@ -10,16 +10,16 @@ b1:                                               ; preds = %b4
   br label %b5
 
 b2:                                               ; preds = %b0
-  %v0 = getelementptr inbounds i32, i32* %a0, i32 2
-  %v1 = getelementptr inbounds i32, i32* %a0, i32 3
+  %v0 = getelementptr inbounds i32, ptr %a0, i32 2
+  %v1 = getelementptr inbounds i32, ptr %a0, i32 3
   br label %b3
 
 b3:                                               ; preds = %b2
   br label %b4
 
 b4:                                               ; preds = %b4, %b3
-  %v2 = load i32, i32* %v0, align 4, !tbaa !0
-  %v3 = load i32, i32* %v1, align 4, !tbaa !0
+  %v2 = load i32, ptr %v0, align 4, !tbaa !0
+  %v3 = load i32, ptr %v1, align 4, !tbaa !0
   %v4 = tail call i32 @f1(i32 %v2, i32 %v3) #0
   %v5 = icmp eq i32 %v4, 0
   br i1 %v5, label %b4, label %b1

diff  --git a/llvm/test/CodeGen/Hexagon/dhry_proc8.ll b/llvm/test/CodeGen/Hexagon/dhry_proc8.ll
index 53ea59de92216..2da10e8c811a0 100644
--- a/llvm/test/CodeGen/Hexagon/dhry_proc8.ll
+++ b/llvm/test/CodeGen/Hexagon/dhry_proc8.ll
@@ -16,32 +16,32 @@
 @g0 = external global i32
 
 ; Function Attrs: nounwind
-define i32 @f0(i32* nocapture %a0, [50 x i32]* nocapture %a1, i32 %a2, i32 %a3) #0 {
+define i32 @f0(ptr nocapture %a0, ptr nocapture %a1, i32 %a2, i32 %a3) #0 {
 b0:
   %v0 = add nsw i32 %a2, 5
-  %v1 = getelementptr inbounds i32, i32* %a0, i32 %v0
-  store i32 %a3, i32* %v1, align 4, !tbaa !0
+  %v1 = getelementptr inbounds i32, ptr %a0, i32 %v0
+  store i32 %a3, ptr %v1, align 4, !tbaa !0
   %v2 = add nsw i32 %a2, 6
-  %v3 = getelementptr inbounds i32, i32* %a0, i32 %v2
-  store i32 %a3, i32* %v3, align 4, !tbaa !0
+  %v3 = getelementptr inbounds i32, ptr %a0, i32 %v2
+  store i32 %a3, ptr %v3, align 4, !tbaa !0
   %v4 = add nsw i32 %a2, 35
-  %v5 = getelementptr inbounds i32, i32* %a0, i32 %v4
-  store i32 %v0, i32* %v5, align 4, !tbaa !0
-  %v6 = getelementptr inbounds [50 x i32], [50 x i32]* %a1, i32 %v0, i32 %v0
-  store i32 %v0, i32* %v6, align 4, !tbaa !0
+  %v5 = getelementptr inbounds i32, ptr %a0, i32 %v4
+  store i32 %v0, ptr %v5, align 4, !tbaa !0
+  %v6 = getelementptr inbounds [50 x i32], ptr %a1, i32 %v0, i32 %v0
+  store i32 %v0, ptr %v6, align 4, !tbaa !0
   %v7 = add nsw i32 %a2, 6
-  %v8 = getelementptr inbounds [50 x i32], [50 x i32]* %a1, i32 %v0, i32 %v7
-  store i32 %v0, i32* %v8, align 4, !tbaa !0
+  %v8 = getelementptr inbounds [50 x i32], ptr %a1, i32 %v0, i32 %v7
+  store i32 %v0, ptr %v8, align 4, !tbaa !0
   %v9 = add nsw i32 %a2, 4
-  %v10 = getelementptr inbounds [50 x i32], [50 x i32]* %a1, i32 %v0, i32 %v9
-  %v11 = load i32, i32* %v10, align 4, !tbaa !0
+  %v10 = getelementptr inbounds [50 x i32], ptr %a1, i32 %v0, i32 %v9
+  %v11 = load i32, ptr %v10, align 4, !tbaa !0
   %v12 = add nsw i32 %v11, 1
-  store i32 %v12, i32* %v10, align 4, !tbaa !0
-  %v13 = load i32, i32* %v1, align 4, !tbaa !0
+  store i32 %v12, ptr %v10, align 4, !tbaa !0
+  %v13 = load i32, ptr %v1, align 4, !tbaa !0
   %v14 = add nsw i32 %a2, 25
-  %v15 = getelementptr inbounds [50 x i32], [50 x i32]* %a1, i32 %v14, i32 %v0
-  store i32 %v13, i32* %v15, align 4, !tbaa !0
-  store i32 5, i32* @g0, align 4, !tbaa !0
+  %v15 = getelementptr inbounds [50 x i32], ptr %a1, i32 %v14, i32 %v0
+  store i32 %v13, ptr %v15, align 4, !tbaa !0
+  store i32 5, ptr @g0, align 4, !tbaa !0
   ret i32 undef
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/dhry_stall.ll b/llvm/test/CodeGen/Hexagon/dhry_stall.ll
index a9eae47c23777..bfe99d51634c7 100644
--- a/llvm/test/CodeGen/Hexagon/dhry_stall.ll
+++ b/llvm/test/CodeGen/Hexagon/dhry_stall.ll
@@ -11,32 +11,32 @@ target triple = "hexagon"
 @g0 = external global i32
 
 ; Function Attrs: nounwind
-define i32 @f0(i32* nocapture %a0, [50 x i32]* nocapture %a1, i32 %a2, i32 %a3) #0 {
+define i32 @f0(ptr nocapture %a0, ptr nocapture %a1, i32 %a2, i32 %a3) #0 {
 b0:
   %v0 = add nsw i32 %a2, 5
-  %v1 = getelementptr inbounds i32, i32* %a0, i32 %v0
-  store i32 %a3, i32* %v1, align 4, !tbaa !0
+  %v1 = getelementptr inbounds i32, ptr %a0, i32 %v0
+  store i32 %a3, ptr %v1, align 4, !tbaa !0
   %v2 = add nsw i32 %a2, 6
-  %v3 = getelementptr inbounds i32, i32* %a0, i32 %v2
-  store i32 %a3, i32* %v3, align 4, !tbaa !0
+  %v3 = getelementptr inbounds i32, ptr %a0, i32 %v2
+  store i32 %a3, ptr %v3, align 4, !tbaa !0
   %v4 = add nsw i32 %a2, 35
-  %v5 = getelementptr inbounds i32, i32* %a0, i32 %v4
-  store i32 %v0, i32* %v5, align 4, !tbaa !0
-  %v6 = getelementptr inbounds [50 x i32], [50 x i32]* %a1, i32 %v0, i32 %v0
-  store i32 %v0, i32* %v6, align 4, !tbaa !0
+  %v5 = getelementptr inbounds i32, ptr %a0, i32 %v4
+  store i32 %v0, ptr %v5, align 4, !tbaa !0
+  %v6 = getelementptr inbounds [50 x i32], ptr %a1, i32 %v0, i32 %v0
+  store i32 %v0, ptr %v6, align 4, !tbaa !0
   %v7 = add nsw i32 %a2, 6
-  %v8 = getelementptr inbounds [50 x i32], [50 x i32]* %a1, i32 %v0, i32 %v7
-  store i32 %v0, i32* %v8, align 4, !tbaa !0
+  %v8 = getelementptr inbounds [50 x i32], ptr %a1, i32 %v0, i32 %v7
+  store i32 %v0, ptr %v8, align 4, !tbaa !0
   %v9 = add nsw i32 %a2, 4
-  %v10 = getelementptr inbounds [50 x i32], [50 x i32]* %a1, i32 %v0, i32 %v9
-  %v11 = load i32, i32* %v10, align 4, !tbaa !0
+  %v10 = getelementptr inbounds [50 x i32], ptr %a1, i32 %v0, i32 %v9
+  %v11 = load i32, ptr %v10, align 4, !tbaa !0
   %v12 = add nsw i32 %v11, 1
-  store i32 %v12, i32* %v10, align 4, !tbaa !0
-  %v13 = load i32, i32* %v1, align 4, !tbaa !0
+  store i32 %v12, ptr %v10, align 4, !tbaa !0
+  %v13 = load i32, ptr %v1, align 4, !tbaa !0
   %v14 = add nsw i32 %a2, 25
-  %v15 = getelementptr inbounds [50 x i32], [50 x i32]* %a1, i32 %v14, i32 %v0
-  store i32 %v13, i32* %v15, align 4, !tbaa !0
-  store i32 5, i32* @g0, align 4, !tbaa !0
+  %v15 = getelementptr inbounds [50 x i32], ptr %a1, i32 %v14, i32 %v0
+  store i32 %v13, ptr %v15, align 4, !tbaa !0
+  store i32 5, ptr @g0, align 4, !tbaa !0
   ret i32 undef
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/disable-const64-tinycore.ll b/llvm/test/CodeGen/Hexagon/disable-const64-tinycore.ll
index 7ed59edc626b6..2d8f12a77d381 100644
--- a/llvm/test/CodeGen/Hexagon/disable-const64-tinycore.ll
+++ b/llvm/test/CodeGen/Hexagon/disable-const64-tinycore.ll
@@ -2,80 +2,69 @@
 
 ;CHECK-NOT: CONST64
 
-define dso_local void @analyze(i16* nocapture %analysisBuffer0, i16* nocapture %analysisBuffer1, i32* nocapture %subband) local_unnamed_addr {
+define dso_local void @analyze(ptr nocapture %analysisBuffer0, ptr nocapture %analysisBuffer1, ptr nocapture %subband) local_unnamed_addr {
 entry:
-  %0 = load i64, i64* undef, align 8
+  %0 = load i64, ptr undef, align 8
   %1 = tail call i64 @llvm.hexagon.S2.vtrunewh(i64 %0, i64 undef)
   %2 = tail call i64 @llvm.hexagon.S2.vtrunowh(i64 %0, i64 undef)
   %_HEXAGON_V64_internal_union.sroa.3.0.extract.shift = and i64 %1, -4294967296
   %3 = shl i64 %2, 32
   %conv15 = ashr exact i64 %3, 32
-  %arrayidx16 = getelementptr inbounds i16, i16* %analysisBuffer0, i32 4
-  %4 = bitcast i16* %arrayidx16 to i64*
-  store i64 %_HEXAGON_V64_internal_union.sroa.3.0.extract.shift, i64* %4, align 8
-  %arrayidx17 = getelementptr inbounds i16, i16* %analysisBuffer1, i32 4
-  %5 = bitcast i16* %arrayidx17 to i64*
-  store i64 %conv15, i64* %5, align 8
-  %arrayidx18 = getelementptr inbounds i16, i16* %analysisBuffer1, i32 8
-  %6 = bitcast i16* %arrayidx18 to i64*
-  %7 = load i64, i64* %6, align 8
-  %8 = tail call i64 @llvm.hexagon.M2.mmachs.s1(i64 undef, i64 29819854865948160, i64 %7)
-  store i64 %8, i64* %6, align 8
-  %arrayidx34 = getelementptr inbounds i16, i16* %analysisBuffer0, i32 40
-  %9 = bitcast i16* %arrayidx34 to i64*
-  %10 = load i64, i64* %9, align 8
-  %11 = tail call i64 @llvm.hexagon.M2.mmachs.s1(i64 undef, i64 282574488406740992, i64 %10)
-  %arrayidx35 = getelementptr inbounds i16, i16* %analysisBuffer0, i32 56
-  %12 = bitcast i16* %arrayidx35 to i64*
-  %13 = load i64, i64* %12, align 8
-  %14 = tail call i64 @llvm.hexagon.M2.mmacls.s1(i64 undef, i64 undef, i64 %13)
-  %15 = tail call i64 @llvm.hexagon.M2.mmachs.s1(i64 %8, i64 282574488406740992, i64 %7)
-  %16 = load i64, i64* null, align 8
-  %17 = tail call i64 @llvm.hexagon.M2.mmacls.s1(i64 %14, i64 27234903028652032, i64 %16)
-  %18 = tail call i64 @llvm.hexagon.M2.mmacls.s1(i64 undef, i64 27234903028652032, i64 %7)
-  %19 = tail call i64 @llvm.hexagon.M2.mmachs.s1(i64 %15, i64 7661056, i64 %7)
-  %_HEXAGON_V64_internal_union53.sroa.3.0.extract.shift = lshr i64 %17, 32
-  %_HEXAGON_V64_internal_union62.sroa.3.0.extract.shift = and i64 %18, -4294967296
+  %arrayidx16 = getelementptr inbounds i16, ptr %analysisBuffer0, i32 4
+  store i64 %_HEXAGON_V64_internal_union.sroa.3.0.extract.shift, ptr %arrayidx16, align 8
+  %arrayidx17 = getelementptr inbounds i16, ptr %analysisBuffer1, i32 4
+  store i64 %conv15, ptr %arrayidx17, align 8
+  %arrayidx18 = getelementptr inbounds i16, ptr %analysisBuffer1, i32 8
+  %4 = load i64, ptr %arrayidx18, align 8
+  %5 = tail call i64 @llvm.hexagon.M2.mmachs.s1(i64 undef, i64 29819854865948160, i64 %4)
+  store i64 %5, ptr %arrayidx18, align 8
+  %arrayidx34 = getelementptr inbounds i16, ptr %analysisBuffer0, i32 40
+  %6 = load i64, ptr %arrayidx34, align 8
+  %7 = tail call i64 @llvm.hexagon.M2.mmachs.s1(i64 undef, i64 282574488406740992, i64 %6)
+  %arrayidx35 = getelementptr inbounds i16, ptr %analysisBuffer0, i32 56
+  %8 = load i64, ptr %arrayidx35, align 8
+  %9 = tail call i64 @llvm.hexagon.M2.mmacls.s1(i64 undef, i64 undef, i64 %8)
+  %10 = tail call i64 @llvm.hexagon.M2.mmachs.s1(i64 %5, i64 282574488406740992, i64 %4)
+  %11 = load i64, ptr null, align 8
+  %12 = tail call i64 @llvm.hexagon.M2.mmacls.s1(i64 %9, i64 27234903028652032, i64 %11)
+  %13 = tail call i64 @llvm.hexagon.M2.mmacls.s1(i64 undef, i64 27234903028652032, i64 %4)
+  %14 = tail call i64 @llvm.hexagon.M2.mmachs.s1(i64 %10, i64 7661056, i64 %4)
+  %_HEXAGON_V64_internal_union53.sroa.3.0.extract.shift = lshr i64 %12, 32
+  %_HEXAGON_V64_internal_union62.sroa.3.0.extract.shift = and i64 %13, -4294967296
   %_HEXAGON_V64_internal_union71.sroa.0.0.insert.insert = or i64 %_HEXAGON_V64_internal_union62.sroa.3.0.extract.shift, %_HEXAGON_V64_internal_union53.sroa.3.0.extract.shift
-  %_HEXAGON_V64_internal_union79.sroa.4.0.insert.shift = shl i64 %19, 32
-  %_HEXAGON_V64_internal_union79.sroa.0.0.insert.ext = and i64 %11, 4294967295
+  %_HEXAGON_V64_internal_union79.sroa.4.0.insert.shift = shl i64 %14, 32
+  %_HEXAGON_V64_internal_union79.sroa.0.0.insert.ext = and i64 %7, 4294967295
   %_HEXAGON_V64_internal_union79.sroa.0.0.insert.insert = or i64 %_HEXAGON_V64_internal_union79.sroa.4.0.insert.shift, %_HEXAGON_V64_internal_union79.sroa.0.0.insert.ext
-  %20 = bitcast i32* %subband to i64*
-  %21 = tail call i64 @llvm.hexagon.M2.mmpyh.s0(i64 %_HEXAGON_V64_internal_union71.sroa.0.0.insert.insert, i64 undef)
-  %22 = tail call i64 @llvm.hexagon.A2.vsubw(i64 undef, i64 %21)
-  %23 = tail call i64 @llvm.hexagon.A2.vaddw(i64 undef, i64 undef)
-  %24 = tail call i64 @llvm.hexagon.S2.asl.i.vw(i64 %23, i32 2)
-  %25 = tail call i64 @llvm.hexagon.M2.mmpyl.s0(i64 0, i64 undef)
-  %26 = tail call i64 @llvm.hexagon.S2.asl.i.vw(i64 %25, i32 2)
-  %27 = tail call i64 @llvm.hexagon.A2.vsubw(i64 undef, i64 %24)
-  %28 = tail call i64 @llvm.hexagon.A2.vaddw(i64 %26, i64 %_HEXAGON_V64_internal_union79.sroa.0.0.insert.insert)
-  %29 = tail call i64 @llvm.hexagon.M2.mmpyh.s0(i64 %28, i64 undef)
-  %30 = tail call i64 @llvm.hexagon.M2.mmpyl.s0(i64 %27, i64 3998767301)
-  %31 = tail call i64 @llvm.hexagon.S2.asl.i.vw(i64 %30, i32 2)
-  %32 = tail call i64 @llvm.hexagon.A2.vaddw(i64 undef, i64 %29)
-  %33 = tail call i64 @llvm.hexagon.A2.vaddw(i64 0, i64 %31)
-  %34 = tail call i64 @llvm.hexagon.A2.vaddw(i64 %22, i64 undef)
-  %_HEXAGON_V64_internal_union8.sroa.0.0.insert.ext.i = and i64 %32, 4294967295
-  store i64 %_HEXAGON_V64_internal_union8.sroa.0.0.insert.ext.i, i64* %20, align 8
-  %_HEXAGON_V64_internal_union17.sroa.5.0.insert.shift.i = shl i64 %34, 32
-  %_HEXAGON_V64_internal_union17.sroa.0.0.insert.ext.i = and i64 %33, 4294967295
+  %15 = tail call i64 @llvm.hexagon.M2.mmpyh.s0(i64 %_HEXAGON_V64_internal_union71.sroa.0.0.insert.insert, i64 undef)
+  %16 = tail call i64 @llvm.hexagon.A2.vsubw(i64 undef, i64 %15)
+  %17 = tail call i64 @llvm.hexagon.A2.vaddw(i64 undef, i64 undef)
+  %18 = tail call i64 @llvm.hexagon.S2.asl.i.vw(i64 %17, i32 2)
+  %19 = tail call i64 @llvm.hexagon.M2.mmpyl.s0(i64 0, i64 undef)
+  %20 = tail call i64 @llvm.hexagon.S2.asl.i.vw(i64 %19, i32 2)
+  %21 = tail call i64 @llvm.hexagon.A2.vsubw(i64 undef, i64 %18)
+  %22 = tail call i64 @llvm.hexagon.A2.vaddw(i64 %20, i64 %_HEXAGON_V64_internal_union79.sroa.0.0.insert.insert)
+  %23 = tail call i64 @llvm.hexagon.M2.mmpyh.s0(i64 %22, i64 undef)
+  %24 = tail call i64 @llvm.hexagon.M2.mmpyl.s0(i64 %21, i64 3998767301)
+  %25 = tail call i64 @llvm.hexagon.S2.asl.i.vw(i64 %24, i32 2)
+  %26 = tail call i64 @llvm.hexagon.A2.vaddw(i64 undef, i64 %23)
+  %27 = tail call i64 @llvm.hexagon.A2.vaddw(i64 0, i64 %25)
+  %28 = tail call i64 @llvm.hexagon.A2.vaddw(i64 %16, i64 undef)
+  %_HEXAGON_V64_internal_union8.sroa.0.0.insert.ext.i = and i64 %26, 4294967295
+  store i64 %_HEXAGON_V64_internal_union8.sroa.0.0.insert.ext.i, ptr %subband, align 8
+  %_HEXAGON_V64_internal_union17.sroa.5.0.insert.shift.i = shl i64 %28, 32
+  %_HEXAGON_V64_internal_union17.sroa.0.0.insert.ext.i = and i64 %27, 4294967295
   %_HEXAGON_V64_internal_union17.sroa.0.0.insert.insert.i = or i64 %_HEXAGON_V64_internal_union17.sroa.5.0.insert.shift.i, %_HEXAGON_V64_internal_union17.sroa.0.0.insert.ext.i
-  %arrayidx31.i = getelementptr inbounds i32, i32* %subband, i32 2
-  %35 = bitcast i32* %arrayidx31.i to i64*
-  store i64 %_HEXAGON_V64_internal_union17.sroa.0.0.insert.insert.i, i64* %35, align 8
-  %_HEXAGON_V64_internal_union32.sroa.0.0.insert.ext.i = and i64 %23, 4294967295
-  %arrayidx46.i = getelementptr inbounds i32, i32* %subband, i32 4
-  %36 = bitcast i32* %arrayidx46.i to i64*
-  store i64 %_HEXAGON_V64_internal_union32.sroa.0.0.insert.ext.i, i64* %36, align 8
-  %arrayidx55.i = getelementptr inbounds i32, i32* %subband, i32 6
-  %37 = bitcast i32* %arrayidx55.i to i64*
-  store i64 0, i64* %37, align 8
-  %arrayidx64.i = getelementptr inbounds i32, i32* %subband, i32 8
-  %38 = bitcast i32* %arrayidx64.i to i64*
-  store i64 0, i64* %38, align 8
-  %arrayidx73.i = getelementptr inbounds i32, i32* %subband, i32 12
-  %39 = bitcast i32* %arrayidx73.i to i64*
-  store i64 0, i64* %39, align 8
+  %arrayidx31.i = getelementptr inbounds i32, ptr %subband, i32 2
+  store i64 %_HEXAGON_V64_internal_union17.sroa.0.0.insert.insert.i, ptr %arrayidx31.i, align 8
+  %_HEXAGON_V64_internal_union32.sroa.0.0.insert.ext.i = and i64 %17, 4294967295
+  %arrayidx46.i = getelementptr inbounds i32, ptr %subband, i32 4
+  store i64 %_HEXAGON_V64_internal_union32.sroa.0.0.insert.ext.i, ptr %arrayidx46.i, align 8
+  %arrayidx55.i = getelementptr inbounds i32, ptr %subband, i32 6
+  store i64 0, ptr %arrayidx55.i, align 8
+  %arrayidx64.i = getelementptr inbounds i32, ptr %subband, i32 8
+  store i64 0, ptr %arrayidx64.i, align 8
+  %arrayidx73.i = getelementptr inbounds i32, ptr %subband, i32 12
+  store i64 0, ptr %arrayidx73.i, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/disable-const64.ll b/llvm/test/CodeGen/Hexagon/disable-const64.ll
index df702537163e0..6e34f062fd7db 100644
--- a/llvm/test/CodeGen/Hexagon/disable-const64.ll
+++ b/llvm/test/CodeGen/Hexagon/disable-const64.ll
@@ -7,24 +7,22 @@ target triple = "hexagon"
 ; CHECK: ##
 ; CHECK: ##
 
-define void @analyze(i16* nocapture %in) local_unnamed_addr {
+define void @analyze(ptr nocapture %in) local_unnamed_addr {
 entry:
-  %0 = bitcast i16* %in to i64*
-  %1 = tail call i64 @llvm.hexagon.M2.mmachs.s1(i64 10230955697128160, i64 10230955697128160, i64 0)
-  store i64 %1, i64* %0, align 8
+  %0 = tail call i64 @llvm.hexagon.M2.mmachs.s1(i64 10230955697128160, i64 10230955697128160, i64 0)
+  store i64 %0, ptr %in, align 8
   ret void
 }
 
 ; CHECK-NOT: CONST64
-define dso_local void @analyze2(i16* nocapture %in) local_unnamed_addr {
+define dso_local void @analyze2(ptr nocapture %in) local_unnamed_addr {
 entry:
-  %arrayidx = getelementptr inbounds i16, i16* %in, i32 3
-  %0 = load i16, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %in, i32 3
+  %0 = load i16, ptr %arrayidx, align 2
   %conv = sext i16 %0 to i64
   %1 = tail call i64 @llvm.hexagon.M2.mmacls.s1(i64 undef, i64 30432282833584128, i64 %conv)
-  %arrayidx4 = getelementptr inbounds i16, i16* %in, i32 4
-  %2 = bitcast i16* %arrayidx4 to i64*
-  store i64 %1, i64* %2, align 8
+  %arrayidx4 = getelementptr inbounds i16, ptr %in, i32 4
+  store i64 %1, ptr %arrayidx4, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/dmul.ll b/llvm/test/CodeGen/Hexagon/dmul.ll
index a6cf62b0c0aae..0ad862e0dbdbd 100644
--- a/llvm/test/CodeGen/Hexagon/dmul.ll
+++ b/llvm/test/CodeGen/Hexagon/dmul.ll
@@ -8,11 +8,11 @@ entry:
   %a = alloca double, align 8
   %b = alloca double, align 8
   %c = alloca double, align 8
-  store volatile double 1.540000e+01, double* %a, align 8
-  store volatile double 9.100000e+00, double* %b, align 8
-  %0 = load volatile double, double* %b, align 8
-  %1 = load volatile double, double* %a, align 8
+  store volatile double 1.540000e+01, ptr %a, align 8
+  store volatile double 9.100000e+00, ptr %b, align 8
+  %0 = load volatile double, ptr %b, align 8
+  %1 = load volatile double, ptr %a, align 8
   %mul = fmul double %0, %1
-  store double %mul, double* %c, align 8
+  store double %mul, ptr %c, align 8
   ret i32 0
 }

diff  --git a/llvm/test/CodeGen/Hexagon/double.ll b/llvm/test/CodeGen/Hexagon/double.ll
index 336f32fee6117..458ab3d8611dd 100644
--- a/llvm/test/CodeGen/Hexagon/double.ll
+++ b/llvm/test/CodeGen/Hexagon/double.ll
@@ -2,22 +2,22 @@
 ; CHECK: __hexagon_adddf3
 ; CHECK: __hexagon_subdf3
 
-define void @f0(double* %a0, double %a1, double %a2) #0 {
+define void @f0(ptr %a0, double %a1, double %a2) #0 {
 b0:
-  %v0 = alloca double*, align 4
+  %v0 = alloca ptr, align 4
   %v1 = alloca double, align 8
   %v2 = alloca double, align 8
-  store double* %a0, double** %v0, align 4
-  store double %a1, double* %v1, align 8
-  store double %a2, double* %v2, align 8
-  %v3 = load double*, double** %v0, align 4
-  %v4 = load double, double* %v3
-  %v5 = load double, double* %v1, align 8
+  store ptr %a0, ptr %v0, align 4
+  store double %a1, ptr %v1, align 8
+  store double %a2, ptr %v2, align 8
+  %v3 = load ptr, ptr %v0, align 4
+  %v4 = load double, ptr %v3
+  %v5 = load double, ptr %v1, align 8
   %v6 = fadd double %v4, %v5
-  %v7 = load double, double* %v2, align 8
+  %v7 = load double, ptr %v2, align 8
   %v8 = fsub double %v6, %v7
-  %v9 = load double*, double** %v0, align 4
-  store double %v8, double* %v9
+  %v9 = load ptr, ptr %v0, align 4
+  store double %v8, ptr %v9
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/dsub.ll b/llvm/test/CodeGen/Hexagon/dsub.ll
index d7e44b307cf8d..b9a89201969bb 100644
--- a/llvm/test/CodeGen/Hexagon/dsub.ll
+++ b/llvm/test/CodeGen/Hexagon/dsub.ll
@@ -8,11 +8,11 @@ entry:
   %a = alloca double, align 8
   %b = alloca double, align 8
   %c = alloca double, align 8
-  store volatile double 1.540000e+01, double* %a, align 8
-  store volatile double 9.100000e+00, double* %b, align 8
-  %0 = load volatile double, double* %b, align 8
-  %1 = load volatile double, double* %a, align 8
+  store volatile double 1.540000e+01, ptr %a, align 8
+  store volatile double 9.100000e+00, ptr %b, align 8
+  %0 = load volatile double, ptr %b, align 8
+  %1 = load volatile double, ptr %a, align 8
   %sub = fsub double %0, %1
-  store double %sub, double* %c, align 8
+  store double %sub, ptr %c, align 8
   ret i32 0
 }

diff  --git a/llvm/test/CodeGen/Hexagon/dualstore.ll b/llvm/test/CodeGen/Hexagon/dualstore.ll
index 9f4569d6459c7..7318fe54ba1d6 100644
--- a/llvm/test/CodeGen/Hexagon/dualstore.ll
+++ b/llvm/test/CodeGen/Hexagon/dualstore.ll
@@ -4,8 +4,8 @@
 ; CHECK: 00 40 9f 52 529f4000
 ; CHECK: 10 10 00 f0 f0001010
 
-define void @foo(i32* %a, i32* %b) {
-  store i32 0, i32* %a
-  store i32 0, i32* %b
+define void @foo(ptr %a, ptr %b) {
+  store i32 0, ptr %a
+  store i32 0, ptr %b
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/early-if-conversion-bug1.ll b/llvm/test/CodeGen/Hexagon/early-if-conversion-bug1.ll
index 4a67a98d19623..395b0ce3bcca7 100644
--- a/llvm/test/CodeGen/Hexagon/early-if-conversion-bug1.ll
+++ b/llvm/test/CodeGen/Hexagon/early-if-conversion-bug1.ll
@@ -11,312 +11,279 @@ target triple = "hexagon"
 %"class.std::__1::__libcpp_compressed_pair_imp" = type { %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__rep" }
 %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__rep" = type { %union.anon }
 %union.anon = type { %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__long" }
-%"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__long" = type { i32, i32, i8* }
-%"class.std::__1::ios_base" = type { i32 (...)**, i32, i32, i32, i32, i32, i8*, i8*, void (i8, %"class.std::__1::ios_base"*, i32)**, i32*, i32, i32, i32*, i32, i32, i8**, i32, i32 }
-%"class.std::__1::basic_streambuf" = type { i32 (...)**, %"class.std::__1::locale", i8*, i8*, i8*, i8*, i8*, i8* }
-%"class.std::__1::locale" = type { %"class.std::__1::locale::__imp"* }
+%"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__long" = type { i32, i32, ptr }
+%"class.std::__1::ios_base" = type { ptr, i32, i32, i32, i32, i32, ptr, ptr, ptr, ptr, i32, i32, ptr, i32, i32, ptr, i32, i32 }
+%"class.std::__1::basic_streambuf" = type { ptr, %"class.std::__1::locale", ptr, ptr, ptr, ptr, ptr, ptr }
+%"class.std::__1::locale" = type { ptr }
 %"class.std::__1::locale::__imp" = type opaque
 %"class.std::__1::allocator" = type { i8 }
-%"class.std::__1::ostreambuf_iterator" = type { %"class.std::__1::basic_streambuf"* }
+%"class.std::__1::ostreambuf_iterator" = type { ptr }
 %"class.std::__1::__basic_string_common" = type { i8 }
 %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__short" = type { %union.anon.0, [11 x i8] }
 %union.anon.0 = type { i8 }
 
 ; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32, i1) #0
 
 declare i32 @__gxx_personality_v0(...)
 
 ; Function Attrs: nounwind
-declare void @_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEED1Ev(%"class.std::__1::basic_string"*) #1
+declare void @_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEED1Ev(ptr) #1
 
-define weak_odr hidden i32 @_ZNSt3__116__pad_and_outputIcNS_11char_traitsIcEEEENS_19ostreambuf_iteratorIT_T0_EES6_PKS4_S8_S8_RNS_8ios_baseES4_(i32 %__s.coerce, i8* %__ob, i8* %__op, i8* %__oe, %"class.std::__1::ios_base"* nonnull %__iob, i8 zeroext %__fl) #2 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define weak_odr hidden i32 @_ZNSt3__116__pad_and_outputIcNS_11char_traitsIcEEEENS_19ostreambuf_iteratorIT_T0_EES6_PKS4_S8_S8_RNS_8ios_baseES4_(i32 %__s.coerce, ptr %__ob, ptr %__op, ptr %__oe, ptr nonnull %__iob, i8 zeroext %__fl) #2 personality ptr @__gxx_personality_v0 {
 entry:
-  %this.addr.i66 = alloca %"class.std::__1::basic_streambuf"*, align 4
-  %__s.addr.i67 = alloca i8*, align 4
+  %this.addr.i66 = alloca ptr, align 4
+  %__s.addr.i67 = alloca ptr, align 4
   %__n.addr.i68 = alloca i32, align 4
-  %__p.addr.i.i = alloca i8*, align 4
-  %this.addr.i.i.i13.i.i = alloca %"class.std::__1::__libcpp_compressed_pair_imp"*, align 4
-  %this.addr.i.i14.i.i = alloca %"class.std::__1::__compressed_pair"*, align 4
-  %this.addr.i15.i.i = alloca %"class.std::__1::basic_string"*, align 4
-  %__x.addr.i.i.i.i.i = alloca i8*, align 4
-  %__r.addr.i.i.i.i = alloca i8*, align 4
-  %this.addr.i.i.i4.i.i = alloca %"class.std::__1::__libcpp_compressed_pair_imp"*, align 4
-  %this.addr.i.i5.i.i = alloca %"class.std::__1::__compressed_pair"*, align 4
-  %this.addr.i6.i.i = alloca %"class.std::__1::basic_string"*, align 4
-  %this.addr.i.i.i.i.i56 = alloca %"class.std::__1::__libcpp_compressed_pair_imp"*, align 4
-  %this.addr.i.i.i.i57 = alloca %"class.std::__1::__compressed_pair"*, align 4
-  %this.addr.i.i.i58 = alloca %"class.std::__1::basic_string"*, align 4
-  %this.addr.i.i59 = alloca %"class.std::__1::basic_string"*, align 4
-  %this.addr.i60 = alloca %"class.std::__1::basic_string"*, align 4
-  %this.addr.i.i.i.i.i = alloca %"class.std::__1::allocator"*, align 4
-  %this.addr.i.i.i.i = alloca %"class.std::__1::__libcpp_compressed_pair_imp"*, align 4
-  %this.addr.i.i.i = alloca %"class.std::__1::__compressed_pair"*, align 4
-  %this.addr.i.i = alloca %"class.std::__1::basic_string"*, align 4
+  %__p.addr.i.i = alloca ptr, align 4
+  %this.addr.i.i.i13.i.i = alloca ptr, align 4
+  %this.addr.i.i14.i.i = alloca ptr, align 4
+  %this.addr.i15.i.i = alloca ptr, align 4
+  %__x.addr.i.i.i.i.i = alloca ptr, align 4
+  %__r.addr.i.i.i.i = alloca ptr, align 4
+  %this.addr.i.i.i4.i.i = alloca ptr, align 4
+  %this.addr.i.i5.i.i = alloca ptr, align 4
+  %this.addr.i6.i.i = alloca ptr, align 4
+  %this.addr.i.i.i.i.i56 = alloca ptr, align 4
+  %this.addr.i.i.i.i57 = alloca ptr, align 4
+  %this.addr.i.i.i58 = alloca ptr, align 4
+  %this.addr.i.i59 = alloca ptr, align 4
+  %this.addr.i60 = alloca ptr, align 4
+  %this.addr.i.i.i.i.i = alloca ptr, align 4
+  %this.addr.i.i.i.i = alloca ptr, align 4
+  %this.addr.i.i.i = alloca ptr, align 4
+  %this.addr.i.i = alloca ptr, align 4
   %__n.addr.i.i = alloca i32, align 4
   %__c.addr.i.i = alloca i8, align 1
-  %this.addr.i53 = alloca %"class.std::__1::basic_string"*, align 4
+  %this.addr.i53 = alloca ptr, align 4
   %__n.addr.i54 = alloca i32, align 4
   %__c.addr.i = alloca i8, align 1
-  %this.addr.i46 = alloca %"class.std::__1::basic_streambuf"*, align 4
-  %__s.addr.i47 = alloca i8*, align 4
+  %this.addr.i46 = alloca ptr, align 4
+  %__s.addr.i47 = alloca ptr, align 4
   %__n.addr.i48 = alloca i32, align 4
-  %this.addr.i44 = alloca %"class.std::__1::basic_streambuf"*, align 4
-  %__s.addr.i = alloca i8*, align 4
+  %this.addr.i44 = alloca ptr, align 4
+  %__s.addr.i = alloca ptr, align 4
   %__n.addr.i = alloca i32, align 4
-  %this.addr.i41 = alloca %"class.std::__1::ios_base"*, align 4
+  %this.addr.i41 = alloca ptr, align 4
   %__wide.addr.i = alloca i32, align 4
   %__r.i = alloca i32, align 4
-  %this.addr.i = alloca %"class.std::__1::ios_base"*, align 4
+  %this.addr.i = alloca ptr, align 4
   %retval = alloca %"class.std::__1::ostreambuf_iterator", align 4
   %__s = alloca %"class.std::__1::ostreambuf_iterator", align 4
-  %__ob.addr = alloca i8*, align 4
-  %__op.addr = alloca i8*, align 4
-  %__oe.addr = alloca i8*, align 4
-  %__iob.addr = alloca %"class.std::__1::ios_base"*, align 4
+  %__ob.addr = alloca ptr, align 4
+  %__op.addr = alloca ptr, align 4
+  %__oe.addr = alloca ptr, align 4
+  %__iob.addr = alloca ptr, align 4
   %__fl.addr = alloca i8, align 1
   %__sz = alloca i32, align 4
   %__ns = alloca i32, align 4
   %__np = alloca i32, align 4
   %__sp = alloca %"class.std::__1::basic_string", align 4
-  %exn.slot = alloca i8*
+  %exn.slot = alloca ptr
   %ehselector.slot = alloca i32
   %cleanup.dest.slot = alloca i32
-  %coerce.dive = getelementptr %"class.std::__1::ostreambuf_iterator", %"class.std::__1::ostreambuf_iterator"* %__s, i32 0, i32 0
-  %coerce.val.ip = inttoptr i32 %__s.coerce to %"class.std::__1::basic_streambuf"*
-  store %"class.std::__1::basic_streambuf"* %coerce.val.ip, %"class.std::__1::basic_streambuf"** %coerce.dive
-  store i8* %__ob, i8** %__ob.addr, align 4
-  store i8* %__op, i8** %__op.addr, align 4
-  store i8* %__oe, i8** %__oe.addr, align 4
-  store %"class.std::__1::ios_base"* %__iob, %"class.std::__1::ios_base"** %__iob.addr, align 4
-  store i8 %__fl, i8* %__fl.addr, align 1
-  %__sbuf_ = getelementptr inbounds %"class.std::__1::ostreambuf_iterator", %"class.std::__1::ostreambuf_iterator"* %__s, i32 0, i32 0
-  %0 = load %"class.std::__1::basic_streambuf"*, %"class.std::__1::basic_streambuf"** %__sbuf_, align 4
-  %cmp = icmp eq %"class.std::__1::basic_streambuf"* %0, null
+  %coerce.val.ip = inttoptr i32 %__s.coerce to ptr
+  store ptr %coerce.val.ip, ptr %__s
+  store ptr %__ob, ptr %__ob.addr, align 4
+  store ptr %__op, ptr %__op.addr, align 4
+  store ptr %__oe, ptr %__oe.addr, align 4
+  store ptr %__iob, ptr %__iob.addr, align 4
+  store i8 %__fl, ptr %__fl.addr, align 1
+  %0 = load ptr, ptr %__s, align 4
+  %cmp = icmp eq ptr %0, null
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  %1 = bitcast %"class.std::__1::ostreambuf_iterator"* %retval to i8*
-  %2 = bitcast %"class.std::__1::ostreambuf_iterator"* %__s to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %1, i8* align 4 %2, i32 4, i1 false)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %retval, ptr align 4 %__s, i32 4, i1 false)
   br label %return
 
 if.end:                                           ; preds = %entry
-  %3 = load i8*, i8** %__oe.addr, align 4
-  %4 = load i8*, i8** %__ob.addr, align 4
-  %sub.ptr.lhs.cast = ptrtoint i8* %3 to i32
-  %sub.ptr.rhs.cast = ptrtoint i8* %4 to i32
+  %1 = load ptr, ptr %__oe.addr, align 4
+  %2 = load ptr, ptr %__ob.addr, align 4
+  %sub.ptr.lhs.cast = ptrtoint ptr %1 to i32
+  %sub.ptr.rhs.cast = ptrtoint ptr %2 to i32
   %sub.ptr.sub = sub i32 %sub.ptr.lhs.cast, %sub.ptr.rhs.cast
-  store i32 %sub.ptr.sub, i32* %__sz, align 4
-  %5 = load %"class.std::__1::ios_base"*, %"class.std::__1::ios_base"** %__iob.addr, align 4
-  store %"class.std::__1::ios_base"* %5, %"class.std::__1::ios_base"** %this.addr.i, align 4
-  %this1.i = load %"class.std::__1::ios_base"*, %"class.std::__1::ios_base"** %this.addr.i
-  %__width_.i = getelementptr inbounds %"class.std::__1::ios_base", %"class.std::__1::ios_base"* %this1.i, i32 0, i32 3
-  %6 = load i32, i32* %__width_.i, align 4
-  store i32 %6, i32* %__ns, align 4
-  %7 = load i32, i32* %__ns, align 4
-  %8 = load i32, i32* %__sz, align 4
-  %cmp1 = icmp sgt i32 %7, %8
+  store i32 %sub.ptr.sub, ptr %__sz, align 4
+  %3 = load ptr, ptr %__iob.addr, align 4
+  store ptr %3, ptr %this.addr.i, align 4
+  %this1.i = load ptr, ptr %this.addr.i
+  %__width_.i = getelementptr inbounds %"class.std::__1::ios_base", ptr %this1.i, i32 0, i32 3
+  %4 = load i32, ptr %__width_.i, align 4
+  store i32 %4, ptr %__ns, align 4
+  %5 = load i32, ptr %__ns, align 4
+  %6 = load i32, ptr %__sz, align 4
+  %cmp1 = icmp sgt i32 %5, %6
   br i1 %cmp1, label %if.then2, label %if.else
 
 if.then2:                                         ; preds = %if.end
-  %9 = load i32, i32* %__sz, align 4
-  %10 = load i32, i32* %__ns, align 4
-  %sub = sub nsw i32 %10, %9
-  store i32 %sub, i32* %__ns, align 4
+  %7 = load i32, ptr %__sz, align 4
+  %8 = load i32, ptr %__ns, align 4
+  %sub = sub nsw i32 %8, %7
+  store i32 %sub, ptr %__ns, align 4
   br label %if.end3
 
 if.else:                                          ; preds = %if.end
-  store i32 0, i32* %__ns, align 4
+  store i32 0, ptr %__ns, align 4
   br label %if.end3
 
 if.end3:                                          ; preds = %if.else, %if.then2
-  %11 = load i8*, i8** %__op.addr, align 4
-  %12 = load i8*, i8** %__ob.addr, align 4
-  %sub.ptr.lhs.cast4 = ptrtoint i8* %11 to i32
-  %sub.ptr.rhs.cast5 = ptrtoint i8* %12 to i32
+  %9 = load ptr, ptr %__op.addr, align 4
+  %10 = load ptr, ptr %__ob.addr, align 4
+  %sub.ptr.lhs.cast4 = ptrtoint ptr %9 to i32
+  %sub.ptr.rhs.cast5 = ptrtoint ptr %10 to i32
   %sub.ptr.sub6 = sub i32 %sub.ptr.lhs.cast4, %sub.ptr.rhs.cast5
-  store i32 %sub.ptr.sub6, i32* %__np, align 4
-  %13 = load i32, i32* %__np, align 4
-  %cmp7 = icmp sgt i32 %13, 0
+  store i32 %sub.ptr.sub6, ptr %__np, align 4
+  %11 = load i32, ptr %__np, align 4
+  %cmp7 = icmp sgt i32 %11, 0
   br i1 %cmp7, label %if.then8, label %if.end15
 
 if.then8:                                         ; preds = %if.end3
-  %__sbuf_9 = getelementptr inbounds %"class.std::__1::ostreambuf_iterator", %"class.std::__1::ostreambuf_iterator"* %__s, i32 0, i32 0
-  %14 = load %"class.std::__1::basic_streambuf"*, %"class.std::__1::basic_streambuf"** %__sbuf_9, align 4
-  %15 = load i8*, i8** %__ob.addr, align 4
-  %16 = load i32, i32* %__np, align 4
-  store %"class.std::__1::basic_streambuf"* %14, %"class.std::__1::basic_streambuf"** %this.addr.i46, align 4
-  store i8* %15, i8** %__s.addr.i47, align 4
-  store i32 %16, i32* %__n.addr.i48, align 4
-  %this1.i49 = load %"class.std::__1::basic_streambuf"*, %"class.std::__1::basic_streambuf"** %this.addr.i46
-  %17 = bitcast %"class.std::__1::basic_streambuf"* %this1.i49 to i32 (%"class.std::__1::basic_streambuf"*, i8*, i32)***
-  %vtable.i50 = load i32 (%"class.std::__1::basic_streambuf"*, i8*, i32)**, i32 (%"class.std::__1::basic_streambuf"*, i8*, i32)*** %17
-  %vfn.i51 = getelementptr inbounds i32 (%"class.std::__1::basic_streambuf"*, i8*, i32)*, i32 (%"class.std::__1::basic_streambuf"*, i8*, i32)** %vtable.i50, i64 12
-  %18 = load i32 (%"class.std::__1::basic_streambuf"*, i8*, i32)*, i32 (%"class.std::__1::basic_streambuf"*, i8*, i32)** %vfn.i51
-  %19 = load i8*, i8** %__s.addr.i47, align 4
-  %20 = load i32, i32* %__n.addr.i48, align 4
-  %call.i52 = call i32 %18(%"class.std::__1::basic_streambuf"* %this1.i49, i8* %19, i32 %20)
-  %21 = load i32, i32* %__np, align 4
-  %cmp11 = icmp ne i32 %call.i52, %21
+  %12 = load ptr, ptr %__s, align 4
+  %13 = load ptr, ptr %__ob.addr, align 4
+  %14 = load i32, ptr %__np, align 4
+  store ptr %12, ptr %this.addr.i46, align 4
+  store ptr %13, ptr %__s.addr.i47, align 4
+  store i32 %14, ptr %__n.addr.i48, align 4
+  %this1.i49 = load ptr, ptr %this.addr.i46
+  %vtable.i50 = load ptr, ptr %this1.i49
+  %vfn.i51 = getelementptr inbounds ptr, ptr %vtable.i50, i64 12
+  %15 = load ptr, ptr %vfn.i51
+  %16 = load ptr, ptr %__s.addr.i47, align 4
+  %17 = load i32, ptr %__n.addr.i48, align 4
+  %call.i52 = call i32 %15(ptr %this1.i49, ptr %16, i32 %17)
+  %18 = load i32, ptr %__np, align 4
+  %cmp11 = icmp ne i32 %call.i52, %18
   br i1 %cmp11, label %if.then12, label %if.end14
 
 if.then12:                                        ; preds = %if.then8
-  %__sbuf_13 = getelementptr inbounds %"class.std::__1::ostreambuf_iterator", %"class.std::__1::ostreambuf_iterator"* %__s, i32 0, i32 0
-  store %"class.std::__1::basic_streambuf"* null, %"class.std::__1::basic_streambuf"** %__sbuf_13, align 4
-  %22 = bitcast %"class.std::__1::ostreambuf_iterator"* %retval to i8*
-  %23 = bitcast %"class.std::__1::ostreambuf_iterator"* %__s to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %22, i8* align 4 %23, i32 4, i1 false)
+  store ptr null, ptr %__s, align 4
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %retval, ptr align 4 %__s, i32 4, i1 false)
   br label %return
 
 if.end14:                                         ; preds = %if.then8
   br label %if.end15
 
 if.end15:                                         ; preds = %if.end14, %if.end3
-  %24 = load i32, i32* %__ns, align 4
-  %cmp16 = icmp sgt i32 %24, 0
+  %19 = load i32, ptr %__ns, align 4
+  %cmp16 = icmp sgt i32 %19, 0
   br i1 %cmp16, label %if.then17, label %if.end25
 
 if.then17:                                        ; preds = %if.end15
-  %25 = load i32, i32* %__ns, align 4
-  %26 = load i8, i8* %__fl.addr, align 1
-  store %"class.std::__1::basic_string"* %__sp, %"class.std::__1::basic_string"** %this.addr.i53, align 4
-  store i32 %25, i32* %__n.addr.i54, align 4
-  store i8 %26, i8* %__c.addr.i, align 1
-  %this1.i55 = load %"class.std::__1::basic_string"*, %"class.std::__1::basic_string"** %this.addr.i53
-  %27 = load i32, i32* %__n.addr.i54, align 4
-  %28 = load i8, i8* %__c.addr.i, align 1
-  store %"class.std::__1::basic_string"* %this1.i55, %"class.std::__1::basic_string"** %this.addr.i.i, align 4
-  store i32 %27, i32* %__n.addr.i.i, align 4
-  store i8 %28, i8* %__c.addr.i.i, align 1
-  %this1.i.i = load %"class.std::__1::basic_string"*, %"class.std::__1::basic_string"** %this.addr.i.i
-  %29 = bitcast %"class.std::__1::basic_string"* %this1.i.i to %"class.std::__1::__basic_string_common"*
-  %__r_.i.i = getelementptr inbounds %"class.std::__1::basic_string", %"class.std::__1::basic_string"* %this1.i.i, i32 0, i32 0
-  store %"class.std::__1::__compressed_pair"* %__r_.i.i, %"class.std::__1::__compressed_pair"** %this.addr.i.i.i, align 4
-  %this1.i.i.i = load %"class.std::__1::__compressed_pair"*, %"class.std::__1::__compressed_pair"** %this.addr.i.i.i
-  %30 = bitcast %"class.std::__1::__compressed_pair"* %this1.i.i.i to %"class.std::__1::__libcpp_compressed_pair_imp"*
-  store %"class.std::__1::__libcpp_compressed_pair_imp"* %30, %"class.std::__1::__libcpp_compressed_pair_imp"** %this.addr.i.i.i.i, align 4
-  %this1.i.i.i.i = load %"class.std::__1::__libcpp_compressed_pair_imp"*, %"class.std::__1::__libcpp_compressed_pair_imp"** %this.addr.i.i.i.i
-  %31 = bitcast %"class.std::__1::__libcpp_compressed_pair_imp"* %this1.i.i.i.i to %"class.std::__1::allocator"*
-  store %"class.std::__1::allocator"* %31, %"class.std::__1::allocator"** %this.addr.i.i.i.i.i, align 4
-  %this1.i.i.i.i.i = load %"class.std::__1::allocator"*, %"class.std::__1::allocator"** %this.addr.i.i.i.i.i
-  %__first_.i.i.i.i = getelementptr inbounds %"class.std::__1::__libcpp_compressed_pair_imp", %"class.std::__1::__libcpp_compressed_pair_imp"* %this1.i.i.i.i, i32 0, i32 0
-  %32 = load i32, i32* %__n.addr.i.i, align 4
-  %33 = load i8, i8* %__c.addr.i.i, align 1
-  call void @_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6__initEjc(%"class.std::__1::basic_string"* %this1.i.i, i32 %32, i8 zeroext %33)
-  %__sbuf_18 = getelementptr inbounds %"class.std::__1::ostreambuf_iterator", %"class.std::__1::ostreambuf_iterator"* %__s, i32 0, i32 0
-  %34 = load %"class.std::__1::basic_streambuf"*, %"class.std::__1::basic_streambuf"** %__sbuf_18, align 4
-  store %"class.std::__1::basic_string"* %__sp, %"class.std::__1::basic_string"** %this.addr.i60, align 4
-  %this1.i61 = load %"class.std::__1::basic_string"*, %"class.std::__1::basic_string"** %this.addr.i60
-  store %"class.std::__1::basic_string"* %this1.i61, %"class.std::__1::basic_string"** %this.addr.i.i59, align 4
-  %this1.i.i62 = load %"class.std::__1::basic_string"*, %"class.std::__1::basic_string"** %this.addr.i.i59
-  store %"class.std::__1::basic_string"* %this1.i.i62, %"class.std::__1::basic_string"** %this.addr.i.i.i58, align 4
-  %this1.i.i.i63 = load %"class.std::__1::basic_string"*, %"class.std::__1::basic_string"** %this.addr.i.i.i58
-  %__r_.i.i.i = getelementptr inbounds %"class.std::__1::basic_string", %"class.std::__1::basic_string"* %this1.i.i.i63, i32 0, i32 0
-  store %"class.std::__1::__compressed_pair"* %__r_.i.i.i, %"class.std::__1::__compressed_pair"** %this.addr.i.i.i.i57, align 4
-  %this1.i.i.i.i64 = load %"class.std::__1::__compressed_pair"*, %"class.std::__1::__compressed_pair"** %this.addr.i.i.i.i57
-  %35 = bitcast %"class.std::__1::__compressed_pair"* %this1.i.i.i.i64 to %"class.std::__1::__libcpp_compressed_pair_imp"*
-  store %"class.std::__1::__libcpp_compressed_pair_imp"* %35, %"class.std::__1::__libcpp_compressed_pair_imp"** %this.addr.i.i.i.i.i56, align 4
-  %this1.i.i.i.i.i65 = load %"class.std::__1::__libcpp_compressed_pair_imp"*, %"class.std::__1::__libcpp_compressed_pair_imp"** %this.addr.i.i.i.i.i56
-  %__first_.i.i.i.i.i = getelementptr inbounds %"class.std::__1::__libcpp_compressed_pair_imp", %"class.std::__1::__libcpp_compressed_pair_imp"* %this1.i.i.i.i.i65, i32 0, i32 0
-  %36 = getelementptr inbounds %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__rep", %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__rep"* %__first_.i.i.i.i.i, i32 0, i32 0
-  %__s.i.i.i = bitcast %union.anon* %36 to %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__short"*
-  %37 = getelementptr inbounds %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__short", %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__short"* %__s.i.i.i, i32 0, i32 0
-  %__size_.i.i.i = bitcast %union.anon.0* %37 to i8*
-  %38 = load i8, i8* %__size_.i.i.i, align 1
-  %conv.i.i.i = zext i8 %38 to i32
+  %20 = load i32, ptr %__ns, align 4
+  %21 = load i8, ptr %__fl.addr, align 1
+  store ptr %__sp, ptr %this.addr.i53, align 4
+  store i32 %20, ptr %__n.addr.i54, align 4
+  store i8 %21, ptr %__c.addr.i, align 1
+  %this1.i55 = load ptr, ptr %this.addr.i53
+  %22 = load i32, ptr %__n.addr.i54, align 4
+  %23 = load i8, ptr %__c.addr.i, align 1
+  store ptr %this1.i55, ptr %this.addr.i.i, align 4
+  store i32 %22, ptr %__n.addr.i.i, align 4
+  store i8 %23, ptr %__c.addr.i.i, align 1
+  %this1.i.i = load ptr, ptr %this.addr.i.i
+  store ptr %this1.i.i, ptr %this.addr.i.i.i, align 4
+  %this1.i.i.i = load ptr, ptr %this.addr.i.i.i
+  store ptr %this1.i.i.i, ptr %this.addr.i.i.i.i, align 4
+  %this1.i.i.i.i = load ptr, ptr %this.addr.i.i.i.i
+  store ptr %this1.i.i.i.i, ptr %this.addr.i.i.i.i.i, align 4
+  %this1.i.i.i.i.i = load ptr, ptr %this.addr.i.i.i.i.i
+  %24 = load i32, ptr %__n.addr.i.i, align 4
+  %25 = load i8, ptr %__c.addr.i.i, align 1
+  call void @_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6__initEjc(ptr %this1.i.i, i32 %24, i8 zeroext %25)
+  %26 = load ptr, ptr %__s, align 4
+  store ptr %__sp, ptr %this.addr.i60, align 4
+  %this1.i61 = load ptr, ptr %this.addr.i60
+  store ptr %this1.i61, ptr %this.addr.i.i59, align 4
+  %this1.i.i62 = load ptr, ptr %this.addr.i.i59
+  store ptr %this1.i.i62, ptr %this.addr.i.i.i58, align 4
+  %this1.i.i.i63 = load ptr, ptr %this.addr.i.i.i58
+  store ptr %this1.i.i.i63, ptr %this.addr.i.i.i.i57, align 4
+  %this1.i.i.i.i64 = load ptr, ptr %this.addr.i.i.i.i57
+  store ptr %this1.i.i.i.i64, ptr %this.addr.i.i.i.i.i56, align 4
+  %this1.i.i.i.i.i65 = load ptr, ptr %this.addr.i.i.i.i.i56
+  %27 = getelementptr inbounds %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__rep", ptr %this1.i.i.i.i.i65, i32 0, i32 0
+  %28 = getelementptr inbounds %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__short", ptr %27, i32 0, i32 0
+  %29 = load i8, ptr %28, align 1
+  %conv.i.i.i = zext i8 %29 to i32
   %and.i.i.i = and i32 %conv.i.i.i, 1
   %tobool.i.i.i = icmp ne i32 %and.i.i.i, 0
   br i1 %tobool.i.i.i, label %cond.true.i.i, label %cond.false.i.i
 
 cond.true.i.i:                                    ; preds = %if.then17
-  store %"class.std::__1::basic_string"* %this1.i.i62, %"class.std::__1::basic_string"** %this.addr.i15.i.i, align 4
-  %this1.i16.i.i = load %"class.std::__1::basic_string"*, %"class.std::__1::basic_string"** %this.addr.i15.i.i
-  %__r_.i17.i.i = getelementptr inbounds %"class.std::__1::basic_string", %"class.std::__1::basic_string"* %this1.i16.i.i, i32 0, i32 0
-  store %"class.std::__1::__compressed_pair"* %__r_.i17.i.i, %"class.std::__1::__compressed_pair"** %this.addr.i.i14.i.i, align 4
-  %this1.i.i18.i.i = load %"class.std::__1::__compressed_pair"*, %"class.std::__1::__compressed_pair"** %this.addr.i.i14.i.i
-  %39 = bitcast %"class.std::__1::__compressed_pair"* %this1.i.i18.i.i to %"class.std::__1::__libcpp_compressed_pair_imp"*
-  store %"class.std::__1::__libcpp_compressed_pair_imp"* %39, %"class.std::__1::__libcpp_compressed_pair_imp"** %this.addr.i.i.i13.i.i, align 4
-  %this1.i.i.i19.i.i = load %"class.std::__1::__libcpp_compressed_pair_imp"*, %"class.std::__1::__libcpp_compressed_pair_imp"** %this.addr.i.i.i13.i.i
-  %__first_.i.i.i20.i.i = getelementptr inbounds %"class.std::__1::__libcpp_compressed_pair_imp", %"class.std::__1::__libcpp_compressed_pair_imp"* %this1.i.i.i19.i.i, i32 0, i32 0
-  %40 = getelementptr inbounds %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__rep", %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__rep"* %__first_.i.i.i20.i.i, i32 0, i32 0
-  %__l.i.i.i = bitcast %union.anon* %40 to %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__long"*
-  %__data_.i21.i.i = getelementptr inbounds %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__long", %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__long"* %__l.i.i.i, i32 0, i32 2
-  %41 = load i8*, i8** %__data_.i21.i.i, align 4
+  store ptr %this1.i.i62, ptr %this.addr.i15.i.i, align 4
+  %this1.i16.i.i = load ptr, ptr %this.addr.i15.i.i
+  store ptr %this1.i16.i.i, ptr %this.addr.i.i14.i.i, align 4
+  %this1.i.i18.i.i = load ptr, ptr %this.addr.i.i14.i.i
+  store ptr %this1.i.i18.i.i, ptr %this.addr.i.i.i13.i.i, align 4
+  %this1.i.i.i19.i.i = load ptr, ptr %this.addr.i.i.i13.i.i
+  %30 = getelementptr inbounds %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__rep", ptr %this1.i.i.i19.i.i, i32 0, i32 0
+  %__data_.i21.i.i = getelementptr inbounds %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__long", ptr %30, i32 0, i32 2
+  %31 = load ptr, ptr %__data_.i21.i.i, align 4
   br label %_ZNKSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE4dataEv.exit
 
 cond.false.i.i:                                   ; preds = %if.then17
-  store %"class.std::__1::basic_string"* %this1.i.i62, %"class.std::__1::basic_string"** %this.addr.i6.i.i, align 4
-  %this1.i7.i.i = load %"class.std::__1::basic_string"*, %"class.std::__1::basic_string"** %this.addr.i6.i.i
-  %__r_.i8.i.i = getelementptr inbounds %"class.std::__1::basic_string", %"class.std::__1::basic_string"* %this1.i7.i.i, i32 0, i32 0
-  store %"class.std::__1::__compressed_pair"* %__r_.i8.i.i, %"class.std::__1::__compressed_pair"** %this.addr.i.i5.i.i, align 4
-  %this1.i.i9.i.i = load %"class.std::__1::__compressed_pair"*, %"class.std::__1::__compressed_pair"** %this.addr.i.i5.i.i
-  %42 = bitcast %"class.std::__1::__compressed_pair"* %this1.i.i9.i.i to %"class.std::__1::__libcpp_compressed_pair_imp"*
-  store %"class.std::__1::__libcpp_compressed_pair_imp"* %42, %"class.std::__1::__libcpp_compressed_pair_imp"** %this.addr.i.i.i4.i.i, align 4
-  %this1.i.i.i10.i.i = load %"class.std::__1::__libcpp_compressed_pair_imp"*, %"class.std::__1::__libcpp_compressed_pair_imp"** %this.addr.i.i.i4.i.i
-  %__first_.i.i.i11.i.i = getelementptr inbounds %"class.std::__1::__libcpp_compressed_pair_imp", %"class.std::__1::__libcpp_compressed_pair_imp"* %this1.i.i.i10.i.i, i32 0, i32 0
-  %43 = getelementptr inbounds %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__rep", %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__rep"* %__first_.i.i.i11.i.i, i32 0, i32 0
-  %__s.i12.i.i = bitcast %union.anon* %43 to %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__short"*
-  %__data_.i.i.i = getelementptr inbounds %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__short", %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__short"* %__s.i12.i.i, i32 0, i32 1
-  %arrayidx.i.i.i = getelementptr inbounds [11 x i8], [11 x i8]* %__data_.i.i.i, i32 0, i32 0
-  store i8* %arrayidx.i.i.i, i8** %__r.addr.i.i.i.i, align 4
-  %44 = load i8*, i8** %__r.addr.i.i.i.i, align 4
-  store i8* %44, i8** %__x.addr.i.i.i.i.i, align 4
-  %45 = load i8*, i8** %__x.addr.i.i.i.i.i, align 4
+  store ptr %this1.i.i62, ptr %this.addr.i6.i.i, align 4
+  %this1.i7.i.i = load ptr, ptr %this.addr.i6.i.i
+  store ptr %this1.i7.i.i, ptr %this.addr.i.i5.i.i, align 4
+  %this1.i.i9.i.i = load ptr, ptr %this.addr.i.i5.i.i
+  store ptr %this1.i.i9.i.i, ptr %this.addr.i.i.i4.i.i, align 4
+  %this1.i.i.i10.i.i = load ptr, ptr %this.addr.i.i.i4.i.i
+  %32 = getelementptr inbounds %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__rep", ptr %this1.i.i.i10.i.i, i32 0, i32 0
+  %__data_.i.i.i = getelementptr inbounds %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__short", ptr %32, i32 0, i32 1
+  store ptr %__data_.i.i.i, ptr %__r.addr.i.i.i.i, align 4
+  %33 = load ptr, ptr %__r.addr.i.i.i.i, align 4
+  store ptr %33, ptr %__x.addr.i.i.i.i.i, align 4
+  %34 = load ptr, ptr %__x.addr.i.i.i.i.i, align 4
   br label %_ZNKSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE4dataEv.exit
 
 _ZNKSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE4dataEv.exit: ; preds = %cond.false.i.i, %cond.true.i.i
-  %cond.i.i = phi i8* [ %41, %cond.true.i.i ], [ %45, %cond.false.i.i ]
-  store i8* %cond.i.i, i8** %__p.addr.i.i, align 4
-  %46 = load i8*, i8** %__p.addr.i.i, align 4
-  %47 = load i32, i32* %__ns, align 4
-  store %"class.std::__1::basic_streambuf"* %34, %"class.std::__1::basic_streambuf"** %this.addr.i66, align 4
-  store i8* %46, i8** %__s.addr.i67, align 4
-  store i32 %47, i32* %__n.addr.i68, align 4
-  %this1.i69 = load %"class.std::__1::basic_streambuf"*, %"class.std::__1::basic_streambuf"** %this.addr.i66
-  %48 = bitcast %"class.std::__1::basic_streambuf"* %this1.i69 to i32 (%"class.std::__1::basic_streambuf"*, i8*, i32)***
-  %vtable.i70 = load i32 (%"class.std::__1::basic_streambuf"*, i8*, i32)**, i32 (%"class.std::__1::basic_streambuf"*, i8*, i32)*** %48
-  %vfn.i71 = getelementptr inbounds i32 (%"class.std::__1::basic_streambuf"*, i8*, i32)*, i32 (%"class.std::__1::basic_streambuf"*, i8*, i32)** %vtable.i70, i64 12
-  %49 = load i32 (%"class.std::__1::basic_streambuf"*, i8*, i32)*, i32 (%"class.std::__1::basic_streambuf"*, i8*, i32)** %vfn.i71
-  %50 = load i8*, i8** %__s.addr.i67, align 4
-  %51 = load i32, i32* %__n.addr.i68, align 4
-  %call.i7273 = invoke i32 %49(%"class.std::__1::basic_streambuf"* %this1.i69, i8* %50, i32 %51)
+  %cond.i.i = phi ptr [ %31, %cond.true.i.i ], [ %34, %cond.false.i.i ]
+  store ptr %cond.i.i, ptr %__p.addr.i.i, align 4
+  %35 = load ptr, ptr %__p.addr.i.i, align 4
+  %36 = load i32, ptr %__ns, align 4
+  store ptr %26, ptr %this.addr.i66, align 4
+  store ptr %35, ptr %__s.addr.i67, align 4
+  store i32 %36, ptr %__n.addr.i68, align 4
+  %this1.i69 = load ptr, ptr %this.addr.i66
+  %vtable.i70 = load ptr, ptr %this1.i69
+  %vfn.i71 = getelementptr inbounds ptr, ptr %vtable.i70, i64 12
+  %37 = load ptr, ptr %vfn.i71
+  %38 = load ptr, ptr %__s.addr.i67, align 4
+  %39 = load i32, ptr %__n.addr.i68, align 4
+  %call.i7273 = invoke i32 %37(ptr %this1.i69, ptr %38, i32 %39)
           to label %_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE5sputnEPKci.exit unwind label %lpad
 
 _ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE5sputnEPKci.exit: ; preds = %_ZNKSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE4dataEv.exit
   br label %invoke.cont
 
 invoke.cont:                                      ; preds = %_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE5sputnEPKci.exit
-  %52 = load i32, i32* %__ns, align 4
-  %cmp21 = icmp ne i32 %call.i7273, %52
+  %40 = load i32, ptr %__ns, align 4
+  %cmp21 = icmp ne i32 %call.i7273, %40
   br i1 %cmp21, label %if.then22, label %if.end24
 
 if.then22:                                        ; preds = %invoke.cont
-  %__sbuf_23 = getelementptr inbounds %"class.std::__1::ostreambuf_iterator", %"class.std::__1::ostreambuf_iterator"* %__s, i32 0, i32 0
-  store %"class.std::__1::basic_streambuf"* null, %"class.std::__1::basic_streambuf"** %__sbuf_23, align 4
-  %53 = bitcast %"class.std::__1::ostreambuf_iterator"* %retval to i8*
-  %54 = bitcast %"class.std::__1::ostreambuf_iterator"* %__s to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %53, i8* align 4 %54, i32 4, i1 false)
-  store i32 1, i32* %cleanup.dest.slot
+  store ptr null, ptr %__s, align 4
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %retval, ptr align 4 %__s, i32 4, i1 false)
+  store i32 1, ptr %cleanup.dest.slot
   br label %cleanup
 
 lpad:                                             ; preds = %_ZNKSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE4dataEv.exit
-  %55 = landingpad { i8*, i32 }
+  %41 = landingpad { ptr, i32 }
           cleanup
-  %56 = extractvalue { i8*, i32 } %55, 0
-  store i8* %56, i8** %exn.slot
-  %57 = extractvalue { i8*, i32 } %55, 1
-  store i32 %57, i32* %ehselector.slot
-  call void @_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEED1Ev(%"class.std::__1::basic_string"* %__sp) #0
+  %42 = extractvalue { ptr, i32 } %41, 0
+  store ptr %42, ptr %exn.slot
+  %43 = extractvalue { ptr, i32 } %41, 1
+  store i32 %43, ptr %ehselector.slot
+  call void @_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEED1Ev(ptr %__sp) #0
   br label %eh.resume
 
 if.end24:                                         ; preds = %invoke.cont
-  store i32 0, i32* %cleanup.dest.slot
+  store i32 0, ptr %cleanup.dest.slot
   br label %cleanup
 
 cleanup:                                          ; preds = %if.end24, %if.then22
-  call void @_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEED1Ev(%"class.std::__1::basic_string"* %__sp) #0
-  %cleanup.dest = load i32, i32* %cleanup.dest.slot
+  call void @_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEED1Ev(ptr %__sp) #0
+  %cleanup.dest = load i32, ptr %cleanup.dest.slot
   switch i32 %cleanup.dest, label %unreachable [
     i32 0, label %cleanup.cont
     i32 1, label %return
@@ -326,82 +293,74 @@ cleanup.cont:                                     ; preds = %cleanup
   br label %if.end25
 
 if.end25:                                         ; preds = %cleanup.cont, %if.end15
-  %58 = load i8*, i8** %__oe.addr, align 4
-  %59 = load i8*, i8** %__op.addr, align 4
-  %sub.ptr.lhs.cast26 = ptrtoint i8* %58 to i32
-  %sub.ptr.rhs.cast27 = ptrtoint i8* %59 to i32
+  %44 = load ptr, ptr %__oe.addr, align 4
+  %45 = load ptr, ptr %__op.addr, align 4
+  %sub.ptr.lhs.cast26 = ptrtoint ptr %44 to i32
+  %sub.ptr.rhs.cast27 = ptrtoint ptr %45 to i32
   %sub.ptr.sub28 = sub i32 %sub.ptr.lhs.cast26, %sub.ptr.rhs.cast27
-  store i32 %sub.ptr.sub28, i32* %__np, align 4
-  %60 = load i32, i32* %__np, align 4
-  %cmp29 = icmp sgt i32 %60, 0
+  store i32 %sub.ptr.sub28, ptr %__np, align 4
+  %46 = load i32, ptr %__np, align 4
+  %cmp29 = icmp sgt i32 %46, 0
   br i1 %cmp29, label %if.then30, label %if.end37
 
 if.then30:                                        ; preds = %if.end25
-  %__sbuf_31 = getelementptr inbounds %"class.std::__1::ostreambuf_iterator", %"class.std::__1::ostreambuf_iterator"* %__s, i32 0, i32 0
-  %61 = load %"class.std::__1::basic_streambuf"*, %"class.std::__1::basic_streambuf"** %__sbuf_31, align 4
-  %62 = load i8*, i8** %__op.addr, align 4
-  %63 = load i32, i32* %__np, align 4
-  store %"class.std::__1::basic_streambuf"* %61, %"class.std::__1::basic_streambuf"** %this.addr.i44, align 4
-  store i8* %62, i8** %__s.addr.i, align 4
-  store i32 %63, i32* %__n.addr.i, align 4
-  %this1.i45 = load %"class.std::__1::basic_streambuf"*, %"class.std::__1::basic_streambuf"** %this.addr.i44
-  %64 = bitcast %"class.std::__1::basic_streambuf"* %this1.i45 to i32 (%"class.std::__1::basic_streambuf"*, i8*, i32)***
-  %vtable.i = load i32 (%"class.std::__1::basic_streambuf"*, i8*, i32)**, i32 (%"class.std::__1::basic_streambuf"*, i8*, i32)*** %64
-  %vfn.i = getelementptr inbounds i32 (%"class.std::__1::basic_streambuf"*, i8*, i32)*, i32 (%"class.std::__1::basic_streambuf"*, i8*, i32)** %vtable.i, i64 12
-  %65 = load i32 (%"class.std::__1::basic_streambuf"*, i8*, i32)*, i32 (%"class.std::__1::basic_streambuf"*, i8*, i32)** %vfn.i
-  %66 = load i8*, i8** %__s.addr.i, align 4
-  %67 = load i32, i32* %__n.addr.i, align 4
-  %call.i = call i32 %65(%"class.std::__1::basic_streambuf"* %this1.i45, i8* %66, i32 %67)
-  %68 = load i32, i32* %__np, align 4
-  %cmp33 = icmp ne i32 %call.i, %68
+  %47 = load ptr, ptr %__s, align 4
+  %48 = load ptr, ptr %__op.addr, align 4
+  %49 = load i32, ptr %__np, align 4
+  store ptr %47, ptr %this.addr.i44, align 4
+  store ptr %48, ptr %__s.addr.i, align 4
+  store i32 %49, ptr %__n.addr.i, align 4
+  %this1.i45 = load ptr, ptr %this.addr.i44
+  %vtable.i = load ptr, ptr %this1.i45
+  %vfn.i = getelementptr inbounds ptr, ptr %vtable.i, i64 12
+  %50 = load ptr, ptr %vfn.i
+  %51 = load ptr, ptr %__s.addr.i, align 4
+  %52 = load i32, ptr %__n.addr.i, align 4
+  %call.i = call i32 %50(ptr %this1.i45, ptr %51, i32 %52)
+  %53 = load i32, ptr %__np, align 4
+  %cmp33 = icmp ne i32 %call.i, %53
   br i1 %cmp33, label %if.then34, label %if.end36
 
 if.then34:                                        ; preds = %if.then30
-  %__sbuf_35 = getelementptr inbounds %"class.std::__1::ostreambuf_iterator", %"class.std::__1::ostreambuf_iterator"* %__s, i32 0, i32 0
-  store %"class.std::__1::basic_streambuf"* null, %"class.std::__1::basic_streambuf"** %__sbuf_35, align 4
-  %69 = bitcast %"class.std::__1::ostreambuf_iterator"* %retval to i8*
-  %70 = bitcast %"class.std::__1::ostreambuf_iterator"* %__s to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %69, i8* align 4 %70, i32 4, i1 false)
+  store ptr null, ptr %__s, align 4
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %retval, ptr align 4 %__s, i32 4, i1 false)
   br label %return
 
 if.end36:                                         ; preds = %if.then30
   br label %if.end37
 
 if.end37:                                         ; preds = %if.end36, %if.end25
-  %71 = load %"class.std::__1::ios_base"*, %"class.std::__1::ios_base"** %__iob.addr, align 4
-  store %"class.std::__1::ios_base"* %71, %"class.std::__1::ios_base"** %this.addr.i41, align 4
-  store i32 0, i32* %__wide.addr.i, align 4
-  %this1.i42 = load %"class.std::__1::ios_base"*, %"class.std::__1::ios_base"** %this.addr.i41
-  %__width_.i43 = getelementptr inbounds %"class.std::__1::ios_base", %"class.std::__1::ios_base"* %this1.i42, i32 0, i32 3
-  %72 = load i32, i32* %__width_.i43, align 4
-  store i32 %72, i32* %__r.i, align 4
-  %73 = load i32, i32* %__wide.addr.i, align 4
-  %__width_2.i = getelementptr inbounds %"class.std::__1::ios_base", %"class.std::__1::ios_base"* %this1.i42, i32 0, i32 3
-  store i32 %73, i32* %__width_2.i, align 4
-  %74 = load i32, i32* %__r.i, align 4
-  %75 = bitcast %"class.std::__1::ostreambuf_iterator"* %retval to i8*
-  %76 = bitcast %"class.std::__1::ostreambuf_iterator"* %__s to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %75, i8* align 4 %76, i32 4, i1 false)
+  %54 = load ptr, ptr %__iob.addr, align 4
+  store ptr %54, ptr %this.addr.i41, align 4
+  store i32 0, ptr %__wide.addr.i, align 4
+  %this1.i42 = load ptr, ptr %this.addr.i41
+  %__width_.i43 = getelementptr inbounds %"class.std::__1::ios_base", ptr %this1.i42, i32 0, i32 3
+  %55 = load i32, ptr %__width_.i43, align 4
+  store i32 %55, ptr %__r.i, align 4
+  %56 = load i32, ptr %__wide.addr.i, align 4
+  %__width_2.i = getelementptr inbounds %"class.std::__1::ios_base", ptr %this1.i42, i32 0, i32 3
+  store i32 %56, ptr %__width_2.i, align 4
+  %57 = load i32, ptr %__r.i, align 4
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %retval, ptr align 4 %__s, i32 4, i1 false)
   br label %return
 
 return:                                           ; preds = %if.end37, %if.then34, %cleanup, %if.then12, %if.then
-  %coerce.dive39 = getelementptr %"class.std::__1::ostreambuf_iterator", %"class.std::__1::ostreambuf_iterator"* %retval, i32 0, i32 0
-  %77 = load %"class.std::__1::basic_streambuf"*, %"class.std::__1::basic_streambuf"** %coerce.dive39
-  %coerce.val.pi = ptrtoint %"class.std::__1::basic_streambuf"* %77 to i32
+  %58 = load ptr, ptr %retval
+  %coerce.val.pi = ptrtoint ptr %58 to i32
   ret i32 %coerce.val.pi
 
 eh.resume:                                        ; preds = %lpad
-  %exn = load i8*, i8** %exn.slot
-  %sel = load i32, i32* %ehselector.slot
-  %lpad.val = insertvalue { i8*, i32 } undef, i8* %exn, 0
-  %lpad.val40 = insertvalue { i8*, i32 } %lpad.val, i32 %sel, 1
-  resume { i8*, i32 } %lpad.val40
+  %exn = load ptr, ptr %exn.slot
+  %sel = load i32, ptr %ehselector.slot
+  %lpad.val = insertvalue { ptr, i32 } undef, ptr %exn, 0
+  %lpad.val40 = insertvalue { ptr, i32 } %lpad.val, i32 %sel, 1
+  resume { ptr, i32 } %lpad.val40
 
 unreachable:                                      ; preds = %cleanup
   unreachable
 }
 
-declare void @_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6__initEjc(%"class.std::__1::basic_string"*, i32, i8 zeroext) #2
+declare void @_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6__initEjc(ptr, i32, i8 zeroext) #2
 
 attributes #0 = { nounwind }
 attributes #1 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }

diff  --git a/llvm/test/CodeGen/Hexagon/early-if-merge-loop.ll b/llvm/test/CodeGen/Hexagon/early-if-merge-loop.ll
index 01983cfb969d0..a9c9a814c4d6f 100644
--- a/llvm/test/CodeGen/Hexagon/early-if-merge-loop.ll
+++ b/llvm/test/CodeGen/Hexagon/early-if-merge-loop.ll
@@ -10,23 +10,22 @@
 
 target triple = "hexagon"
 
-define i32 @fred(i32 %a0, i64* nocapture readonly %a1) #0 {
+define i32 @fred(i32 %a0, ptr nocapture readonly %a1) #0 {
 b2:
-  %v3 = bitcast i64* %a1 to i32*
-  %v4 = getelementptr inbounds i32, i32* %v3, i32 1
+  %v4 = getelementptr inbounds i32, ptr %a1, i32 1
   %v5 = zext i32 %a0 to i64
   br label %loop
 
 loop:                                             ; preds = %should_merge, %b2
   %v7 = phi i32 [ 0, %b2 ], [ %v49, %should_merge ]
   %v8 = phi i32 [ 0, %b2 ], [ %v42, %should_merge ]
-  %v9 = phi i32* [ %v4, %b2 ], [ %v53, %should_merge ]
+  %v9 = phi ptr [ %v4, %b2 ], [ %v53, %should_merge ]
   %v10 = phi i32 [ 0, %b2 ], [ %v30, %should_merge ]
-  %v11 = phi i32* [ %v3, %b2 ], [ %v51, %should_merge ]
+  %v11 = phi ptr [ %a1, %b2 ], [ %v51, %should_merge ]
   %v12 = phi i32 [ 0, %b2 ], [ %v23, %should_merge ]
   %v13 = phi i32 [ 2, %b2 ], [ %v54, %should_merge ]
-  %v14 = load i32, i32* %v11, align 4, !tbaa !0
-  %v15 = load i32, i32* %v9, align 4, !tbaa !0
+  %v14 = load i32, ptr %v11, align 4, !tbaa !0
+  %v15 = load i32, ptr %v9, align 4, !tbaa !0
   %v16 = icmp ult i32 %v13, 30
   %v17 = zext i32 %v12 to i64
   %v18 = shl nuw i64 %v17, 32
@@ -42,11 +41,11 @@ loop:                                             ; preds = %should_merge, %b2
   %v28 = tail call i64 @llvm.hexagon.A2.addp(i64 %v27, i64 %v5)
   %v29 = lshr i64 %v28, 32
   %v30 = trunc i64 %v29 to i32
-  %v31 = getelementptr inbounds i32, i32* %v3, i32 %v13
-  %v32 = load i32, i32* %v31, align 4, !tbaa !0
+  %v31 = getelementptr inbounds i32, ptr %a1, i32 %v13
+  %v32 = load i32, ptr %v31, align 4, !tbaa !0
   %v33 = or i32 %v13, 1
-  %v34 = getelementptr inbounds i32, i32* %v3, i32 %v33
-  %v35 = load i32, i32* %v34, align 4, !tbaa !0
+  %v34 = getelementptr inbounds i32, ptr %a1, i32 %v33
+  %v35 = load i32, ptr %v34, align 4, !tbaa !0
   %v36 = zext i32 %v8 to i64
   %v37 = shl nuw i64 %v36, 32
   %v38 = zext i32 %v32 to i64
@@ -65,9 +64,9 @@ loop:                                             ; preds = %should_merge, %b2
 
 should_merge:                                     ; preds = %loop
   %v50 = add nuw nsw i32 %v13, 2
-  %v51 = getelementptr inbounds i32, i32* %v3, i32 %v50
+  %v51 = getelementptr inbounds i32, ptr %a1, i32 %v50
   %v52 = add nuw nsw i32 %v13, 3
-  %v53 = getelementptr inbounds i32, i32* %v3, i32 %v52
+  %v53 = getelementptr inbounds i32, ptr %a1, i32 %v52
   %v54 = add nuw nsw i32 %v13, 4
   br label %loop
 

diff  --git a/llvm/test/CodeGen/Hexagon/early-if-phi-i1.ll b/llvm/test/CodeGen/Hexagon/early-if-phi-i1.ll
index f4af62d6b10ea..d0a911802d9e0 100644
--- a/llvm/test/CodeGen/Hexagon/early-if-phi-i1.ll
+++ b/llvm/test/CodeGen/Hexagon/early-if-phi-i1.ll
@@ -3,13 +3,13 @@
 ; Check that the early if-conversion does not predicate block1 (where the
 ; join block has a phi node of type i1).
 
-define i1 @foo(i32 %x, i32* %p) {
+define i1 @foo(i32 %x, ptr %p) {
 entry:
   %c = icmp sgt i32 %x, 0
   %c1 = icmp sgt i32 %x, 10
   br i1 %c, label %block2, label %block1
 block1:
-  store i32 1, i32* %p, align 4
+  store i32 1, ptr %p, align 4
   br label %block2
 block2:
   %b = phi i1 [ 0, %entry ], [ %c1, %block1 ]

diff  --git a/llvm/test/CodeGen/Hexagon/early-if-spare.ll b/llvm/test/CodeGen/Hexagon/early-if-spare.ll
index bc3d5ba62644b..04980e6e90b62 100644
--- a/llvm/test/CodeGen/Hexagon/early-if-spare.ll
+++ b/llvm/test/CodeGen/Hexagon/early-if-spare.ll
@@ -6,7 +6,7 @@
 
 target triple = "hexagon"
 
-define void @fred(i32 %n, i32* %bp) #0 {
+define void @fred(i32 %n, ptr %bp) #0 {
 entry:
   %cmp16 = icmp eq i32 %n, 0
   br i1 %cmp16, label %for.end, label %for.body.lr.ph
@@ -17,22 +17,22 @@ for.body.lr.ph:                                   ; preds = %entry
 
 for.body:                                         ; preds = %for.inc, %for.body.lr.ph
   %i.017 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
-  %call = tail call i32 @foo(i32* %bp) nounwind
-  %call1 = tail call i32 @bar(i32* %bp) nounwind
+  %call = tail call i32 @foo(ptr %bp) nounwind
+  %call1 = tail call i32 @bar(ptr %bp) nounwind
   br i1 %cmp2, label %if.then, label %if.else
 
 if.then:                                          ; preds = %for.body
-  %arrayidx = getelementptr inbounds i32, i32* %bp, i32 %i.017
-  store i32 %call, i32* %arrayidx, align 4, !tbaa !0
+  %arrayidx = getelementptr inbounds i32, ptr %bp, i32 %i.017
+  store i32 %call, ptr %arrayidx, align 4, !tbaa !0
   %add = add i32 %i.017, 2
-  %arrayidx3 = getelementptr inbounds i32, i32* %bp, i32 %add
-  store i32 %call1, i32* %arrayidx3, align 4, !tbaa !0
+  %arrayidx3 = getelementptr inbounds i32, ptr %bp, i32 %add
+  store i32 %call1, ptr %arrayidx3, align 4, !tbaa !0
   br label %for.inc
 
 if.else:                                          ; preds = %for.body
   %or = or i32 %call1, %call
-  %arrayidx4 = getelementptr inbounds i32, i32* %bp, i32 %i.017
-  store i32 %or, i32* %arrayidx4, align 4, !tbaa !0
+  %arrayidx4 = getelementptr inbounds i32, ptr %bp, i32 %i.017
+  store i32 %or, ptr %arrayidx4, align 4, !tbaa !0
   br label %for.inc
 
 for.inc:                                          ; preds = %if.then, %if.else
@@ -47,9 +47,9 @@ for.end:                                          ; preds = %for.end.loopexit, %
   ret void
 }
 
-declare i32 @foo(i32*) nounwind
+declare i32 @foo(ptr) nounwind
 
-declare i32 @bar(i32*) nounwind
+declare i32 @bar(ptr) nounwind
 
 attributes #0 = { nounwind "target-cpu"="hexagonv5" }
 

diff  --git a/llvm/test/CodeGen/Hexagon/early-if-vecpi.ll b/llvm/test/CodeGen/Hexagon/early-if-vecpi.ll
index 6fd2aa134807c..62914243edb80 100644
--- a/llvm/test/CodeGen/Hexagon/early-if-vecpi.ll
+++ b/llvm/test/CodeGen/Hexagon/early-if-vecpi.ll
@@ -5,14 +5,14 @@ target triple = "hexagon-unknown--elf"
 ; Check that we can predicate base+offset vector stores.
 ; CHECK-LABEL: sammy
 ; CHECK: if{{.*}}vmem(r{{[0-9]+}}+#0) =
-define void @sammy(<16 x i32>* nocapture %p, <16 x i32>* nocapture readonly %q, i32 %n) #0 {
+define void @sammy(ptr nocapture %p, ptr nocapture readonly %q, i32 %n) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* %q, align 64
+  %0 = load <16 x i32>, ptr %q, align 64
   %sub = add nsw i32 %n, -1
   br label %for.body
 
 for.body:                                         ; preds = %if.end, %entry
-  %p.addr.011 = phi <16 x i32>* [ %p, %entry ], [ %incdec.ptr, %if.end ]
+  %p.addr.011 = phi ptr [ %p, %entry ], [ %incdec.ptr, %if.end ]
   %i.010 = phi i32 [ 0, %entry ], [ %add, %if.end ]
   %mul = mul nsw i32 %i.010, %sub
   %add = add nuw nsw i32 %i.010, 1
@@ -21,11 +21,11 @@ for.body:                                         ; preds = %if.end, %entry
   br i1 %cmp2, label %if.then, label %if.end
 
 if.then:                                          ; preds = %for.body
-  store <16 x i32> %0, <16 x i32>* %p.addr.011, align 64
+  store <16 x i32> %0, ptr %p.addr.011, align 64
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %for.body
-  %incdec.ptr = getelementptr inbounds <16 x i32>, <16 x i32>* %p.addr.011, i32 1
+  %incdec.ptr = getelementptr inbounds <16 x i32>, ptr %p.addr.011, i32 1
   %exitcond = icmp eq i32 %add, 100
   br i1 %exitcond, label %for.end, label %for.body
 
@@ -36,14 +36,14 @@ for.end:                                          ; preds = %if.end
 ; Check that we can predicate post-increment vector stores.
 ; CHECK-LABEL: danny
 ; CHECK: if{{.*}}vmem(r{{[0-9]+}}++#1) =
-define void @danny(<16 x i32>* nocapture %p, <16 x i32>* nocapture readonly %q, i32 %n) #0 {
+define void @danny(ptr nocapture %p, ptr nocapture readonly %q, i32 %n) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* %q, align 64
+  %0 = load <16 x i32>, ptr %q, align 64
   %sub = add nsw i32 %n, -1
   br label %for.body
 
 for.body:                                         ; preds = %if.end, %entry
-  %p.addr.012 = phi <16 x i32>* [ %p, %entry ], [ %incdec.ptr3, %if.end ]
+  %p.addr.012 = phi ptr [ %p, %entry ], [ %incdec.ptr3, %if.end ]
   %i.011 = phi i32 [ 0, %entry ], [ %add, %if.end ]
   %mul = mul nsw i32 %i.011, %sub
   %add = add nuw nsw i32 %i.011, 1
@@ -52,13 +52,13 @@ for.body:                                         ; preds = %if.end, %entry
   br i1 %cmp2, label %if.then, label %if.end
 
 if.then:                                          ; preds = %for.body
-  %incdec.ptr = getelementptr inbounds <16 x i32>, <16 x i32>* %p.addr.012, i32 1
-  store <16 x i32> %0, <16 x i32>* %p.addr.012, align 64
+  %incdec.ptr = getelementptr inbounds <16 x i32>, ptr %p.addr.012, i32 1
+  store <16 x i32> %0, ptr %p.addr.012, align 64
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %for.body
-  %p.addr.1 = phi <16 x i32>* [ %incdec.ptr, %if.then ], [ %p.addr.012, %for.body ]
-  %incdec.ptr3 = getelementptr inbounds <16 x i32>, <16 x i32>* %p.addr.1, i32 1
+  %p.addr.1 = phi ptr [ %incdec.ptr, %if.then ], [ %p.addr.012, %for.body ]
+  %incdec.ptr3 = getelementptr inbounds <16 x i32>, ptr %p.addr.1, i32 1
   %exitcond = icmp eq i32 %add, 100
   br i1 %exitcond, label %for.end, label %for.body
 

diff  --git a/llvm/test/CodeGen/Hexagon/early-if-vecpred.ll b/llvm/test/CodeGen/Hexagon/early-if-vecpred.ll
index 372e96dbff836..2694fe58029a7 100644
--- a/llvm/test/CodeGen/Hexagon/early-if-vecpred.ll
+++ b/llvm/test/CodeGen/Hexagon/early-if-vecpred.ll
@@ -23,7 +23,7 @@ b3:                                               ; preds = %b1
 
 b5:                                               ; preds = %b3, %b1
   %v6 = phi <128 x i1> [ %v4, %b3 ], [ %v2, %b1 ]
-  tail call void asm sideeffect "if ($0) vmem($1) = $2;", "q,r,v,~{memory}"(<128 x i1> %v6, <32 x i32>* undef, <32 x i32> undef) #2
+  tail call void asm sideeffect "if ($0) vmem($1) = $2;", "q,r,v,~{memory}"(<128 x i1> %v6, ptr undef, <32 x i32> undef) #2
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/early-if.ll b/llvm/test/CodeGen/Hexagon/early-if.ll
index 4809fdf5774bd..3fea4833817cd 100644
--- a/llvm/test/CodeGen/Hexagon/early-if.ll
+++ b/llvm/test/CodeGen/Hexagon/early-if.ll
@@ -21,15 +21,15 @@ declare i64 @llvm.hexagon.A2.vaddws(i64, i64) nounwind readnone
 declare i64 @llvm.hexagon.A2.vsubws(i64, i64) nounwind readnone
 declare i32 @llvm.hexagon.A4.modwrapu(i32, i32) nounwind readnone
 
-define void @foo(i32 %n, i64* %ptr) #0 {
+define void @foo(i32 %n, ptr %ptr) #0 {
 entry:
   br label %while.body
 
 while.body:
   %count = phi i32 [ 0, %entry ], [ %next, %while.end ]
   %idx = phi i32 [ 0, %entry ], [ %15, %while.end ]
-  %0 = load i32, i32* @B1, align 4
-  %1 = load i32, i32* @B2, align 8
+  %0 = load i32, ptr @B1, align 4
+  %1 = load i32, ptr @B2, align 8
   %2 = and i32 %1, %0
   br label %while.body13
 
@@ -39,21 +39,21 @@ while.body13:                                     ; preds = %while.body, %if.end
   %m = phi i32 [ %6, %if.end ], [ %2, %while.body ]
   %5 = tail call i32 @llvm.hexagon.S2.cl0(i32 %m)
   %6 = tail call i32 @llvm.hexagon.S2.setbit.r(i32 %m, i32 %5)
-  %cgep85 = getelementptr [10 x %struct.2], [10 x %struct.2]* inttoptr (i32 -121502345 to [10 x %struct.2]*), i32 0, i32 %idx
-  %cgep90 = getelementptr %struct.2, %struct.2* %cgep85, i32 0, i32 12, i32 %5
-  %7 = load i32, i32* %cgep90, align 4
+  %cgep85 = getelementptr [10 x %struct.2], ptr inttoptr (i32 -121502345 to ptr), i32 0, i32 %idx
+  %cgep90 = getelementptr %struct.2, ptr %cgep85, i32 0, i32 12, i32 %5
+  %7 = load i32, ptr %cgep90, align 4
   %8 = tail call i64 @llvm.hexagon.M2.vmpy2s.s0(i32 %7, i32 %7)
-  %cgep91 = getelementptr %struct.2, %struct.2* %cgep85, i32 0, i32 13, i32 %5
-  %9 = load i32, i32* %cgep91, align 4
+  %cgep91 = getelementptr %struct.2, ptr %cgep85, i32 0, i32 13, i32 %5
+  %9 = load i32, ptr %cgep91, align 4
   %10 = tail call i64 @llvm.hexagon.M2.vmac2s.s0(i64 %8, i32 %9, i32 %9)
-  %11 = load i8, i8* @C1, align 1
+  %11 = load i8, ptr @C1, align 1
   %and24 = and i8 %11, 1
   %cmp = icmp eq i8 %and24, 0
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %while.body13
   %12 = tail call i64 @llvm.hexagon.A2.vaddws(i64 %3, i64 %10)
-  store i64 %12, i64* %ptr, align 8
+  store i64 %12, ptr %ptr, align 8
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %while.body13
@@ -70,7 +70,7 @@ while.end:
   br i1 %cc, label %end, label %while.body
 
 end:
-  store i64 %10, i64* @A2, align 8
+  store i64 %10, ptr @A2, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/eh_return-r30.ll b/llvm/test/CodeGen/Hexagon/eh_return-r30.ll
index 36063fd1a2a23..02e18d50fda84 100644
--- a/llvm/test/CodeGen/Hexagon/eh_return-r30.ll
+++ b/llvm/test/CodeGen/Hexagon/eh_return-r30.ll
@@ -4,14 +4,14 @@
 target triple = "hexagon"
 
 ; Function Attrs: noreturn nounwind
-define void @f0(i32 %a0, i8* %a1) #0 {
+define void @f0(i32 %a0, ptr %a1) #0 {
 b0:
-  tail call void @llvm.eh.return.i32(i32 %a0, i8* %a1)
+  tail call void @llvm.eh.return.i32(i32 %a0, ptr %a1)
   unreachable
 }
 
 ; Function Attrs: nounwind
-declare void @llvm.eh.return.i32(i32, i8*) #1
+declare void @llvm.eh.return.i32(i32, ptr) #1
 
 attributes #0 = { noreturn nounwind }
 attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/eh_return.ll b/llvm/test/CodeGen/Hexagon/eh_return.ll
index 1596ade24c820..9aa1c8e578341 100644
--- a/llvm/test/CodeGen/Hexagon/eh_return.ll
+++ b/llvm/test/CodeGen/Hexagon/eh_return.ll
@@ -13,36 +13,35 @@
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-a0:0-n32"
 target triple = "hexagon-unknown-linux-gnu"
 
-%struct.Data = type { i32, i8* }
+%struct.Data = type { i32, ptr }
 
 define i32 @test_eh_return(i32 %a, i32 %b) nounwind {
 entry:
   %a.addr = alloca i32, align 4
   %b.addr = alloca i32, align 4
   %d = alloca %struct.Data, align 4
-  store i32 %a, i32* %a.addr, align 4
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load i32, i32* %a.addr, align 4
-  %1 = load i32, i32* %b.addr, align 4
+  store i32 %a, ptr %a.addr, align 4
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load i32, ptr %a.addr, align 4
+  %1 = load i32, ptr %b.addr, align 4
   %cmp = icmp sgt i32 %0, %1
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  %2 = load i32, i32* %a.addr, align 4
-  %3 = load i32, i32* %b.addr, align 4
+  %2 = load i32, ptr %a.addr, align 4
+  %3 = load i32, ptr %b.addr, align 4
   %add = add nsw i32 %2, %3
   ret i32 %add
 
 if.else:                                          ; preds = %entry
-  %call = call i32 @setup(%struct.Data* %d)
-  %_d1 = getelementptr inbounds %struct.Data, %struct.Data* %d, i32 0, i32 0
-  %4 = load i32, i32* %_d1, align 4
-  %_d2 = getelementptr inbounds %struct.Data, %struct.Data* %d, i32 0, i32 1
-  %5 = load i8*, i8** %_d2, align 4
-  call void @llvm.eh.return.i32(i32 %4, i8* %5)
+  %call = call i32 @setup(ptr %d)
+  %4 = load i32, ptr %d, align 4
+  %_d2 = getelementptr inbounds %struct.Data, ptr %d, i32 0, i32 1
+  %5 = load ptr, ptr %_d2, align 4
+  call void @llvm.eh.return.i32(i32 %4, ptr %5)
   unreachable
 }
 
-declare i32 @setup(%struct.Data*)
+declare i32 @setup(ptr)
 
-declare void @llvm.eh.return.i32(i32, i8*) nounwind
+declare void @llvm.eh.return.i32(i32, ptr) nounwind

diff  --git a/llvm/test/CodeGen/Hexagon/eh_save_restore.ll b/llvm/test/CodeGen/Hexagon/eh_save_restore.ll
index b5217f2d1c57d..9da2b42c5c287 100644
--- a/llvm/test/CodeGen/Hexagon/eh_save_restore.ll
+++ b/llvm/test/CodeGen/Hexagon/eh_save_restore.ll
@@ -23,66 +23,60 @@
 %s.0 = type { i32 }
 
 @g0 = global i32 0, align 4
-@g1 = external constant i8*
+@g1 = external constant ptr
 
 ; Function Attrs: noreturn
-define void @f0(i64 %a0) #0 personality i8* bitcast (i32 (...)* @f2 to i8*) {
+define void @f0(i64 %a0) #0 personality ptr @f2 {
 b0:
   %v0 = alloca %s.0, align 4
   %v1 = trunc i64 %a0 to i32
   %v2 = lshr i64 %a0, 32
   %v3 = trunc i64 %v2 to i32
-  %v4 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 0
-  store i32 0, i32* %v4, align 4, !tbaa !0
-  %v5 = load i32, i32* @g0, align 4, !tbaa !5
+  store i32 0, ptr %v0, align 4, !tbaa !0
+  %v5 = load i32, ptr @g0, align 4, !tbaa !5
   %v6 = or i32 %v5, 1
-  store i32 %v6, i32* @g0, align 4, !tbaa !5
-  %v7 = call i8* @f1(i32 4) #1
-  %v8 = bitcast i8* %v7 to i32*
-  %v9 = bitcast %s.0* %v0 to i8*
-  %v10 = getelementptr inbounds i8, i8* %v9, i32 %v3
-  %v11 = bitcast i8* %v10 to %s.0*
+  store i32 %v6, ptr @g0, align 4, !tbaa !5
+  %v7 = call ptr @f1(i32 4) #1
+  %v10 = getelementptr inbounds i8, ptr %v0, i32 %v3
   %v12 = and i32 %v1, 1
   %v13 = icmp eq i32 %v12, 0
   br i1 %v13, label %b2, label %b1
 
 b1:                                               ; preds = %b0
-  %v14 = bitcast i8* %v10 to i8**
-  %v15 = load i8*, i8** %v14, align 4
+  %v15 = load ptr, ptr %v10, align 4
   %v16 = add i32 %v1, -1
-  %v17 = getelementptr i8, i8* %v15, i32 %v16
-  %v18 = bitcast i8* %v17 to i32 (%s.0*)**
-  %v19 = load i32 (%s.0*)*, i32 (%s.0*)** %v18, align 4
+  %v17 = getelementptr i8, ptr %v15, i32 %v16
+  %v19 = load ptr, ptr %v17, align 4
   br label %b3
 
 b2:                                               ; preds = %b0
-  %v20 = inttoptr i32 %v1 to i32 (%s.0*)*
+  %v20 = inttoptr i32 %v1 to ptr
   br label %b3
 
 b3:                                               ; preds = %b2, %b1
-  %v21 = phi i32 (%s.0*)* [ %v19, %b1 ], [ %v20, %b2 ]
-  %v22 = invoke i32 %v21(%s.0* %v11)
+  %v21 = phi ptr [ %v19, %b1 ], [ %v20, %b2 ]
+  %v22 = invoke i32 %v21(ptr %v10)
           to label %b4 unwind label %b5
 
 b4:                                               ; preds = %b3
-  store i32 %v22, i32* %v8, align 4, !tbaa !5
-  call void @f4(i8* %v7, i8* bitcast (i8** @g1 to i8*), i8* null) #2
+  store i32 %v22, ptr %v7, align 4, !tbaa !5
+  call void @f4(ptr %v7, ptr @g1, ptr null) #2
   unreachable
 
 b5:                                               ; preds = %b3
-  %v23 = landingpad { i8*, i32 }
+  %v23 = landingpad { ptr, i32 }
           cleanup
-  call void @f3(i8* %v7) #1
-  resume { i8*, i32 } %v23
+  call void @f3(ptr %v7) #1
+  resume { ptr, i32 } %v23
 }
 
-declare i8* @f1(i32)
+declare ptr @f1(i32)
 
 declare i32 @f2(...)
 
-declare void @f3(i8*)
+declare void @f3(ptr)
 
-declare void @f4(i8*, i8*, i8*)
+declare void @f4(ptr, ptr, ptr)
 
 attributes #0 = { noreturn "target-cpu"="hexagonv55" }
 attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/ehabi.ll b/llvm/test/CodeGen/Hexagon/ehabi.ll
index cb8fd7892ecf7..d9ed4f6883d8a 100644
--- a/llvm/test/CodeGen/Hexagon/ehabi.ll
+++ b/llvm/test/CodeGen/Hexagon/ehabi.ll
@@ -5,73 +5,71 @@
 
 target triple = "hexagon"
 
-@g0 = external constant i8*
+@g0 = external constant ptr
 
-define i32 @f0() #0 personality i8* bitcast (i32 (...)* @f3 to i8*) {
+define i32 @f0() #0 personality ptr @f3 {
 b0:
   %v0 = alloca i32, align 4
   %v1 = alloca i32, align 4
-  %v2 = alloca i8*
+  %v2 = alloca ptr
   %v3 = alloca i32
   %v4 = alloca i32, align 4
-  store i32 0, i32* %v0
-  store i32 1, i32* %v1, align 4
-  %v5 = call i8* @f1(i32 4) #2
-  %v6 = bitcast i8* %v5 to i32*
-  store i32 20, i32* %v6
-  invoke void @f2(i8* %v5, i8* bitcast (i8** @g0 to i8*), i8* null) #3
+  store i32 0, ptr %v0
+  store i32 1, ptr %v1, align 4
+  %v5 = call ptr @f1(i32 4) #2
+  store i32 20, ptr %v5
+  invoke void @f2(ptr %v5, ptr @g0, ptr null) #3
           to label %b6 unwind label %b1
 
 b1:                                               ; preds = %b0
-  %v7 = landingpad { i8*, i32 }
-          catch i8* bitcast (i8** @g0 to i8*)
-  %v8 = extractvalue { i8*, i32 } %v7, 0
-  store i8* %v8, i8** %v2
-  %v9 = extractvalue { i8*, i32 } %v7, 1
-  store i32 %v9, i32* %v3
+  %v7 = landingpad { ptr, i32 }
+          catch ptr @g0
+  %v8 = extractvalue { ptr, i32 } %v7, 0
+  store ptr %v8, ptr %v2
+  %v9 = extractvalue { ptr, i32 } %v7, 1
+  store i32 %v9, ptr %v3
   br label %b2
 
 b2:                                               ; preds = %b1
-  %v10 = load i32, i32* %v3
-  %v11 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @g0 to i8*)) #2
+  %v10 = load i32, ptr %v3
+  %v11 = call i32 @llvm.eh.typeid.for(ptr @g0) #2
   %v12 = icmp eq i32 %v10, %v11
   br i1 %v12, label %b3, label %b5
 
 b3:                                               ; preds = %b2
-  %v13 = load i8*, i8** %v2
-  %v14 = call i8* @f4(i8* %v13) #2
-  %v15 = bitcast i8* %v14 to i32*
-  %v16 = load i32, i32* %v15, align 4
-  store i32 %v16, i32* %v4, align 4
-  store i32 2, i32* %v1, align 4
+  %v13 = load ptr, ptr %v2
+  %v14 = call ptr @f4(ptr %v13) #2
+  %v16 = load i32, ptr %v14, align 4
+  store i32 %v16, ptr %v4, align 4
+  store i32 2, ptr %v1, align 4
   call void @f5() #2
   br label %b4
 
 b4:                                               ; preds = %b3
-  %v17 = load i32, i32* %v1, align 4
+  %v17 = load i32, ptr %v1, align 4
   ret i32 %v17
 
 b5:                                               ; preds = %b2
-  %v18 = load i8*, i8** %v2
-  %v19 = load i32, i32* %v3
-  %v20 = insertvalue { i8*, i32 } undef, i8* %v18, 0
-  %v21 = insertvalue { i8*, i32 } %v20, i32 %v19, 1
-  resume { i8*, i32 } %v21
+  %v18 = load ptr, ptr %v2
+  %v19 = load i32, ptr %v3
+  %v20 = insertvalue { ptr, i32 } undef, ptr %v18, 0
+  %v21 = insertvalue { ptr, i32 } %v20, i32 %v19, 1
+  resume { ptr, i32 } %v21
 
 b6:                                               ; preds = %b0
   unreachable
 }
 
-declare i8* @f1(i32)
+declare ptr @f1(i32)
 
-declare void @f2(i8*, i8*, i8*)
+declare void @f2(ptr, ptr, ptr)
 
 declare i32 @f3(...)
 
 ; Function Attrs: nounwind readnone
-declare i32 @llvm.eh.typeid.for(i8*) #1
+declare i32 @llvm.eh.typeid.for(ptr) #1
 
-declare i8* @f4(i8*)
+declare ptr @f4(ptr)
 
 declare void @f5()
 

diff  --git a/llvm/test/CodeGen/Hexagon/eliminate-pred-spill.ll b/llvm/test/CodeGen/Hexagon/eliminate-pred-spill.ll
index 7cc92736fda4a..8a28d9f01559c 100644
--- a/llvm/test/CodeGen/Hexagon/eliminate-pred-spill.ll
+++ b/llvm/test/CodeGen/Hexagon/eliminate-pred-spill.ll
@@ -3,129 +3,127 @@
 ; This spill should be eliminated.
 ; CHECK-NOT: vmem(r29+#6)
 
-define void @test(i8* noalias nocapture %key, i8* noalias nocapture %data1) #0 {
+define void @test(ptr noalias nocapture %key, ptr noalias nocapture %data1) #0 {
 entry:
-  %0 = bitcast i8* %key to <32 x i32>*
-  %1 = bitcast i8* %data1 to <32 x i32>*
   br label %for.body
 
 for.body:
-  %pkey.0542 = phi <32 x i32>* [ %0, %entry ], [ null, %for.body ]
-  %pdata0.0541 = phi <32 x i32>* [ null, %entry ], [ %add.ptr48, %for.body ]
-  %pdata1.0540 = phi <32 x i32>* [ %1, %entry ], [ %add.ptr49, %for.body ]
-  %dAccum0.0539 = phi <64 x i32> [ undef, %entry ], [ %86, %for.body ]
-  %2 = load <32 x i32>, <32 x i32>* %pkey.0542, align 128
-  %3 = load <32 x i32>, <32 x i32>* %pdata0.0541, align 128
-  %4 = load <32 x i32>, <32 x i32>* undef, align 128
-  %arrayidx4 = getelementptr inbounds <32 x i32>, <32 x i32>* %pdata0.0541, i32 2
-  %5 = load <32 x i32>, <32 x i32>* %arrayidx4, align 128
-  %arrayidx5 = getelementptr inbounds <32 x i32>, <32 x i32>* %pdata1.0540, i32 2
-  %6 = load <32 x i32>, <32 x i32>* %arrayidx5, align 128
-  %7 = load <32 x i32>, <32 x i32>* null, align 128
-  %8 = load <32 x i32>, <32 x i32>* undef, align 128
-  %9 = load <32 x i32>, <32 x i32>* null, align 128
-  %arrayidx9 = getelementptr inbounds <32 x i32>, <32 x i32>* %pkey.0542, i32 3
-  %arrayidx10 = getelementptr inbounds <32 x i32>, <32 x i32>* %pdata0.0541, i32 6
-  %10 = load <32 x i32>, <32 x i32>* %arrayidx10, align 128
-  %arrayidx12 = getelementptr inbounds <32 x i32>, <32 x i32>* %pkey.0542, i32 4
-  %11 = load <32 x i32>, <32 x i32>* %arrayidx12, align 128
-  %arrayidx13 = getelementptr inbounds <32 x i32>, <32 x i32>* %pdata0.0541, i32 8
-  %arrayidx14 = getelementptr inbounds <32 x i32>, <32 x i32>* %pdata1.0540, i32 8
-  %12 = load <32 x i32>, <32 x i32>* %arrayidx14, align 128
-  %arrayidx15 = getelementptr inbounds <32 x i32>, <32 x i32>* %pkey.0542, i32 5
-  %13 = load <32 x i32>, <32 x i32>* %arrayidx15, align 128
-  %arrayidx16 = getelementptr inbounds <32 x i32>, <32 x i32>* %pdata0.0541, i32 10
-  %arrayidx17 = getelementptr inbounds <32 x i32>, <32 x i32>* %pdata1.0540, i32 10
-  %14 = load <32 x i32>, <32 x i32>* %arrayidx17, align 128
-  %arrayidx18 = getelementptr inbounds <32 x i32>, <32 x i32>* %pkey.0542, i32 6
-  %15 = load <32 x i32>, <32 x i32>* %arrayidx18, align 128
-  %arrayidx19 = getelementptr inbounds <32 x i32>, <32 x i32>* %pdata0.0541, i32 12
-  %16 = load <32 x i32>, <32 x i32>* %arrayidx19, align 128
-  %arrayidx20 = getelementptr inbounds <32 x i32>, <32 x i32>* %pdata1.0540, i32 12
-  %17 = load <32 x i32>, <32 x i32>* %arrayidx20, align 128
-  %arrayidx22 = getelementptr inbounds <32 x i32>, <32 x i32>* %pdata0.0541, i32 14
-  %18 = load <32 x i32>, <32 x i32>* %arrayidx22, align 128
-  %arrayidx23 = getelementptr inbounds <32 x i32>, <32 x i32>* %pdata1.0540, i32 14
-  %19 = load <32 x i32>, <32 x i32>* %arrayidx23, align 128
-  %20 = tail call <128 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %2, <32 x i32> %11)
-  %21 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %20, <32 x i32> %11, <32 x i32> %2)
-  %22 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %20, <32 x i32> %2, <32 x i32> %11)
-  %23 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %20, <32 x i32> undef, <32 x i32> %3)
-  %24 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %20, <32 x i32> %12, <32 x i32> undef)
-  %25 = tail call <128 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %7, <32 x i32> %15)
-  %26 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %25, <32 x i32> %15, <32 x i32> %7)
-  %27 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %25, <32 x i32> %7, <32 x i32> %15)
-  %28 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %25, <32 x i32> %16, <32 x i32> %8)
-  %29 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %25, <32 x i32> %8, <32 x i32> %16)
-  %30 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %25, <32 x i32> %17, <32 x i32> %9)
-  %31 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %25, <32 x i32> %9, <32 x i32> %17)
-  %32 = tail call <128 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %4, <32 x i32> %13)
-  %33 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %32, <32 x i32> %13, <32 x i32> %4)
-  %34 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %32, <32 x i32> %4, <32 x i32> %13)
-  %35 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %32, <32 x i32> undef, <32 x i32> %5)
-  %36 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %32, <32 x i32> %5, <32 x i32> undef)
-  %37 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %32, <32 x i32> %14, <32 x i32> %6)
-  %38 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %32, <32 x i32> %6, <32 x i32> %14)
-  %39 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> undef)
-  %40 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> zeroinitializer, <32 x i32> undef, <32 x i32> zeroinitializer)
-  %41 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> zeroinitializer, <32 x i32> %18, <32 x i32> %10)
-  %42 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> zeroinitializer, <32 x i32> %10, <32 x i32> %18)
-  %43 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> zeroinitializer, <32 x i32> %19, <32 x i32> undef)
-  %44 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> zeroinitializer, <32 x i32> undef, <32 x i32> %19)
-  %45 = tail call <128 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %21, <32 x i32> %26)
-  %46 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %45, <32 x i32> %26, <32 x i32> %21)
-  %47 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %45, <32 x i32> %21, <32 x i32> %26)
-  %48 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %45, <32 x i32> %28, <32 x i32> %23)
-  %49 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %45, <32 x i32> %23, <32 x i32> %28)
-  %50 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %45, <32 x i32> %30, <32 x i32> %24)
-  %51 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %45, <32 x i32> %24, <32 x i32> %30)
-  %52 = tail call <128 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %22, <32 x i32> %27)
-  %53 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %52, <32 x i32> %27, <32 x i32> %22)
-  %54 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %52, <32 x i32> %22, <32 x i32> %27)
-  %55 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %52, <32 x i32> %29, <32 x i32> undef)
-  %56 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %52, <32 x i32> undef, <32 x i32> %31)
-  %57 = tail call <128 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %33, <32 x i32> %39)
-  %58 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %57, <32 x i32> %39, <32 x i32> %33)
-  %59 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %57, <32 x i32> %33, <32 x i32> %39)
-  %60 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %57, <32 x i32> %41, <32 x i32> %35)
-  %61 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %57, <32 x i32> %43, <32 x i32> %37)
-  %62 = tail call <128 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %34, <32 x i32> %40)
-  %63 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %62, <32 x i32> %42, <32 x i32> %36)
-  %64 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %62, <32 x i32> %38, <32 x i32> %44)
-  %65 = tail call <128 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %46, <32 x i32> %58)
-  %66 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %65, <32 x i32> %58, <32 x i32> %46)
-  %67 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %65, <32 x i32> %60, <32 x i32> %48)
-  %68 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %65, <32 x i32> %61, <32 x i32> %50)
-  %69 = tail call <128 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %47, <32 x i32> %59)
-  %70 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %69, <32 x i32> %51, <32 x i32> zeroinitializer)
-  %71 = tail call <128 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %53, <32 x i32> zeroinitializer)
-  %72 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %71, <32 x i32> %63, <32 x i32> %55)
-  %73 = tail call <128 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %54, <32 x i32> undef)
-  %74 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %73, <32 x i32> %56, <32 x i32> %64)
-  %75 = tail call <32 x i32> @llvm.hexagon.V6.vshuffeb.128B(<32 x i32> %68, <32 x i32> %67)
-  %76 = tail call <32 x i32> @llvm.hexagon.V6.vshuffeb.128B(<32 x i32> %70, <32 x i32> undef)
-  %77 = tail call <32 x i32> @llvm.hexagon.V6.vshuffeb.128B(<32 x i32> zeroinitializer, <32 x i32> %72)
-  %78 = tail call <32 x i32> @llvm.hexagon.V6.vshuffeb.128B(<32 x i32> %74, <32 x i32> zeroinitializer)
-  %79 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32> %dAccum0.0539, <32 x i32> %75, i32 65537)
-  %80 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32> %79, <32 x i32> zeroinitializer, i32 65537)
-  %81 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32> %80, <32 x i32> zeroinitializer, i32 65537)
-  %82 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32> %81, <32 x i32> %76, i32 65537)
-  %83 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32> %82, <32 x i32> %77, i32 65537)
-  %84 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32> %83, <32 x i32> zeroinitializer, i32 65537)
-  %85 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32> %84, <32 x i32> undef, i32 65537)
-  %86 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32> %85, <32 x i32> %78, i32 65537)
-  store <32 x i32> %66, <32 x i32>* %pkey.0542, align 128
-  store <32 x i32> %75, <32 x i32>* %pdata0.0541, align 128
-  store <32 x i32> zeroinitializer, <32 x i32>* %arrayidx4, align 128
-  store <32 x i32> zeroinitializer, <32 x i32>* undef, align 128
-  store <32 x i32> zeroinitializer, <32 x i32>* %arrayidx20, align 128
-  store <32 x i32> zeroinitializer, <32 x i32>* null, align 128
-  %add.ptr48 = getelementptr inbounds <32 x i32>, <32 x i32>* %pdata0.0541, i32 16
-  %add.ptr49 = getelementptr inbounds <32 x i32>, <32 x i32>* %pdata1.0540, i32 16
+  %pkey.0542 = phi ptr [ %key, %entry ], [ null, %for.body ]
+  %pdata0.0541 = phi ptr [ null, %entry ], [ %add.ptr48, %for.body ]
+  %pdata1.0540 = phi ptr [ %data1, %entry ], [ %add.ptr49, %for.body ]
+  %dAccum0.0539 = phi <64 x i32> [ undef, %entry ], [ %84, %for.body ]
+  %0 = load <32 x i32>, ptr %pkey.0542, align 128
+  %1 = load <32 x i32>, ptr %pdata0.0541, align 128
+  %2 = load <32 x i32>, ptr undef, align 128
+  %arrayidx4 = getelementptr inbounds <32 x i32>, ptr %pdata0.0541, i32 2
+  %3 = load <32 x i32>, ptr %arrayidx4, align 128
+  %arrayidx5 = getelementptr inbounds <32 x i32>, ptr %pdata1.0540, i32 2
+  %4 = load <32 x i32>, ptr %arrayidx5, align 128
+  %5 = load <32 x i32>, ptr null, align 128
+  %6 = load <32 x i32>, ptr undef, align 128
+  %7 = load <32 x i32>, ptr null, align 128
+  %arrayidx9 = getelementptr inbounds <32 x i32>, ptr %pkey.0542, i32 3
+  %arrayidx10 = getelementptr inbounds <32 x i32>, ptr %pdata0.0541, i32 6
+  %8 = load <32 x i32>, ptr %arrayidx10, align 128
+  %arrayidx12 = getelementptr inbounds <32 x i32>, ptr %pkey.0542, i32 4
+  %9 = load <32 x i32>, ptr %arrayidx12, align 128
+  %arrayidx13 = getelementptr inbounds <32 x i32>, ptr %pdata0.0541, i32 8
+  %arrayidx14 = getelementptr inbounds <32 x i32>, ptr %pdata1.0540, i32 8
+  %10 = load <32 x i32>, ptr %arrayidx14, align 128
+  %arrayidx15 = getelementptr inbounds <32 x i32>, ptr %pkey.0542, i32 5
+  %11 = load <32 x i32>, ptr %arrayidx15, align 128
+  %arrayidx16 = getelementptr inbounds <32 x i32>, ptr %pdata0.0541, i32 10
+  %arrayidx17 = getelementptr inbounds <32 x i32>, ptr %pdata1.0540, i32 10
+  %12 = load <32 x i32>, ptr %arrayidx17, align 128
+  %arrayidx18 = getelementptr inbounds <32 x i32>, ptr %pkey.0542, i32 6
+  %13 = load <32 x i32>, ptr %arrayidx18, align 128
+  %arrayidx19 = getelementptr inbounds <32 x i32>, ptr %pdata0.0541, i32 12
+  %14 = load <32 x i32>, ptr %arrayidx19, align 128
+  %arrayidx20 = getelementptr inbounds <32 x i32>, ptr %pdata1.0540, i32 12
+  %15 = load <32 x i32>, ptr %arrayidx20, align 128
+  %arrayidx22 = getelementptr inbounds <32 x i32>, ptr %pdata0.0541, i32 14
+  %16 = load <32 x i32>, ptr %arrayidx22, align 128
+  %arrayidx23 = getelementptr inbounds <32 x i32>, ptr %pdata1.0540, i32 14
+  %17 = load <32 x i32>, ptr %arrayidx23, align 128
+  %18 = tail call <128 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %0, <32 x i32> %9)
+  %19 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %18, <32 x i32> %9, <32 x i32> %0)
+  %20 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %18, <32 x i32> %0, <32 x i32> %9)
+  %21 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %18, <32 x i32> undef, <32 x i32> %1)
+  %22 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %18, <32 x i32> %10, <32 x i32> undef)
+  %23 = tail call <128 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %5, <32 x i32> %13)
+  %24 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %23, <32 x i32> %13, <32 x i32> %5)
+  %25 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %23, <32 x i32> %5, <32 x i32> %13)
+  %26 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %23, <32 x i32> %14, <32 x i32> %6)
+  %27 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %23, <32 x i32> %6, <32 x i32> %14)
+  %28 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %23, <32 x i32> %15, <32 x i32> %7)
+  %29 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %23, <32 x i32> %7, <32 x i32> %15)
+  %30 = tail call <128 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %2, <32 x i32> %11)
+  %31 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %30, <32 x i32> %11, <32 x i32> %2)
+  %32 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %30, <32 x i32> %2, <32 x i32> %11)
+  %33 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %30, <32 x i32> undef, <32 x i32> %3)
+  %34 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %30, <32 x i32> %3, <32 x i32> undef)
+  %35 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %30, <32 x i32> %12, <32 x i32> %4)
+  %36 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %30, <32 x i32> %4, <32 x i32> %12)
+  %37 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> undef)
+  %38 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> zeroinitializer, <32 x i32> undef, <32 x i32> zeroinitializer)
+  %39 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> zeroinitializer, <32 x i32> %16, <32 x i32> %8)
+  %40 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> zeroinitializer, <32 x i32> %8, <32 x i32> %16)
+  %41 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> zeroinitializer, <32 x i32> %17, <32 x i32> undef)
+  %42 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> zeroinitializer, <32 x i32> undef, <32 x i32> %17)
+  %43 = tail call <128 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %19, <32 x i32> %24)
+  %44 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %43, <32 x i32> %24, <32 x i32> %19)
+  %45 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %43, <32 x i32> %19, <32 x i32> %24)
+  %46 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %43, <32 x i32> %26, <32 x i32> %21)
+  %47 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %43, <32 x i32> %21, <32 x i32> %26)
+  %48 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %43, <32 x i32> %28, <32 x i32> %22)
+  %49 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %43, <32 x i32> %22, <32 x i32> %28)
+  %50 = tail call <128 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %20, <32 x i32> %25)
+  %51 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %50, <32 x i32> %25, <32 x i32> %20)
+  %52 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %50, <32 x i32> %20, <32 x i32> %25)
+  %53 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %50, <32 x i32> %27, <32 x i32> undef)
+  %54 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %50, <32 x i32> undef, <32 x i32> %29)
+  %55 = tail call <128 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %31, <32 x i32> %37)
+  %56 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %55, <32 x i32> %37, <32 x i32> %31)
+  %57 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %55, <32 x i32> %31, <32 x i32> %37)
+  %58 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %55, <32 x i32> %39, <32 x i32> %33)
+  %59 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %55, <32 x i32> %41, <32 x i32> %35)
+  %60 = tail call <128 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %32, <32 x i32> %38)
+  %61 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %60, <32 x i32> %40, <32 x i32> %34)
+  %62 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %60, <32 x i32> %36, <32 x i32> %42)
+  %63 = tail call <128 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %44, <32 x i32> %56)
+  %64 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %63, <32 x i32> %56, <32 x i32> %44)
+  %65 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %63, <32 x i32> %58, <32 x i32> %46)
+  %66 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %63, <32 x i32> %59, <32 x i32> %48)
+  %67 = tail call <128 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %45, <32 x i32> %57)
+  %68 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %67, <32 x i32> %49, <32 x i32> zeroinitializer)
+  %69 = tail call <128 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %51, <32 x i32> zeroinitializer)
+  %70 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %69, <32 x i32> %61, <32 x i32> %53)
+  %71 = tail call <128 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %52, <32 x i32> undef)
+  %72 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %71, <32 x i32> %54, <32 x i32> %62)
+  %73 = tail call <32 x i32> @llvm.hexagon.V6.vshuffeb.128B(<32 x i32> %66, <32 x i32> %65)
+  %74 = tail call <32 x i32> @llvm.hexagon.V6.vshuffeb.128B(<32 x i32> %68, <32 x i32> undef)
+  %75 = tail call <32 x i32> @llvm.hexagon.V6.vshuffeb.128B(<32 x i32> zeroinitializer, <32 x i32> %70)
+  %76 = tail call <32 x i32> @llvm.hexagon.V6.vshuffeb.128B(<32 x i32> %72, <32 x i32> zeroinitializer)
+  %77 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32> %dAccum0.0539, <32 x i32> %73, i32 65537)
+  %78 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32> %77, <32 x i32> zeroinitializer, i32 65537)
+  %79 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32> %78, <32 x i32> zeroinitializer, i32 65537)
+  %80 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32> %79, <32 x i32> %74, i32 65537)
+  %81 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32> %80, <32 x i32> %75, i32 65537)
+  %82 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32> %81, <32 x i32> zeroinitializer, i32 65537)
+  %83 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32> %82, <32 x i32> undef, i32 65537)
+  %84 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32> %83, <32 x i32> %76, i32 65537)
+  store <32 x i32> %64, ptr %pkey.0542, align 128
+  store <32 x i32> %73, ptr %pdata0.0541, align 128
+  store <32 x i32> zeroinitializer, ptr %arrayidx4, align 128
+  store <32 x i32> zeroinitializer, ptr undef, align 128
+  store <32 x i32> zeroinitializer, ptr %arrayidx20, align 128
+  store <32 x i32> zeroinitializer, ptr null, align 128
+  %add.ptr48 = getelementptr inbounds <32 x i32>, ptr %pdata0.0541, i32 16
+  %add.ptr49 = getelementptr inbounds <32 x i32>, ptr %pdata1.0540, i32 16
   br i1 false, label %for.end, label %for.body
 
 for.end:
-  %87 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %86)
+  %85 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %84)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/expand-condsets-copy-lis.ll b/llvm/test/CodeGen/Hexagon/expand-condsets-copy-lis.ll
index 943518263b02a..2bb11e9e7de12 100644
--- a/llvm/test/CodeGen/Hexagon/expand-condsets-copy-lis.ll
+++ b/llvm/test/CodeGen/Hexagon/expand-condsets-copy-lis.ll
@@ -24,7 +24,7 @@ b1:                                               ; preds = %b3, %b0
   br i1 undef, label %b2, label %b3
 
 b2:                                               ; preds = %b1
-  store i32 %v8, i32* undef, align 4
+  store i32 %v8, ptr undef, align 4
   br label %b3
 
 b3:                                               ; preds = %b2, %b1

diff  --git a/llvm/test/CodeGen/Hexagon/expand-condsets-dead-bad.ll b/llvm/test/CodeGen/Hexagon/expand-condsets-dead-bad.ll
index 222c70112ab9b..af314d233e808 100644
--- a/llvm/test/CodeGen/Hexagon/expand-condsets-dead-bad.ll
+++ b/llvm/test/CodeGen/Hexagon/expand-condsets-dead-bad.ll
@@ -9,7 +9,7 @@ target triple = "hexagon"
 ; Function Attrs: nounwind
 define void @fred() local_unnamed_addr #0 {
 b0:
-  %v1 = load i32, i32* undef, align 4
+  %v1 = load i32, ptr undef, align 4
   %v2 = and i32 %v1, 603979776
   %v3 = trunc i32 %v2 to i30
   switch i30 %v3, label %b23 [
@@ -21,7 +21,7 @@ b4:                                               ; preds = %b0
   unreachable
 
 b5:                                               ; preds = %b0
-  %v6 = load i32, i32* undef, align 4
+  %v6 = load i32, ptr undef, align 4
   br i1 undef, label %b7, label %b8
 
 b7:                                               ; preds = %b5
@@ -31,8 +31,8 @@ b8:                                               ; preds = %b5
   br label %b9
 
 b9:                                               ; preds = %b8, %b7
-  %v10 = load i32, i32* undef, align 4
-  %v11 = load i32, i32* undef, align 4
+  %v10 = load i32, ptr undef, align 4
+  %v11 = load i32, ptr undef, align 4
   %v12 = mul nsw i32 %v11, %v10
   %v13 = ashr i32 %v12, 13
   %v14 = mul nsw i32 %v13, %v13
@@ -44,7 +44,7 @@ b9:                                               ; preds = %b8, %b7
   %v20 = mul nuw nsw i64 %v19, %v15
   %v21 = trunc i64 %v20 to i32
   %v22 = and i32 %v21, 2147483647
-  store i32 %v22, i32* undef, align 4
+  store i32 %v22, ptr undef, align 4
   unreachable
 
 b23:                                              ; preds = %b0

diff  --git a/llvm/test/CodeGen/Hexagon/expand-condsets-dead-pred.ll b/llvm/test/CodeGen/Hexagon/expand-condsets-dead-pred.ll
index af5737675d31b..d2e8b1c733903 100644
--- a/llvm/test/CodeGen/Hexagon/expand-condsets-dead-pred.ll
+++ b/llvm/test/CodeGen/Hexagon/expand-condsets-dead-pred.ll
@@ -20,7 +20,7 @@ b2:                                               ; preds = %b2, %b1
   %v3 = phi i32 [ 0, %b1 ], [ %v17, %b2 ]
   %v4 = phi i32 [ 0, %b1 ], [ %v16, %b2 ]
   %v5 = phi i32 [ undef, %b1 ], [ %v18, %b2 ]
-  %v6 = load i32, i32* undef, align 8
+  %v6 = load i32, ptr undef, align 8
   %v7 = icmp sgt i32 %v6, undef
   %v8 = select i1 %v7, i32 %v3, i32 %v4
   %v9 = select i1 undef, i32 0, i32 %v8
@@ -37,8 +37,8 @@ b2:                                               ; preds = %b2, %b1
   br i1 %v19, label %b20, label %b2
 
 b20:                                              ; preds = %b2
-  %v21 = getelementptr inbounds [80 x i32], [80 x i32]* @x, i32 0, i32 %v16
-  store i32 -2000, i32* %v21, align 4
+  %v21 = getelementptr inbounds [80 x i32], ptr @x, i32 0, i32 %v16
+  store i32 -2000, ptr %v21, align 4
   br label %b1
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/expand-condsets-dead.ll b/llvm/test/CodeGen/Hexagon/expand-condsets-dead.ll
index 3f8d86d6e3ff8..ba9859aa95cf4 100644
--- a/llvm/test/CodeGen/Hexagon/expand-condsets-dead.ll
+++ b/llvm/test/CodeGen/Hexagon/expand-condsets-dead.ll
@@ -12,7 +12,7 @@ b0:
   br label %b1
 
 b1:                                               ; preds = %b3, %b0
-  %v0 = load i16, i16* undef, align 4
+  %v0 = load i16, ptr undef, align 4
   %v1 = sext i16 %v0 to i32
   %v2 = and i32 %v1, 7
   %v3 = sub nsw i32 8, %v2
@@ -20,9 +20,9 @@ b1:                                               ; preds = %b3, %b0
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
-  %v5 = phi i8* [ undef, %b1 ], [ %v16, %b2 ]
+  %v5 = phi ptr [ undef, %b1 ], [ %v16, %b2 ]
   %v6 = phi i32 [ 4, %b1 ], [ %v17, %b2 ]
-  %v7 = load i8, i8* undef, align 1
+  %v7 = load i8, ptr undef, align 1
   %v8 = zext i8 %v7 to i32
   %v9 = mul nuw nsw i32 %v8, %v3
   %v10 = add nuw nsw i32 0, %v9
@@ -30,9 +30,9 @@ b2:                                               ; preds = %b2, %b1
   %v12 = add nuw nsw i32 0, %v11
   %v13 = lshr i32 %v12, 6
   %v14 = trunc i32 %v13 to i8
-  store i8 %v14, i8* %v5, align 1
-  %v15 = getelementptr inbounds i8, i8* %v5, i32 1
-  %v16 = select i1 undef, i8* undef, i8* %v15
+  store i8 %v14, ptr %v5, align 1
+  %v15 = getelementptr inbounds i8, ptr %v5, i32 1
+  %v16 = select i1 undef, ptr undef, ptr %v15
   %v17 = add nsw i32 %v6, -1
   %v18 = icmp eq i32 %v17, 0
   br i1 %v18, label %b3, label %b2

diff  --git a/llvm/test/CodeGen/Hexagon/expand-condsets-extend.ll b/llvm/test/CodeGen/Hexagon/expand-condsets-extend.ll
index e716925ed8ef1..359c449d75479 100644
--- a/llvm/test/CodeGen/Hexagon/expand-condsets-extend.ll
+++ b/llvm/test/CodeGen/Hexagon/expand-condsets-extend.ll
@@ -8,7 +8,7 @@ target triple = "hexagon"
 
 define void @fred() local_unnamed_addr #0 {
 entry:
-  %0 = load i64, i64* undef, align 8
+  %0 = load i64, ptr undef, align 8
   %shr.i465 = lshr i64 %0, 48
   %trunc = trunc i64 %shr.i465 to i15
   switch i15 %trunc, label %if.end26 [

diff  --git a/llvm/test/CodeGen/Hexagon/expand-condsets-pred-undef.ll b/llvm/test/CodeGen/Hexagon/expand-condsets-pred-undef.ll
index 8fef933dd12e9..dc02f5222ea98 100644
--- a/llvm/test/CodeGen/Hexagon/expand-condsets-pred-undef.ll
+++ b/llvm/test/CodeGen/Hexagon/expand-condsets-pred-undef.ll
@@ -5,16 +5,16 @@ target triple = "hexagon"
 
 %struct.0 = type { i64, i16 }
 
-declare void @foo(%struct.0* noalias nocapture sret(%struct.0), i8 zeroext, i32, i64) #0
+declare void @foo(ptr noalias nocapture sret(%struct.0), i8 zeroext, i32, i64) #0
 
-define hidden fastcc void @fred(%struct.0* noalias nocapture %p, i8 zeroext %t, i32 %r) unnamed_addr #0 {
+define hidden fastcc void @fred(ptr noalias nocapture %p, i8 zeroext %t, i32 %r) unnamed_addr #0 {
 entry:
   %. = select i1 undef, i64 549755813888, i64 1024
   %cmp104 = icmp ult i64 undef, %.
   %inc = zext i1 %cmp104 to i32
   %inc.r = add nsw i32 %inc, %r
   %.inc.r = select i1 undef, i32 0, i32 %inc.r
-  tail call void @foo(%struct.0* sret(%struct.0) %p, i8 zeroext %t, i32 %.inc.r, i64 undef)
+  tail call void @foo(ptr sret(%struct.0) %p, i8 zeroext %t, i32 %.inc.r, i64 undef)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/expand-condsets-rm-segment.ll b/llvm/test/CodeGen/Hexagon/expand-condsets-rm-segment.ll
index 054fb0963070d..bc8ae52eb6549 100644
--- a/llvm/test/CodeGen/Hexagon/expand-condsets-rm-segment.ll
+++ b/llvm/test/CodeGen/Hexagon/expand-condsets-rm-segment.ll
@@ -16,7 +16,7 @@ target triple = "hexagon-unknown--elf"
 @sysctl_sched_migration_cost = constant i32 500000, align 4
 @sysctl_sched_shares_window = global i32 10000000, align 4
 @sysctl_sched_child_runs_first = common global i32 0, align 4
-@cpu_online_mask = external constant %struct.cpumask*
+@cpu_online_mask = external constant ptr
 
 ; Function Attrs: noinline nounwind
 define void @sched_init_granularity() #0 {
@@ -29,20 +29,20 @@ entry:
 define internal fastcc void @update_sysctl() #0 {
 entry:
   %call = tail call i32 @get_update_sysctl_factor()
-  %0 = load i32, i32* @normalized_sysctl_sched_min_granularity, align 4, !tbaa !1
+  %0 = load i32, ptr @normalized_sysctl_sched_min_granularity, align 4, !tbaa !1
   %mul = mul i32 %0, %call
-  store i32 %mul, i32* @sysctl_sched_min_granularity, align 4, !tbaa !1
-  %1 = load i32, i32* @normalized_sysctl_sched_latency, align 4, !tbaa !1
+  store i32 %mul, ptr @sysctl_sched_min_granularity, align 4, !tbaa !1
+  %1 = load i32, ptr @normalized_sysctl_sched_latency, align 4, !tbaa !1
   %mul1 = mul i32 %1, %call
-  store i32 %mul1, i32* @sysctl_sched_latency, align 4, !tbaa !1
-  %2 = load i32, i32* @normalized_sysctl_sched_wakeup_granularity, align 4, !tbaa !1
+  store i32 %mul1, ptr @sysctl_sched_latency, align 4, !tbaa !1
+  %2 = load i32, ptr @normalized_sysctl_sched_wakeup_granularity, align 4, !tbaa !1
   %mul2 = mul i32 %2, %call
-  store i32 %mul2, i32* @sysctl_sched_wakeup_granularity, align 4, !tbaa !1
+  store i32 %mul2, ptr @sysctl_sched_wakeup_granularity, align 4, !tbaa !1
   ret void
 }
 
 ; Function Attrs: noinline nounwind
-define i32 @calc_delta_mine(i32 %delta_exec, i32 %weight, %struct.load_weight* nocapture %lw) #0 {
+define i32 @calc_delta_mine(i32 %delta_exec, i32 %weight, ptr nocapture %lw) #0 {
 entry:
   %cmp = icmp ugt i32 %weight, 1
   %conv = zext i32 %delta_exec to i64
@@ -55,24 +55,23 @@ if.then:                                          ; preds = %entry
 
 if.end:                                           ; preds = %entry, %if.then
   %tmp.0 = phi i64 [ %mul, %if.then ], [ %conv, %entry ]
-  %inv_weight = getelementptr inbounds %struct.load_weight, %struct.load_weight* %lw, i32 0, i32 1
-  %0 = load i32, i32* %inv_weight, align 4, !tbaa !6
+  %inv_weight = getelementptr inbounds %struct.load_weight, ptr %lw, i32 0, i32 1
+  %0 = load i32, ptr %inv_weight, align 4, !tbaa !6
   %tobool4 = icmp eq i32 %0, 0
   br i1 %tobool4, label %if.then5, label %if.end22
 
 if.then5:                                         ; preds = %if.end
-  %weight7 = getelementptr inbounds %struct.load_weight, %struct.load_weight* %lw, i32 0, i32 0
-  %1 = load i32, i32* %weight7, align 4, !tbaa !9
+  %1 = load i32, ptr %lw, align 4, !tbaa !9
   %lnot9 = icmp eq i32 %1, 0
   br i1 %lnot9, label %if.then17, label %if.else19, !prof !10
 
 if.then17:                                        ; preds = %if.then5
-  store i32 -1, i32* %inv_weight, align 4, !tbaa !6
+  store i32 -1, ptr %inv_weight, align 4, !tbaa !6
   br label %if.end22
 
 if.else19:                                        ; preds = %if.then5
   %div = udiv i32 -1, %1
-  store i32 %div, i32* %inv_weight, align 4, !tbaa !6
+  store i32 %div, ptr %inv_weight, align 4, !tbaa !6
   br label %if.end22
 
 if.end22:                                         ; preds = %if.end, %if.then17, %if.else19
@@ -105,7 +104,7 @@ if.end43:                                         ; preds = %if.else37, %if.then
 }
 
 declare i32 @get_update_sysctl_factor() #0
-declare i32 @__bitmap_weight(i32*, i32) #0
+declare i32 @__bitmap_weight(ptr, i32) #0
 
 attributes #0 = { noinline nounwind }
 

diff  --git a/llvm/test/CodeGen/Hexagon/expand-condsets-undef.ll b/llvm/test/CodeGen/Hexagon/expand-condsets-undef.ll
index 85e72aa22f0a5..3f3ad262593a0 100644
--- a/llvm/test/CodeGen/Hexagon/expand-condsets-undef.ll
+++ b/llvm/test/CodeGen/Hexagon/expand-condsets-undef.ll
@@ -7,7 +7,7 @@ target triple = "hexagon"
 ; Function Attrs: nounwind optsize ssp
 define internal fastcc void @foo() nounwind {
 if.else473:
-  %0 = load i64, i64* undef, align 8
+  %0 = load i64, ptr undef, align 8
   %sub = sub nsw i64 undef, %0
   %conv476 = sitofp i64 %sub to double
   %mul477 = fmul double %conv476, 0x3F50624DE0000000

diff  --git a/llvm/test/CodeGen/Hexagon/expand-condsets-undef2.ll b/llvm/test/CodeGen/Hexagon/expand-condsets-undef2.ll
index d62d50d836132..f29a21017dc3b 100644
--- a/llvm/test/CodeGen/Hexagon/expand-condsets-undef2.ll
+++ b/llvm/test/CodeGen/Hexagon/expand-condsets-undef2.ll
@@ -10,7 +10,7 @@ entry:
   br i1 undef, label %cleanup, label %if.end
 
 if.end:
-  %0 = load i32, i32* undef, align 4
+  %0 = load i32, ptr undef, align 4
   %sext = shl i32 %0, 16
   %conv19 = ashr exact i32 %sext, 16
   br i1 undef, label %cleanup, label %for.body.lr.ph

diff  --git a/llvm/test/CodeGen/Hexagon/expand-condsets.ll b/llvm/test/CodeGen/Hexagon/expand-condsets.ll
index 9e1787e661957..c38795ef6f0d3 100644
--- a/llvm/test/CodeGen/Hexagon/expand-condsets.ll
+++ b/llvm/test/CodeGen/Hexagon/expand-condsets.ll
@@ -9,7 +9,7 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @f0(i32 %a0, i32* nocapture %a1, i32* nocapture %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6) #0 {
+define void @f0(i32 %a0, ptr nocapture %a1, ptr nocapture %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6) #0 {
 b0:
   %v0 = icmp ugt i32 %a0, 32
   %v1 = lshr i32 %a0, 6
@@ -19,32 +19,32 @@ b0:
 
 b1:                                               ; preds = %b0
   %v4 = lshr i32 %a0, 2
-  %v5 = getelementptr inbounds i32, i32* %a1, i32 %v4
+  %v5 = getelementptr inbounds i32, ptr %a1, i32 %v4
   br label %b2
 
 b2:                                               ; preds = %b7, %b1
-  %v6 = phi i32* [ %v5, %b1 ], [ %v9, %b7 ]
-  %v7 = phi i32* [ %a1, %b1 ], [ %v49, %b7 ]
+  %v6 = phi ptr [ %v5, %b1 ], [ %v9, %b7 ]
+  %v7 = phi ptr [ %a1, %b1 ], [ %v49, %b7 ]
   %v8 = phi i32 [ 0, %b1 ], [ %v55, %b7 ]
-  %v9 = getelementptr i32, i32* %v6, i32 64
+  %v9 = getelementptr i32, ptr %v6, i32 64
   br label %b3
 
 b3:                                               ; preds = %b3, %b2
   %v10 = phi i32 [ 2, %b2 ], [ %v46, %b3 ]
   %v11 = phi i32 [ 1, %b2 ], [ %v45, %b3 ]
-  %v12 = phi i32* [ %v6, %b2 ], [ %v23, %b3 ]
-  %v13 = phi i32* [ %v7, %b2 ], [ %v19, %b3 ]
+  %v12 = phi ptr [ %v6, %b2 ], [ %v23, %b3 ]
+  %v13 = phi ptr [ %v7, %b2 ], [ %v19, %b3 ]
   %v14 = phi i32 [ 0, %b2 ], [ %v47, %b3 ]
   %v15 = phi i32 [ 0, %b2 ], [ %v41, %b3 ]
   %v16 = phi i32 [ 0, %b2 ], [ %v44, %b3 ]
-  %v17 = getelementptr inbounds i32, i32* %v13, i32 1
-  %v18 = load i32, i32* %v13, align 4, !tbaa !0
-  %v19 = getelementptr inbounds i32, i32* %v13, i32 2
-  %v20 = load i32, i32* %v17, align 4, !tbaa !0
-  %v21 = getelementptr inbounds i32, i32* %v12, i32 1
-  %v22 = load i32, i32* %v12, align 4, !tbaa !0
-  %v23 = getelementptr inbounds i32, i32* %v12, i32 2
-  %v24 = load i32, i32* %v21, align 4, !tbaa !0
+  %v17 = getelementptr inbounds i32, ptr %v13, i32 1
+  %v18 = load i32, ptr %v13, align 4, !tbaa !0
+  %v19 = getelementptr inbounds i32, ptr %v13, i32 2
+  %v20 = load i32, ptr %v17, align 4, !tbaa !0
+  %v21 = getelementptr inbounds i32, ptr %v12, i32 1
+  %v22 = load i32, ptr %v12, align 4, !tbaa !0
+  %v23 = getelementptr inbounds i32, ptr %v12, i32 2
+  %v24 = load i32, ptr %v21, align 4, !tbaa !0
   %v25 = tail call i32 @llvm.hexagon.A2.add(i32 %v22, i32 %a4)
   %v26 = tail call i32 @llvm.hexagon.A2.sub(i32 %v25, i32 %a3)
   %v27 = tail call i32 @llvm.hexagon.A2.add(i32 %v24, i32 %a4)
@@ -72,21 +72,21 @@ b3:                                               ; preds = %b3, %b2
   br i1 %v48, label %b4, label %b3
 
 b4:                                               ; preds = %b3
-  %v49 = getelementptr i32, i32* %v7, i32 64
+  %v49 = getelementptr i32, ptr %v7, i32 64
   br i1 %v0, label %b5, label %b6
 
 b5:                                               ; preds = %b4
-  %v50 = getelementptr inbounds i32, i32* %a2, i32 %v8
-  store i32 %v41, i32* %v50, align 4, !tbaa !0
+  %v50 = getelementptr inbounds i32, ptr %a2, i32 %v8
+  store i32 %v41, ptr %v50, align 4, !tbaa !0
   %v51 = add i32 %v8, %v2
-  %v52 = getelementptr inbounds i32, i32* %a2, i32 %v51
-  store i32 %v44, i32* %v52, align 4, !tbaa !0
+  %v52 = getelementptr inbounds i32, ptr %a2, i32 %v51
+  store i32 %v44, ptr %v52, align 4, !tbaa !0
   br label %b7
 
 b6:                                               ; preds = %b4
   %v53 = or i32 %v41, %v44
-  %v54 = getelementptr inbounds i32, i32* %a2, i32 %v8
-  store i32 %v53, i32* %v54, align 4, !tbaa !0
+  %v54 = getelementptr inbounds i32, ptr %a2, i32 %v8
+  store i32 %v53, ptr %v54, align 4, !tbaa !0
   br label %b7
 
 b7:                                               ; preds = %b6, %b5

diff  --git a/llvm/test/CodeGen/Hexagon/expand-vstorerw-undef.ll b/llvm/test/CodeGen/Hexagon/expand-vstorerw-undef.ll
index fa66db24fea15..867ce3b930f8f 100644
--- a/llvm/test/CodeGen/Hexagon/expand-vstorerw-undef.ll
+++ b/llvm/test/CodeGen/Hexagon/expand-vstorerw-undef.ll
@@ -28,7 +28,7 @@ declare <64 x i32> @llvm.hexagon.V6.vaddh.dv.128B(<64 x i32>, <64 x i32>) #1
 
 define hidden void @fred() #2 {
 b0:
-  %v1 = load i32, i32* null, align 4
+  %v1 = load i32, ptr null, align 4
   %v2 = icmp ult i64 0, 2147483648
   br i1 %v2, label %b3, label %b5
 
@@ -80,11 +80,10 @@ b22:                                              ; preds = %b22, %b18
   %v27 = tail call <32 x i32> @llvm.hexagon.V6.vshuffeb.128B(<32 x i32> zeroinitializer, <32 x i32> zeroinitializer) #0
   %v28 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v26) #0
   %v29 = tail call <32 x i32> @llvm.hexagon.V6.vshuffeb.128B(<32 x i32> zeroinitializer, <32 x i32> %v28) #0
-  store <32 x i32> %v27, <32 x i32>* null, align 128
+  store <32 x i32> %v27, ptr null, align 128
   %v30 = add nsw i32 0, 128
-  %v31 = getelementptr inbounds i8, i8* null, i32 %v30
-  %v32 = bitcast i8* %v31 to <32 x i32>*
-  store <32 x i32> %v29, <32 x i32>* %v32, align 128
+  %v31 = getelementptr inbounds i8, ptr null, i32 %v30
+  store <32 x i32> %v29, ptr %v31, align 128
   %v33 = icmp eq i32 0, 0
   br i1 %v33, label %b21, label %b22
 }

diff  --git a/llvm/test/CodeGen/Hexagon/expand-vstorerw-undef2.ll b/llvm/test/CodeGen/Hexagon/expand-vstorerw-undef2.ll
index 641d53c87837b..5c14e5de4c7ac 100644
--- a/llvm/test/CodeGen/Hexagon/expand-vstorerw-undef2.ll
+++ b/llvm/test/CodeGen/Hexagon/expand-vstorerw-undef2.ll
@@ -6,7 +6,7 @@
 
 target triple = "hexagon-unknown--elf"
 
-declare noalias i8* @halide_malloc() local_unnamed_addr #0
+declare noalias ptr @halide_malloc() local_unnamed_addr #0
 declare void @halide_free() local_unnamed_addr #0
 
 declare <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32>) #1
@@ -37,14 +37,10 @@ b1:                                               ; preds = %b0
   ret void
 
 b2:                                               ; preds = %b0
-  %v3 = tail call i8* @halide_malloc()
-  %v4 = bitcast i8* %v3 to i16*
-  %v5 = tail call i8* @halide_malloc()
-  %v6 = bitcast i8* %v5 to i16*
-  %v7 = tail call i8* @halide_malloc()
-  %v8 = bitcast i8* %v7 to i16*
-  %v9 = tail call i8* @halide_malloc()
-  %v10 = bitcast i8* %v9 to i16*
+  %v3 = tail call ptr @halide_malloc()
+  %v5 = tail call ptr @halide_malloc()
+  %v7 = tail call ptr @halide_malloc()
+  %v9 = tail call ptr @halide_malloc()
   br label %b11
 
 b11:                                              ; preds = %b11, %b2
@@ -70,12 +66,12 @@ b19:                                              ; preds = %b17, %b13
   %v21 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> zeroinitializer, <32 x i32> %v20) #2
   %v22 = tail call <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32> %v21, <32 x i32> undef, i32 -2)
   %v23 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v22)
-  store <32 x i32> %v23, <32 x i32>* undef, align 128
+  store <32 x i32> %v23, ptr undef, align 128
   tail call void @halide_free() #3
   br label %b24
 
 b24:                                              ; preds = %b33, %b19
-  %v25 = load <32 x i32>, <32 x i32>* undef, align 128
+  %v25 = load <32 x i32>, ptr undef, align 128
   %v26 = fptoui float undef to i16
   %v27 = tail call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 -2147450880) #2
   %v28 = xor i16 %v26, -1
@@ -93,15 +89,14 @@ b34:                                              ; preds = %b34, %b24
   %v36 = phi <32 x i32> [ undef, %b34 ], [ %v25, %b24 ]
   %v37 = phi <32 x i32> [ %v46, %b34 ], [ undef, %b24 ]
   %v38 = phi i32 [ %v145, %b34 ], [ 0, %b24 ]
-  %v39 = load <32 x i32>, <32 x i32>* undef, align 128
+  %v39 = load <32 x i32>, ptr undef, align 128
   %v40 = add nsw i32 %v38, undef
   %v41 = shl nsw i32 %v40, 6
   %v42 = add nsw i32 %v41, 64
-  %v43 = getelementptr inbounds i16, i16* %v6, i32 %v42
-  %v44 = bitcast i16* %v43 to <32 x i32>*
-  %v45 = load <32 x i32>, <32 x i32>* %v44, align 128
-  %v46 = load <32 x i32>, <32 x i32>* undef, align 128
-  %v47 = load <32 x i32>, <32 x i32>* null, align 128
+  %v43 = getelementptr inbounds i16, ptr %v5, i32 %v42
+  %v45 = load <32 x i32>, ptr %v43, align 128
+  %v46 = load <32 x i32>, ptr undef, align 128
+  %v47 = load <32 x i32>, ptr null, align 128
   %v48 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> undef, <32 x i32> undef, i32 2)
   %v49 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v45, <32 x i32> %v35, i32 24)
   %v50 = tail call <32 x i32> @llvm.hexagon.V6.vsubhsat.128B(<32 x i32> %v48, <32 x i32> %v49) #2
@@ -145,18 +140,14 @@ b34:                                              ; preds = %b34, %b24
   %v88 = tail call <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32> %v87, <32 x i32> %v67, i32 -2)
   %v89 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v88)
   %v90 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v88)
-  %v91 = getelementptr inbounds i16, i16* %v10, i32 undef
-  %v92 = bitcast i16* %v91 to <32 x i32>*
-  store <32 x i32> %v90, <32 x i32>* %v92, align 128
-  %v93 = getelementptr inbounds i16, i16* %v10, i32 undef
-  %v94 = bitcast i16* %v93 to <32 x i32>*
-  store <32 x i32> %v89, <32 x i32>* %v94, align 128
-  %v95 = getelementptr inbounds i16, i16* %v4, i32 undef
-  %v96 = bitcast i16* %v95 to <32 x i32>*
-  %v97 = load <32 x i32>, <32 x i32>* %v96, align 128
-  %v98 = getelementptr inbounds i16, i16* %v8, i32 undef
-  %v99 = bitcast i16* %v98 to <32 x i32>*
-  %v100 = load <32 x i32>, <32 x i32>* %v99, align 128
+  %v91 = getelementptr inbounds i16, ptr %v9, i32 undef
+  store <32 x i32> %v90, ptr %v91, align 128
+  %v93 = getelementptr inbounds i16, ptr %v9, i32 undef
+  store <32 x i32> %v89, ptr %v93, align 128
+  %v95 = getelementptr inbounds i16, ptr %v3, i32 undef
+  %v97 = load <32 x i32>, ptr %v95, align 128
+  %v98 = getelementptr inbounds i16, ptr %v7, i32 undef
+  %v100 = load <32 x i32>, ptr %v98, align 128
   %v101 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> undef, <32 x i32> %v36, i32 22)
   %v102 = tail call <32 x i32> @llvm.hexagon.V6.vsubhsat.128B(<32 x i32> %v100, <32 x i32> %v101) #2
   %v103 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> undef, <32 x i32> %v102) #2
@@ -170,21 +161,15 @@ b34:                                              ; preds = %b34, %b24
   %v111 = tail call <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32> %v110, <32 x i32> %v103, i32 -2)
   %v112 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v111)
   %v113 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v111)
-  %v114 = getelementptr inbounds i16, i16* %v10, i32 undef
-  %v115 = bitcast i16* %v114 to <32 x i32>*
-  store <32 x i32> %v113, <32 x i32>* %v115, align 128
-  %v116 = getelementptr inbounds i16, i16* %v10, i32 undef
-  %v117 = bitcast i16* %v116 to <32 x i32>*
-  store <32 x i32> %v112, <32 x i32>* %v117, align 128
-  %v118 = getelementptr inbounds i16, i16* %v4, i32 undef
-  %v119 = bitcast i16* %v118 to <32 x i32>*
-  %v120 = load <32 x i32>, <32 x i32>* %v119, align 128
-  %v121 = getelementptr inbounds i16, i16* %v6, i32 undef
-  %v122 = bitcast i16* %v121 to <32 x i32>*
-  %v123 = load <32 x i32>, <32 x i32>* %v122, align 128
-  %v124 = getelementptr inbounds i16, i16* %v6, i32 0
-  %v125 = bitcast i16* %v124 to <32 x i32>*
-  %v126 = load <32 x i32>, <32 x i32>* %v125, align 128
+  %v114 = getelementptr inbounds i16, ptr %v9, i32 undef
+  store <32 x i32> %v113, ptr %v114, align 128
+  %v116 = getelementptr inbounds i16, ptr %v9, i32 undef
+  store <32 x i32> %v112, ptr %v116, align 128
+  %v118 = getelementptr inbounds i16, ptr %v3, i32 undef
+  %v120 = load <32 x i32>, ptr %v118, align 128
+  %v121 = getelementptr inbounds i16, ptr %v5, i32 undef
+  %v123 = load <32 x i32>, ptr %v121, align 128
+  %v126 = load <32 x i32>, ptr %v5, align 128
   %v127 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v126, <32 x i32> %v123, i32 22)
   %v128 = tail call <32 x i32> @llvm.hexagon.V6.vsubhsat.128B(<32 x i32> undef, <32 x i32> %v127) #2
   %v129 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v126, <32 x i32> %v123, i32 24)
@@ -203,8 +188,8 @@ b34:                                              ; preds = %b34, %b24
   %v142 = tail call <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32> %v141, <32 x i32> %v134, i32 -2)
   %v143 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v142)
   %v144 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v142)
-  store <32 x i32> %v144, <32 x i32>* undef, align 128
-  store <32 x i32> %v143, <32 x i32>* undef, align 128
+  store <32 x i32> %v144, ptr undef, align 128
+  store <32 x i32> %v143, ptr undef, align 128
   %v145 = add nuw nsw i32 %v38, 1
   %v146 = icmp eq i32 %v38, undef
   br i1 %v146, label %b33, label %b34

diff  --git a/llvm/test/CodeGen/Hexagon/extload-combine.ll b/llvm/test/CodeGen/Hexagon/extload-combine.ll
index c7a386a664ba6..3999b60ae0ed1 100644
--- a/llvm/test/CodeGen/Hexagon/extload-combine.ll
+++ b/llvm/test/CodeGen/Hexagon/extload-combine.ll
@@ -18,8 +18,8 @@ define i64 @short_test1() #0 {
 ; CHECK: [[VAR:r[0-9]+]] = memuh(##
 ; CHECK: combine(#0,[[VAR]])
 entry:
-  store i16 0, i16* @a, align 2
-  %0 = load i16, i16* @b, align 2
+  store i16 0, ptr @a, align 2
+  %0 = load i16, ptr @b, align 2
   %conv2 = zext i16 %0 to i64
   ret i64 %conv2
 }
@@ -29,8 +29,8 @@ define i64 @short_test2() #0 {
 ; CHECK: [[VAR1:r[0-9]+]] = memh(##
 ; CHECK: sxtw([[VAR1]])
 entry:
-  store i16 0, i16* @a, align 2
-  %0 = load i16, i16* @c, align 2
+  store i16 0, ptr @a, align 2
+  %0 = load i16, ptr @c, align 2
   %conv2 = sext i16 %0 to i64
   ret i64 %conv2
 }
@@ -40,8 +40,8 @@ define i64 @char_test1() #0 {
 ; CHECK: [[VAR2:r[0-9]+]] = memub(##
 ; CHECK: combine(#0,[[VAR2]])
 entry:
-  store i8 0, i8* @char_a, align 1
-  %0 = load i8, i8* @char_b, align 1
+  store i8 0, ptr @char_a, align 1
+  %0 = load i8, ptr @char_b, align 1
   %conv2 = zext i8 %0 to i64
   ret i64 %conv2
 }
@@ -51,8 +51,8 @@ define i64 @char_test2() #0 {
 ; CHECK: [[VAR3:r[0-9]+]] = memb(##
 ; CHECK: sxtw([[VAR3]])
 entry:
-  store i8 0, i8* @char_a, align 1
-  %0 = load i8, i8* @char_c, align 1
+  store i8 0, ptr @char_a, align 1
+  %0 = load i8, ptr @char_c, align 1
   %conv2 = sext i8 %0 to i64
   ret i64 %conv2
 }
@@ -62,8 +62,8 @@ define i64 @int_test1() #0 {
 ; CHECK: [[VAR4:r[0-9]+]] = memw(##
 ; CHECK: combine(#0,[[VAR4]])
 entry:
-  store i32 0, i32* @int_a, align 4
-  %0 = load i32, i32* @int_b, align 4
+  store i32 0, ptr @int_a, align 4
+  %0 = load i32, ptr @int_b, align 4
   %conv = zext i32 %0 to i64
   ret i64 %conv
 }
@@ -73,8 +73,8 @@ define i64 @int_test2() #0 {
 ; CHECK: [[VAR5:r[0-9]+]] = memw(##
 ; CHECK: sxtw([[VAR5]])
 entry:
-  store i32 0, i32* @int_a, align 4
-  %0 = load i32, i32* @int_c, align 4
+  store i32 0, ptr @int_a, align 4
+  %0 = load i32, ptr @int_c, align 4
   %conv = sext i32 %0 to i64
   ret i64 %conv
 }

diff  --git a/llvm/test/CodeGen/Hexagon/extlow.ll b/llvm/test/CodeGen/Hexagon/extlow.ll
index cb3caa29ecca8..f3925d89ab988 100644
--- a/llvm/test/CodeGen/Hexagon/extlow.ll
+++ b/llvm/test/CodeGen/Hexagon/extlow.ll
@@ -7,6 +7,6 @@ define void @f0(i32 %a0) {
 b0:
   %v0 = add i32 16777279, %a0
   %v1 = alloca i32, align 4
-  store i32 %v0, i32* %v1, align 4
+  store i32 %v0, ptr %v1, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/extract-basic.ll b/llvm/test/CodeGen/Hexagon/extract-basic.ll
index ad118dea0ab65..9df5817e5e29c 100644
--- a/llvm/test/CodeGen/Hexagon/extract-basic.ll
+++ b/llvm/test/CodeGen/Hexagon/extract-basic.ll
@@ -35,42 +35,40 @@ target triple = "hexagon"
 %struct.structx_t = type { i8, i8, i8, i8 }
 %struct.structy_t = type { i8, i8, i8, i8 }
 
-define void @foo(%struct.structx_t* nocapture %px, %struct.structy_t* nocapture %py) nounwind {
+define void @foo(ptr nocapture %px, ptr nocapture %py) nounwind {
 entry:
-  %0 = bitcast %struct.structy_t* %py to i32*
-  %1 = load i32, i32* %0, align 4
-  %bf.value = and i32 %1, 7
-  %2 = bitcast %struct.structx_t* %px to i32*
-  %3 = load i32, i32* %2, align 4
-  %4 = and i32 %3, -8
-  %5 = or i32 %4, %bf.value
-  store i32 %5, i32* %2, align 4
-  %6 = load i32, i32* %0, align 4
-  %7 = lshr i32 %6, 4
-  %bf.clear1 = shl nuw nsw i32 %7, 3
-  %8 = and i32 %bf.clear1, 56
-  %9 = and i32 %5, -1017
-  %10 = or i32 %8, %9
-  store i32 %10, i32* %2, align 4
-  %11 = load i32, i32* %0, align 4
-  %12 = lshr i32 %11, 7
-  %bf.value4 = shl i32 %12, 10
-  %13 = and i32 %bf.value4, 261120
-  %14 = and i32 %10, -262081
-  %15 = or i32 %14, %13
-  store i32 %15, i32* %2, align 4
-  %16 = load i32, i32* %0, align 4
-  %17 = lshr i32 %16, 16
-  %bf.clear5 = shl i32 %17, 18
-  %18 = and i32 %bf.clear5, 66846720
-  %19 = and i32 %15, -1073480641
-  %20 = or i32 %19, %18
-  store i32 %20, i32* %2, align 4
-  %21 = load i32, i32* %0, align 4
-  %22 = lshr i32 %21, 24
-  %23 = shl i32 %22, 30
-  %24 = and i32 %20, 67107903
-  %25 = or i32 %24, %23
-  store i32 %25, i32* %2, align 4
+  %0 = load i32, ptr %py, align 4
+  %bf.value = and i32 %0, 7
+  %1 = load i32, ptr %px, align 4
+  %2 = and i32 %1, -8
+  %3 = or i32 %2, %bf.value
+  store i32 %3, ptr %px, align 4
+  %4 = load i32, ptr %py, align 4
+  %5 = lshr i32 %4, 4
+  %bf.clear1 = shl nuw nsw i32 %5, 3
+  %6 = and i32 %bf.clear1, 56
+  %7 = and i32 %3, -1017
+  %8 = or i32 %6, %7
+  store i32 %8, ptr %px, align 4
+  %9 = load i32, ptr %py, align 4
+  %10 = lshr i32 %9, 7
+  %bf.value4 = shl i32 %10, 10
+  %11 = and i32 %bf.value4, 261120
+  %12 = and i32 %8, -262081
+  %13 = or i32 %12, %11
+  store i32 %13, ptr %px, align 4
+  %14 = load i32, ptr %py, align 4
+  %15 = lshr i32 %14, 16
+  %bf.clear5 = shl i32 %15, 18
+  %16 = and i32 %bf.clear5, 66846720
+  %17 = and i32 %13, -1073480641
+  %18 = or i32 %17, %16
+  store i32 %18, ptr %px, align 4
+  %19 = load i32, ptr %py, align 4
+  %20 = lshr i32 %19, 24
+  %21 = shl i32 %20, 30
+  %22 = and i32 %18, 67107903
+  %23 = or i32 %22, %21
+  store i32 %23, ptr %px, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/fadd.ll b/llvm/test/CodeGen/Hexagon/fadd.ll
index 65c6182dcc77f..1fdd3ca296094 100644
--- a/llvm/test/CodeGen/Hexagon/fadd.ll
+++ b/llvm/test/CodeGen/Hexagon/fadd.ll
@@ -8,11 +8,11 @@ entry:
   %a = alloca float, align 4
   %b = alloca float, align 4
   %c = alloca float, align 4
-  store volatile float 0x402ECCCCC0000000, float* %a, align 4
-  store volatile float 0x4022333340000000, float* %b, align 4
-  %0 = load volatile float, float* %a, align 4
-  %1 = load volatile float, float* %b, align 4
+  store volatile float 0x402ECCCCC0000000, ptr %a, align 4
+  store volatile float 0x4022333340000000, ptr %b, align 4
+  %0 = load volatile float, ptr %a, align 4
+  %1 = load volatile float, ptr %b, align 4
   %add = fadd float %0, %1
-  store float %add, float* %c, align 4
+  store float %add, ptr %c, align 4
   ret i32 0
 }

diff  --git a/llvm/test/CodeGen/Hexagon/fcmp.ll b/llvm/test/CodeGen/Hexagon/fcmp.ll
index 5cf3c57b5e9cc..5e52ccf05c286 100644
--- a/llvm/test/CodeGen/Hexagon/fcmp.ll
+++ b/llvm/test/CodeGen/Hexagon/fcmp.ll
@@ -7,21 +7,21 @@ define i32 @foo(float %y) nounwind {
 entry:
   %retval = alloca i32, align 4
   %y.addr = alloca float, align 4
-  store float %y, float* %y.addr, align 4
-  %0 = load float, float* %y.addr, align 4
+  store float %y, ptr %y.addr, align 4
+  %0 = load float, ptr %y.addr, align 4
   %cmp = fcmp ogt float %0, 0x406AD7EFA0000000
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  store i32 1, i32* %retval
+  store i32 1, ptr %retval
   br label %return
 
 if.else:                                          ; preds = %entry
-  store i32 2, i32* %retval
+  store i32 2, ptr %retval
   br label %return
 
 return:                                           ; preds = %if.else, %if.then
-  %1 = load i32, i32* %retval
+  %1 = load i32, ptr %retval
   ret i32 %1
 }
 
@@ -29,9 +29,9 @@ define i32 @main() nounwind {
 entry:
   %retval = alloca i32, align 4
   %a = alloca float, align 4
-  store i32 0, i32* %retval
-  store float 0x40012E0A00000000, float* %a, align 4
-  %0 = load float, float* %a, align 4
+  store i32 0, ptr %retval
+  store float 0x40012E0A00000000, ptr %a, align 4
+  %0 = load float, ptr %a, align 4
   %call = call i32 @foo(float %0)
   ret i32 %call
 }

diff  --git a/llvm/test/CodeGen/Hexagon/feature-memops.ll b/llvm/test/CodeGen/Hexagon/feature-memops.ll
index 2d5103d10b8d4..7b638ec238bcf 100644
--- a/llvm/test/CodeGen/Hexagon/feature-memops.ll
+++ b/llvm/test/CodeGen/Hexagon/feature-memops.ll
@@ -2,19 +2,19 @@
 
 ; CHECK-LABEL: enabled:
 ; CHECK: memw({{.*}}) += #1
-define void @enabled(i32* %p) #0 {
-  %v0 = load i32, i32* %p
+define void @enabled(ptr %p) #0 {
+  %v0 = load i32, ptr %p
   %v1 = add i32 %v0, 1
-  store i32 %v1, i32* %p
+  store i32 %v1, ptr %p
   ret void
 }
 
 ; CHECK-LABEL: disabled:
 ; CHECK-NOT: memw({{.*}}) += #1
-define void @disabled(i32* %p) #1 {
-  %v0 = load i32, i32* %p
+define void @disabled(ptr %p) #1 {
+  %v0 = load i32, ptr %p
   %v1 = add i32 %v0, 1
-  store i32 %v1, i32* %p
+  store i32 %v1, ptr %p
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/find-loop-instr.ll b/llvm/test/CodeGen/Hexagon/find-loop-instr.ll
index b9743ad33aad4..04e9d3fa31c68 100644
--- a/llvm/test/CodeGen/Hexagon/find-loop-instr.ll
+++ b/llvm/test/CodeGen/Hexagon/find-loop-instr.ll
@@ -54,7 +54,7 @@ b15:                                              ; preds = %b12
   br i1 undef, label %b16, label %b17
 
 b16:                                              ; preds = %b15
-  store i32 0, i32* undef, align 4
+  store i32 0, ptr undef, align 4
   br label %b21
 
 b17:                                              ; preds = %b15
@@ -67,7 +67,7 @@ b19:                                              ; preds = %b18
   br label %b21
 
 b20:                                              ; preds = %b18
-  store i32 0, i32* undef, align 4
+  store i32 0, ptr undef, align 4
   br label %b21
 
 b21:                                              ; preds = %b20, %b19, %b16, %b10

diff  --git a/llvm/test/CodeGen/Hexagon/find-loop.ll b/llvm/test/CodeGen/Hexagon/find-loop.ll
index 7c2f9c8ff857c..3d2ffa12ce553 100644
--- a/llvm/test/CodeGen/Hexagon/find-loop.ll
+++ b/llvm/test/CodeGen/Hexagon/find-loop.ll
@@ -11,8 +11,7 @@
 define void @f0() #0 {
 b0:
   %v0 = alloca i64, align 8
-  %v1 = bitcast i64* %v0 to [2 x i32]*
-  %v2 = load i32, i32* @g0, align 4
+  %v2 = load i32, ptr @g0, align 4
   br i1 undef, label %b1, label %b2
 
 b1:                                               ; preds = %b1, %b0
@@ -36,8 +35,8 @@ b5:                                               ; preds = %b5, %b4
   %v8 = phi i32 [ %v19, %b5 ], [ 0, %b4 ]
   %v9 = add nsw i32 %v8, 0
   %v10 = lshr i32 %v9, 5
-  %v11 = getelementptr inbounds [2 x i32], [2 x i32]* %v1, i32 0, i32 %v10
-  %v12 = load i32, i32* %v11, align 4
+  %v11 = getelementptr inbounds [2 x i32], ptr %v0, i32 0, i32 %v10
+  %v12 = load i32, ptr %v11, align 4
   %v13 = and i32 %v9, 31
   %v14 = shl i32 1, %v13
   %v15 = and i32 %v12, %v14

diff  --git a/llvm/test/CodeGen/Hexagon/float-amode.ll b/llvm/test/CodeGen/Hexagon/float-amode.ll
index d770582ecab99..62ec0c6f13c8b 100644
--- a/llvm/test/CodeGen/Hexagon/float-amode.ll
+++ b/llvm/test/CodeGen/Hexagon/float-amode.ll
@@ -3,7 +3,7 @@
 ; The test checks for various addressing modes for floating point loads/stores.
 
 %struct.matrix_paramsGlob = type { [50 x i8], i16, [50 x float] }
-%struct.matrix_params = type { [50 x i8], i16, float** }
+%struct.matrix_params = type { [50 x i8], i16, ptr }
 %struct.matrix_params2 = type { i16, [50 x [50 x float]] }
 
 @globB = common global %struct.matrix_paramsGlob zeroinitializer, align 4
@@ -17,20 +17,20 @@
 ; CHECK: memw(r{{[0-9]+}}+r{{[0-9]+}}<<#2) = [[REG12]].new
 
 ; Function Attrs: norecurse nounwind
-define void @test1(%struct.matrix_params* nocapture readonly %params, i32 %col1) {
+define void @test1(ptr nocapture readonly %params, i32 %col1) {
 entry:
-  %matrixA = getelementptr inbounds %struct.matrix_params, %struct.matrix_params* %params, i32 0, i32 2
-  %0 = load float**, float*** %matrixA, align 4
-  %arrayidx = getelementptr inbounds float*, float** %0, i32 2
-  %1 = load float*, float** %arrayidx, align 4
-  %arrayidx1 = getelementptr inbounds float, float* %1, i32 %col1
-  %2 = load float, float* %arrayidx1, align 4
+  %matrixA = getelementptr inbounds %struct.matrix_params, ptr %params, i32 0, i32 2
+  %0 = load ptr, ptr %matrixA, align 4
+  %arrayidx = getelementptr inbounds ptr, ptr %0, i32 2
+  %1 = load ptr, ptr %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds float, ptr %1, i32 %col1
+  %2 = load float, ptr %arrayidx1, align 4
   %mul = fmul float %2, 2.000000e+01
   %add = fadd float %mul, 1.000000e+01
-  %arrayidx3 = getelementptr inbounds float*, float** %0, i32 5
-  %3 = load float*, float** %arrayidx3, align 4
-  %arrayidx4 = getelementptr inbounds float, float* %3, i32 %col1
-  store float %add, float* %arrayidx4, align 4
+  %arrayidx3 = getelementptr inbounds ptr, ptr %0, i32 5
+  %3 = load ptr, ptr %arrayidx3, align 4
+  %arrayidx4 = getelementptr inbounds float, ptr %3, i32 %col1
+  store float %add, ptr %arrayidx4, align 4
   ret void
 }
 
@@ -40,16 +40,16 @@ entry:
 ; CHECK: memw(##globA+84) = [[REG22]]
 
 ; Function Attrs: norecurse nounwind
-define void @test2(%struct.matrix_params* nocapture readonly %params, i32 %col1) {
+define void @test2(ptr nocapture readonly %params, i32 %col1) {
 entry:
-  %matrixA = getelementptr inbounds %struct.matrix_params, %struct.matrix_params* %params, i32 0, i32 2
-  %0 = load float**, float*** %matrixA, align 4
-  %1 = load float*, float** %0, align 4
-  %arrayidx1 = getelementptr inbounds float, float* %1, i32 %col1
-  %2 = load float, float* %arrayidx1, align 4
-  %3 = load float, float* getelementptr inbounds (%struct.matrix_paramsGlob, %struct.matrix_paramsGlob* @globB, i32 0, i32 2, i32 10), align 4
+  %matrixA = getelementptr inbounds %struct.matrix_params, ptr %params, i32 0, i32 2
+  %0 = load ptr, ptr %matrixA, align 4
+  %1 = load ptr, ptr %0, align 4
+  %arrayidx1 = getelementptr inbounds float, ptr %1, i32 %col1
+  %2 = load float, ptr %arrayidx1, align 4
+  %3 = load float, ptr getelementptr inbounds (%struct.matrix_paramsGlob, ptr @globB, i32 0, i32 2, i32 10), align 4
   %add = fadd float %2, %3
-  store float %add, float* getelementptr inbounds (%struct.matrix_paramsGlob, %struct.matrix_paramsGlob* @globA, i32 0, i32 2, i32 8), align 4
+  store float %add, ptr getelementptr inbounds (%struct.matrix_paramsGlob, ptr @globA, i32 0, i32 2, i32 8), align 4
   ret void
 }
 
@@ -59,16 +59,16 @@ entry:
 ; CHECK: memw(gp+#a) = [[REG32]]
 
 ; Function Attrs: norecurse nounwind
-define void @test3(%struct.matrix_params* nocapture readonly %params, i32 %col1) {
+define void @test3(ptr nocapture readonly %params, i32 %col1) {
 entry:
-  %matrixA = getelementptr inbounds %struct.matrix_params, %struct.matrix_params* %params, i32 0, i32 2
-  %0 = load float**, float*** %matrixA, align 4
-  %1 = load float*, float** %0, align 4
-  %arrayidx1 = getelementptr inbounds float, float* %1, i32 %col1
-  %2 = load float, float* %arrayidx1, align 4
-  %3 = load float, float* @b, align 4
+  %matrixA = getelementptr inbounds %struct.matrix_params, ptr %params, i32 0, i32 2
+  %0 = load ptr, ptr %matrixA, align 4
+  %1 = load ptr, ptr %0, align 4
+  %arrayidx1 = getelementptr inbounds float, ptr %1, i32 %col1
+  %2 = load float, ptr %arrayidx1, align 4
+  %3 = load float, ptr @b, align 4
   %add = fadd float %2, %3
-  store float %add, float* @a, align 4
+  store float %add, ptr @a, align 4
   ret void
 }
 
@@ -79,11 +79,11 @@ entry:
 ; Function Attrs: noinline norecurse nounwind
 define void @test4(i32 %col1) {
 entry:
-  %arrayidx = getelementptr inbounds %struct.matrix_paramsGlob, %struct.matrix_paramsGlob* @globB, i32 0, i32 2, i32 %col1
-  %0 = load float, float* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds %struct.matrix_paramsGlob, ptr @globB, i32 0, i32 2, i32 %col1
+  %0 = load float, ptr %arrayidx, align 4
   %add = fadd float %0, 0.000000e+00
   %add1 = add nsw i32 %col1, 2
-  %arrayidx2 = getelementptr inbounds %struct.matrix_paramsGlob, %struct.matrix_paramsGlob* @globA, i32 0, i32 2, i32 %add1
-  store float %add, float* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds %struct.matrix_paramsGlob, ptr @globA, i32 0, i32 2, i32 %add1
+  store float %add, ptr %arrayidx2, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/float-gen-cmpop.ll b/llvm/test/CodeGen/Hexagon/float-gen-cmpop.ll
index 6a735057f93d9..8d39ec676794e 100644
--- a/llvm/test/CodeGen/Hexagon/float-gen-cmpop.ll
+++ b/llvm/test/CodeGen/Hexagon/float-gen-cmpop.ll
@@ -5,15 +5,15 @@ target triple = "hexagon"
 ; CHECK-LABEL: f0:
 ; CHECK: p{{[0-9]+}} = sfcmp.ge(r{{[0-9]+}},r{{[0-9]+}})
 ; CHECK: p{{[0-9]+}} = sfcmp.gt(r{{[0-9]+}},r{{[0-9]+}})
-define i32 @f0(float* nocapture %a0) #0 {
+define i32 @f0(ptr nocapture %a0) #0 {
 b0:
-  %v0 = load float, float* %a0, align 4, !tbaa !0
+  %v0 = load float, ptr %a0, align 4, !tbaa !0
   %v1 = fcmp olt float %v0, 6.000000e+01
   br i1 %v1, label %b1, label %b2
 
 b1:                                               ; preds = %b0
-  %v2 = getelementptr inbounds float, float* %a0, i32 1
-  %v3 = load float, float* %v2, align 4, !tbaa !0
+  %v2 = getelementptr inbounds float, ptr %a0, i32 1
+  %v3 = load float, ptr %v2, align 4, !tbaa !0
   %v4 = fcmp ogt float %v3, 0x3FECCCCCC0000000
   br label %b2
 
@@ -25,9 +25,9 @@ b2:                                               ; preds = %b1, %b0
 
 ; CHECK-LABEL: f1:
 ; CHECK: p{{[0-9]+}} = sfcmp.eq(r{{[0-9]+}},r{{[0-9]+}})
-define i32 @f1(float* nocapture %a0) #0 {
+define i32 @f1(ptr nocapture %a0) #0 {
 b0:
-  %v0 = load float, float* %a0, align 4, !tbaa !0
+  %v0 = load float, ptr %a0, align 4, !tbaa !0
   %v1 = fcmp oeq float %v0, 6.000000e+01
   %v2 = zext i1 %v1 to i32
   ret i32 %v2
@@ -36,15 +36,15 @@ b0:
 ; CHECK-LABEL: f2:
 ; CHECK: p{{[0-9]+}} = dfcmp.ge(r{{[0-9]+}}:{{[0-9]+}},r{{[0-9]+}}:{{[0-9]+}})
 ; CHECK: p{{[0-9]+}} = dfcmp.gt(r{{[0-9]+}}:{{[0-9]+}},r{{[0-9]+}}:{{[0-9]+}})
-define i32 @f2(double* nocapture %a0) #0 {
+define i32 @f2(ptr nocapture %a0) #0 {
 b0:
-  %v0 = load double, double* %a0, align 8, !tbaa !4
+  %v0 = load double, ptr %a0, align 8, !tbaa !4
   %v1 = fcmp olt double %v0, 6.000000e+01
   br i1 %v1, label %b1, label %b2
 
 b1:                                               ; preds = %b0
-  %v2 = getelementptr inbounds double, double* %a0, i32 1
-  %v3 = load double, double* %v2, align 8, !tbaa !4
+  %v2 = getelementptr inbounds double, ptr %a0, i32 1
+  %v3 = load double, ptr %v2, align 8, !tbaa !4
   %v4 = fcmp ogt double %v3, 0x3FECCCCCC0000000
   br label %b2
 
@@ -54,9 +54,9 @@ b2:                                               ; preds = %b1, %b0
   ret i32 %v6
 }
 
-define i32 @f3(double* nocapture %a0) #0 {
+define i32 @f3(ptr nocapture %a0) #0 {
 b0:
-  %v0 = load double, double* %a0, align 8, !tbaa !4
+  %v0 = load double, ptr %a0, align 8, !tbaa !4
   %v1 = fcmp oeq double %v0, 6.000000e+01
   %v2 = zext i1 %v1 to i32
   ret i32 %v2

diff  --git a/llvm/test/CodeGen/Hexagon/float.ll b/llvm/test/CodeGen/Hexagon/float.ll
index cc024a76d037c..86e1035229843 100644
--- a/llvm/test/CodeGen/Hexagon/float.ll
+++ b/llvm/test/CodeGen/Hexagon/float.ll
@@ -2,22 +2,22 @@
 ; CHECK: sfadd
 ; CHECK: sfsub
 
-define void @f0(float* %a0, float %a1, float %a2) #0 {
+define void @f0(ptr %a0, float %a1, float %a2) #0 {
 b0:
-  %v0 = alloca float*, align 4
+  %v0 = alloca ptr, align 4
   %v1 = alloca float, align 4
   %v2 = alloca float, align 4
-  store float* %a0, float** %v0, align 4
-  store float %a1, float* %v1, align 4
-  store float %a2, float* %v2, align 4
-  %v3 = load float*, float** %v0, align 4
-  %v4 = load float, float* %v3
-  %v5 = load float, float* %v1, align 4
+  store ptr %a0, ptr %v0, align 4
+  store float %a1, ptr %v1, align 4
+  store float %a2, ptr %v2, align 4
+  %v3 = load ptr, ptr %v0, align 4
+  %v4 = load float, ptr %v3
+  %v5 = load float, ptr %v1, align 4
   %v6 = fadd float %v4, %v5
-  %v7 = load float, float* %v2, align 4
+  %v7 = load float, ptr %v2, align 4
   %v8 = fsub float %v6, %v7
-  %v9 = load float*, float** %v0, align 4
-  store float %v8, float* %v9
+  %v9 = load ptr, ptr %v0, align 4
+  store float %v8, ptr %v9
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/floatconvert-ieee-rnd-near.ll b/llvm/test/CodeGen/Hexagon/floatconvert-ieee-rnd-near.ll
index cc024a76d037c..86e1035229843 100644
--- a/llvm/test/CodeGen/Hexagon/floatconvert-ieee-rnd-near.ll
+++ b/llvm/test/CodeGen/Hexagon/floatconvert-ieee-rnd-near.ll
@@ -2,22 +2,22 @@
 ; CHECK: sfadd
 ; CHECK: sfsub
 
-define void @f0(float* %a0, float %a1, float %a2) #0 {
+define void @f0(ptr %a0, float %a1, float %a2) #0 {
 b0:
-  %v0 = alloca float*, align 4
+  %v0 = alloca ptr, align 4
   %v1 = alloca float, align 4
   %v2 = alloca float, align 4
-  store float* %a0, float** %v0, align 4
-  store float %a1, float* %v1, align 4
-  store float %a2, float* %v2, align 4
-  %v3 = load float*, float** %v0, align 4
-  %v4 = load float, float* %v3
-  %v5 = load float, float* %v1, align 4
+  store ptr %a0, ptr %v0, align 4
+  store float %a1, ptr %v1, align 4
+  store float %a2, ptr %v2, align 4
+  %v3 = load ptr, ptr %v0, align 4
+  %v4 = load float, ptr %v3
+  %v5 = load float, ptr %v1, align 4
   %v6 = fadd float %v4, %v5
-  %v7 = load float, float* %v2, align 4
+  %v7 = load float, ptr %v2, align 4
   %v8 = fsub float %v6, %v7
-  %v9 = load float*, float** %v0, align 4
-  store float %v8, float* %v9
+  %v9 = load ptr, ptr %v0, align 4
+  store float %v8, ptr %v9
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/fltnvjump.ll b/llvm/test/CodeGen/Hexagon/fltnvjump.ll
index 6c4bc533c20e6..259ff57eb3693 100644
--- a/llvm/test/CodeGen/Hexagon/fltnvjump.ll
+++ b/llvm/test/CodeGen/Hexagon/fltnvjump.ll
@@ -7,32 +7,32 @@
 
 target triple = "hexagon"
 
-%s.0 = type { %s.1, i8*, i8* }
+%s.0 = type { %s.1, ptr, ptr }
 %s.1 = type { i16, i16, i32 }
-%s.2 = type { i8, i32, i32, i16, i16, i16, i32, i8, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, %s.3* }
-%s.3 = type { [2 x i16], i16, i16, i16, i16, [13 x i16], i16, i16, [2 x i16*], [25 x i16], [49 x i16], [6 x i16], [49 x i16] }
+%s.2 = type { i8, i32, i32, i16, i16, i16, i32, i8, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, ptr }
+%s.3 = type { [2 x i16], i16, i16, i16, i16, [13 x i16], i16, i16, [2 x ptr], [25 x i16], [49 x i16], [6 x i16], [49 x i16] }
 
- at g0 = internal constant %s.0 { %s.1 { i16 705, i16 0, i32 16 }, i8* getelementptr inbounds ([110 x i8], [110 x i8]* @g1, i32 0, i32 0), i8* getelementptr inbounds ([13 x i8], [13 x i8]* @g2, i32 0, i32 0) }, align 4
+ at g0 = internal constant %s.0 { %s.1 { i16 705, i16 0, i32 16 }, ptr @g1, ptr @g2 }, align 4
 @g1 = private unnamed_addr constant [110 x i8] c"Assertion ............................................................................................ failed\00", align 1
 @g2 = private unnamed_addr constant [13 x i8] c"............\00", align 1
 
-define signext i16 @f0(%s.2* %a0) #0 {
+define signext i16 @f0(ptr %a0) #0 {
 b0:
   %v0 = alloca i16, align 2
   %v1 = alloca i16, align 2
-  %v2 = getelementptr inbounds %s.2, %s.2* %a0, i32 0, i32 19
-  %v3 = load %s.3*, %s.3** %v2, align 4, !tbaa !0
-  %v4 = getelementptr inbounds %s.3, %s.3* %v3, i32 0, i32 12, i32 0
-  %v5 = getelementptr inbounds %s.3, %s.3* %v3, i32 0, i32 2
-  %v6 = call signext i16 @f1(i16* %v4, i16* %v5, %s.2* %a0)
+  %v2 = getelementptr inbounds %s.2, ptr %a0, i32 0, i32 19
+  %v3 = load ptr, ptr %v2, align 4, !tbaa !0
+  %v4 = getelementptr inbounds %s.3, ptr %v3, i32 0, i32 12, i32 0
+  %v5 = getelementptr inbounds %s.3, ptr %v3, i32 0, i32 2
+  %v6 = call signext i16 @f1(ptr %v4, ptr %v5, ptr %a0)
   %v7 = icmp eq i16 %v6, 0
   br i1 %v7, label %b1, label %b13
 
 b1:                                               ; preds = %b0
-  %v8 = getelementptr inbounds %s.2, %s.2* %a0, i32 0, i32 11
-  %v9 = load i16, i16* %v8, align 2, !tbaa !4
+  %v8 = getelementptr inbounds %s.2, ptr %a0, i32 0, i32 11
+  %v9 = load i16, ptr %v8, align 2, !tbaa !4
   %v10 = sext i16 %v9 to i32
-  %v11 = load i16, i16* %v5, align 2, !tbaa !4
+  %v11 = load i16, ptr %v5, align 2, !tbaa !4
   %v12 = sext i16 %v11 to i32
   %v13 = call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %v10, i32 %v12)
   %v14 = trunc i32 %v13 to i16
@@ -40,21 +40,20 @@ b1:                                               ; preds = %b0
   br i1 %v15, label %b13, label %b2
 
 b2:                                               ; preds = %b1
-  %v16 = getelementptr inbounds %s.3, %s.3* %v3, i32 0, i32 8, i32 1
-  %v17 = load i16*, i16** %v16, align 4, !tbaa !0
-  call void @f2(i16* %v17, i16* %v1, i16* %v4, i16 signext %v11, i16 signext %v9)
-  %v18 = getelementptr inbounds %s.3, %s.3* %v3, i32 0, i32 8, i32 0
-  %v19 = load i16*, i16** %v18, align 4, !tbaa !0
-  %v20 = load i16*, i16** %v16, align 4, !tbaa !0
-  %v21 = load i16, i16* %v1, align 2, !tbaa !4
-  call void @f3(i16* %v19, i16* %v0, i16* %v20, i16 signext %v21)
-  %v22 = load i16, i16* %v0, align 2, !tbaa !4
-  %v23 = getelementptr inbounds %s.3, %s.3* %v3, i32 0, i32 0, i32 0
-  store i16 %v22, i16* %v23, align 2, !tbaa !4
-  %v24 = load i16, i16* %v1, align 2, !tbaa !4
-  %v25 = getelementptr inbounds %s.3, %s.3* %v3, i32 0, i32 0, i32 1
-  store i16 %v24, i16* %v25, align 2, !tbaa !4
-  %v26 = load i16, i16* %v0, align 2, !tbaa !4
+  %v16 = getelementptr inbounds %s.3, ptr %v3, i32 0, i32 8, i32 1
+  %v17 = load ptr, ptr %v16, align 4, !tbaa !0
+  call void @f2(ptr %v17, ptr %v1, ptr %v4, i16 signext %v11, i16 signext %v9)
+  %v18 = getelementptr inbounds %s.3, ptr %v3, i32 0, i32 8, i32 0
+  %v19 = load ptr, ptr %v18, align 4, !tbaa !0
+  %v20 = load ptr, ptr %v16, align 4, !tbaa !0
+  %v21 = load i16, ptr %v1, align 2, !tbaa !4
+  call void @f3(ptr %v19, ptr %v0, ptr %v20, i16 signext %v21)
+  %v22 = load i16, ptr %v0, align 2, !tbaa !4
+  store i16 %v22, ptr %v3, align 2, !tbaa !4
+  %v24 = load i16, ptr %v1, align 2, !tbaa !4
+  %v25 = getelementptr inbounds %s.3, ptr %v3, i32 0, i32 0, i32 1
+  store i16 %v24, ptr %v25, align 2, !tbaa !4
+  %v26 = load i16, ptr %v0, align 2, !tbaa !4
   %v27 = sext i16 %v26 to i32
   %v28 = icmp slt i16 %v26, 1
   br i1 %v28, label %b13, label %b3
@@ -70,10 +69,10 @@ b3:                                               ; preds = %b2
   br i1 %v35, label %b13, label %b4
 
 b4:                                               ; preds = %b3
-  %v36 = load i16*, i16** %v18, align 4, !tbaa !0
-  %v37 = load i16, i16* %v36, align 2, !tbaa !4
-  %v38 = getelementptr inbounds i16, i16* %v36, i32 %v27
-  %v39 = load i16, i16* %v38, align 2, !tbaa !4
+  %v36 = load ptr, ptr %v18, align 4, !tbaa !0
+  %v37 = load i16, ptr %v36, align 2, !tbaa !4
+  %v38 = getelementptr inbounds i16, ptr %v36, i32 %v27
+  %v39 = load i16, ptr %v38, align 2, !tbaa !4
   %v40 = sext i16 %v37 to i32
   %v41 = call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %v40, i32 32)
   %v42 = trunc i32 %v41 to i16
@@ -88,23 +87,23 @@ b5:                                               ; preds = %b4
   br i1 %v47, label %b13, label %b6
 
 b6:                                               ; preds = %b5
-  %v48 = load i16, i16* %v1, align 2, !tbaa !4
+  %v48 = load i16, ptr %v1, align 2, !tbaa !4
   %v49 = sext i16 %v48 to i32
-  %v50 = load i16*, i16** %v16, align 4, !tbaa !0
-  %v51 = getelementptr inbounds i16, i16* %v50, i32 %v49
-  %v52 = load i16, i16* %v51, align 2, !tbaa !4
-  %v53 = getelementptr inbounds %s.2, %s.2* %a0, i32 0, i32 14
-  %v54 = load i16, i16* %v53, align 2, !tbaa !4
+  %v50 = load ptr, ptr %v16, align 4, !tbaa !0
+  %v51 = getelementptr inbounds i16, ptr %v50, i32 %v49
+  %v52 = load i16, ptr %v51, align 2, !tbaa !4
+  %v53 = getelementptr inbounds %s.2, ptr %a0, i32 0, i32 14
+  %v54 = load i16, ptr %v53, align 2, !tbaa !4
   %v55 = icmp eq i16 %v54, 0
   br i1 %v55, label %b7, label %b8
 
 b7:                                               ; preds = %b6
-  %v56 = getelementptr inbounds %s.3, %s.3* %v3, i32 0, i32 1
-  store i16 1, i16* %v56, align 2, !tbaa !4
+  %v56 = getelementptr inbounds %s.3, ptr %v3, i32 0, i32 1
+  store i16 1, ptr %v56, align 2, !tbaa !4
   br label %b11
 
 b8:                                               ; preds = %b6
-  %v57 = load i16, i16* %v50, align 2, !tbaa !4
+  %v57 = load i16, ptr %v50, align 2, !tbaa !4
   %v58 = sext i16 %v57 to i32
   %v59 = sext i16 %v52 to i32
   %v60 = call signext i16 @f4(i32 %v58, i32 %v59)
@@ -113,7 +112,7 @@ b8:                                               ; preds = %b6
   %v63 = call i32 @llvm.hexagon.A2.sath(i32 %v62)
   %v64 = shl i32 %v63, 16
   %v65 = ashr exact i32 %v64, 16
-  %v66 = load i16, i16* %v53, align 2, !tbaa !4
+  %v66 = load i16, ptr %v53, align 2, !tbaa !4
   %v67 = sext i16 %v66 to i32
   %v68 = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 1024, i32 %v65, i32 %v67)
   %v69 = shl i32 %v68, 16
@@ -140,21 +139,21 @@ b8:                                               ; preds = %b6
   br i1 %v89, label %b10, label %b9
 
 b9:                                               ; preds = %b8
-  call void @f5(%s.0* @g0) #2
+  call void @f5(ptr @g0) #2
   unreachable
 
 b10:                                              ; preds = %b8
   %v90 = trunc i32 %v76 to i16
   %v91 = icmp eq i32 %v78, 0
   %v92 = select i1 %v91, i16 1, i16 %v90
-  %v93 = getelementptr inbounds %s.3, %s.3* %v3, i32 0, i32 1
-  store i16 %v92, i16* %v93, align 2, !tbaa !4
+  %v93 = getelementptr inbounds %s.3, ptr %v3, i32 0, i32 1
+  store i16 %v92, ptr %v93, align 2, !tbaa !4
   br label %b11
 
 b11:                                              ; preds = %b10, %b7
   %v94 = phi i16 [ %v92, %b10 ], [ 1, %b7 ]
-  %v95 = getelementptr inbounds %s.3, %s.3* %v3, i32 0, i32 7
-  store i16 %v94, i16* %v95, align 2, !tbaa !4
+  %v95 = getelementptr inbounds %s.3, ptr %v3, i32 0, i32 7
+  store i16 %v94, ptr %v95, align 2, !tbaa !4
   %v96 = sext i16 %v94 to i32
   %v97 = call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %v96, i32 5)
   %v98 = trunc i32 %v97 to i16
@@ -162,14 +161,14 @@ b11:                                              ; preds = %b10, %b7
   br i1 %v99, label %b13, label %b12
 
 b12:                                              ; preds = %b11
-  %v100 = getelementptr inbounds %s.3, %s.3* %v3, i32 0, i32 11, i32 0
-  %v101 = load i16*, i16** %v18, align 4, !tbaa !0
-  %v102 = load i16, i16* %v0, align 2, !tbaa !4
-  call void @f6(i16* %v100, i16 signext %v94, i16* %v101, i16 signext %v102)
-  %v103 = getelementptr inbounds %s.3, %s.3* %v3, i32 0, i32 3
-  store i16 %v37, i16* %v103, align 2, !tbaa !4
-  %v104 = getelementptr inbounds %s.3, %s.3* %v3, i32 0, i32 4
-  store i16 %v39, i16* %v104, align 2, !tbaa !4
+  %v100 = getelementptr inbounds %s.3, ptr %v3, i32 0, i32 11, i32 0
+  %v101 = load ptr, ptr %v18, align 4, !tbaa !0
+  %v102 = load i16, ptr %v0, align 2, !tbaa !4
+  call void @f6(ptr %v100, i16 signext %v94, ptr %v101, i16 signext %v102)
+  %v103 = getelementptr inbounds %s.3, ptr %v3, i32 0, i32 3
+  store i16 %v37, ptr %v103, align 2, !tbaa !4
+  %v104 = getelementptr inbounds %s.3, ptr %v3, i32 0, i32 4
+  store i16 %v39, ptr %v104, align 2, !tbaa !4
   br label %b13
 
 b13:                                              ; preds = %b12, %b11, %b5, %b4, %b3, %b2, %b1, %b0
@@ -177,14 +176,14 @@ b13:                                              ; preds = %b12, %b11, %b5, %b4
   ret i16 %v105
 }
 
-declare signext i16 @f1(i16*, i16*, %s.2*) #0
+declare signext i16 @f1(ptr, ptr, ptr) #0
 
 ; Function Attrs: nounwind readnone
 declare i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32, i32) #1
 
-declare void @f2(i16*, i16*, i16*, i16 signext, i16 signext) #0
+declare void @f2(ptr, ptr, ptr, i16 signext, i16 signext) #0
 
-declare void @f3(i16*, i16*, i16*, i16 signext) #0
+declare void @f3(ptr, ptr, ptr, i16 signext) #0
 
 ; Function Attrs: nounwind readnone
 declare i32 @llvm.hexagon.A2.sath(i32) #1
@@ -198,9 +197,9 @@ declare signext i16 @f4(i32, i32) #0
 declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32, i32, i32) #1
 
 ; Function Attrs: noreturn
-declare void @f5(%s.0*) #2
+declare void @f5(ptr) #2
 
-declare void @f6(i16*, i16 signext, i16*, i16 signext) #0
+declare void @f6(ptr, i16 signext, ptr, i16 signext) #0
 
 declare float @f7(float, i32) #0
 

diff  --git a/llvm/test/CodeGen/Hexagon/fmadd.ll b/llvm/test/CodeGen/Hexagon/fmadd.ll
index c7441af0bca1a..06333bfc4c167 100644
--- a/llvm/test/CodeGen/Hexagon/fmadd.ll
+++ b/llvm/test/CodeGen/Hexagon/fmadd.ll
@@ -7,13 +7,13 @@
 ; CHECK: r{{[0-9]+}} += sfmpy(r{{[0-9]+}},r{{[0-9]+}})
 define void @f0() #0 {
 b0:
-  %v0 = load float, float* @g0, align 4
-  %v1 = load float, float* @g1, align 4
-  %v2 = load float, float* @g2, align 4
+  %v0 = load float, ptr @g0, align 4
+  %v1 = load float, ptr @g1, align 4
+  %v2 = load float, ptr @g2, align 4
   %v3 = alloca float, align 4
   %v4 = fmul float %v0, %v1
   %v5 = fadd float %v2, %v4
-  store float %v5, float* %v3, align 4
+  store float %v5, ptr %v3, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/fmul.ll b/llvm/test/CodeGen/Hexagon/fmul.ll
index e20e293c0a137..2de3836d5bd82 100644
--- a/llvm/test/CodeGen/Hexagon/fmul.ll
+++ b/llvm/test/CodeGen/Hexagon/fmul.ll
@@ -9,11 +9,11 @@ entry:
   %a = alloca float, align 4
   %b = alloca float, align 4
   %c = alloca float, align 4
-  store volatile float 0x402ECCCCC0000000, float* %a, align 4
-  store volatile float 0x4022333340000000, float* %b, align 4
-  %0 = load volatile float, float* %b, align 4
-  %1 = load volatile float, float* %a, align 4
+  store volatile float 0x402ECCCCC0000000, ptr %a, align 4
+  store volatile float 0x4022333340000000, ptr %b, align 4
+  %0 = load volatile float, ptr %b, align 4
+  %1 = load volatile float, ptr %a, align 4
   %mul = fmul float %0, %1
-  store float %mul, float* %c, align 4
+  store float %mul, ptr %c, align 4
   ret i32 0
 }

diff  --git a/llvm/test/CodeGen/Hexagon/formal-args-i1.ll b/llvm/test/CodeGen/Hexagon/formal-args-i1.ll
index 050b572e351e8..52f903dad0ded 100644
--- a/llvm/test/CodeGen/Hexagon/formal-args-i1.ll
+++ b/llvm/test/CodeGen/Hexagon/formal-args-i1.ll
@@ -7,10 +7,10 @@
 
 target triple = "hexagon"
 
-define void @f0(i1 zeroext %a0, i8* nocapture %a1, i8 %a2) local_unnamed_addr #0 {
+define void @f0(i1 zeroext %a0, ptr nocapture %a1, i8 %a2) local_unnamed_addr #0 {
 entry:
   %v0 = select i1 %a0, i8 3, i8 %a2
-  store i8 %v0, i8* %a1, align 1
+  store i8 %v0, ptr %a1, align 1
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/fp16.ll b/llvm/test/CodeGen/Hexagon/fp16.ll
index c1fd501fd51a4..5f256e22aaffd 100644
--- a/llvm/test/CodeGen/Hexagon/fp16.ll
+++ b/llvm/test/CodeGen/Hexagon/fp16.ll
@@ -15,9 +15,9 @@
 ;CHECK-LABEL: @test1
 ;CHECK: call __extendhfsf2
 ;CHECK: r0 = memuh
-define dso_local float @test1(i16* nocapture readonly %a) local_unnamed_addr #0 {
+define dso_local float @test1(ptr nocapture readonly %a) local_unnamed_addr #0 {
 entry:
-  %0 = load i16, i16* %a, align 2
+  %0 = load i16, ptr %a, align 2
   %1 = tail call float @llvm.convert.from.fp16.f32(i16 %0)
   ret float %1
 }
@@ -26,9 +26,9 @@ entry:
 ;CHECK: call __extendhfsf2
 ;CHECK: r0 = memuh
 ;CHECK: convert_sf2d
-define dso_local double @test2(i16* nocapture readonly %a) local_unnamed_addr #0 {
+define dso_local double @test2(ptr nocapture readonly %a) local_unnamed_addr #0 {
 entry:
-  %0 = load i16, i16* %a, align 2
+  %0 = load i16, ptr %a, align 2
   %1 = tail call double @llvm.convert.from.fp16.f64(i16 %0)
   ret double %1
 }
@@ -36,20 +36,20 @@ entry:
 ;CHECK-LABEL: @test3
 ;CHECK: call __truncsfhf2
 ;CHECK: memh{{.*}}= r0
-define dso_local void @test3(float %src, i16* nocapture %dst) local_unnamed_addr #0 {
+define dso_local void @test3(float %src, ptr nocapture %dst) local_unnamed_addr #0 {
 entry:
   %0 = tail call i16 @llvm.convert.to.fp16.f32(float %src)
-  store i16 %0, i16* %dst, align 2
+  store i16 %0, ptr %dst, align 2
   ret void
 }
 
 ;CHECK-LABEL: @test4
 ;CHECK: call __truncdfhf2
 ;CHECK: memh{{.*}}= r0
-define dso_local void @test4(double %src, i16* nocapture %dst) local_unnamed_addr #0 {
+define dso_local void @test4(double %src, ptr nocapture %dst) local_unnamed_addr #0 {
 entry:
   %0 = tail call i16 @llvm.convert.to.fp16.f64(double %src)
-  store i16 %0, i16* %dst, align 2
+  store i16 %0, ptr %dst, align 2
   ret void
 }
 
@@ -57,11 +57,11 @@ entry:
 ;CHECK: call __extendhfsf2
 ;CHECK: call __extendhfsf2
 ;CHECK: sfadd
-define dso_local float @test5(i16* nocapture readonly %a, i16* nocapture readonly %b) local_unnamed_addr #0 {
+define dso_local float @test5(ptr nocapture readonly %a, ptr nocapture readonly %b) local_unnamed_addr #0 {
 entry:
-  %0 = load i16, i16* %a, align 2
+  %0 = load i16, ptr %a, align 2
   %1 = tail call float @llvm.convert.from.fp16.f32(i16 %0)
-  %2 = load i16, i16* %b, align 2
+  %2 = load i16, ptr %b, align 2
   %3 = tail call float @llvm.convert.from.fp16.f32(i16 %2)
   %add = fadd float %1, %3
   ret float %add

diff  --git a/llvm/test/CodeGen/Hexagon/fp_latency.ll b/llvm/test/CodeGen/Hexagon/fp_latency.ll
index 4934897a7e356..275127e012469 100644
--- a/llvm/test/CodeGen/Hexagon/fp_latency.ll
+++ b/llvm/test/CodeGen/Hexagon/fp_latency.ll
@@ -16,17 +16,12 @@ b0:
   %v1 = alloca [1000 x float], align 64
   %v2 = alloca [1000 x float], align 64
   %v3 = alloca [1000 x float], align 64
-  %v4 = bitcast [1000 x float]* %v0 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4000, i8* %v4) #2
-  %v5 = bitcast [1000 x float]* %v1 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4000, i8* %v5) #2
-  %v6 = bitcast [1000 x float]* %v2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4000, i8* %v6) #2
-  %v7 = bitcast [1000 x float]* %v3 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4000, i8* %v7) #2
+  call void @llvm.lifetime.start.p0(i64 4000, ptr %v0) #2
+  call void @llvm.lifetime.start.p0(i64 4000, ptr %v1) #2
+  call void @llvm.lifetime.start.p0(i64 4000, ptr %v2) #2
+  call void @llvm.lifetime.start.p0(i64 4000, ptr %v3) #2
   %v8 = icmp sgt i32 %a1, 0
   %v9 = add i32 %a1, -1
-  %v10 = getelementptr [1000 x float], [1000 x float]* %v3, i32 0, i32 0
   br label %b1
 
 b1:                                               ; preds = %b3, %b0
@@ -34,29 +29,29 @@ b1:                                               ; preds = %b3, %b0
   br i1 %v8, label %b2, label %b3
 
 b2:                                               ; preds = %b2, %b1
-  %v12 = phi float* [ %v33, %b2 ], [ %v10, %b1 ]
+  %v12 = phi ptr [ %v33, %b2 ], [ %v3, %b1 ]
   %v13 = phi i32 [ %v31, %b2 ], [ 0, %b1 ]
   %v14 = mul nsw i32 %v13, %a1
   %v15 = add nsw i32 %v14, %v11
-  %v16 = getelementptr inbounds [1000 x float], [1000 x float]* %v1, i32 0, i32 %v15
-  %v17 = load float, float* %v16, align 4, !tbaa !0
+  %v16 = getelementptr inbounds [1000 x float], ptr %v1, i32 0, i32 %v15
+  %v17 = load float, ptr %v16, align 4, !tbaa !0
   %v18 = fmul float %v17, %v17
   %v19 = mul nsw i32 %v13, 25
   %v20 = add nsw i32 %v19, %v11
-  %v21 = getelementptr inbounds [1000 x float], [1000 x float]* %v2, i32 0, i32 %v20
-  %v22 = load float, float* %v21, align 4, !tbaa !0
+  %v21 = getelementptr inbounds [1000 x float], ptr %v2, i32 0, i32 %v20
+  %v22 = load float, ptr %v21, align 4, !tbaa !0
   %v23 = fmul float %v22, %v22
   %v24 = fadd float %v18, %v23
-  %v25 = load float, float* %v12, align 4, !tbaa !0
+  %v25 = load float, ptr %v12, align 4, !tbaa !0
   %v26 = fmul float %v25, %v25
   %v27 = fadd float %v24, %v26
-  %v28 = getelementptr inbounds [1000 x float], [1000 x float]* %v0, i32 0, i32 %v20
-  %v29 = load float, float* %v28, align 4, !tbaa !0
+  %v28 = getelementptr inbounds [1000 x float], ptr %v0, i32 0, i32 %v20
+  %v29 = load float, ptr %v28, align 4, !tbaa !0
   %v30 = fadd float %v29, %v27
-  store float %v30, float* %v28, align 4, !tbaa !0
+  store float %v30, ptr %v28, align 4, !tbaa !0
   %v31 = add nuw nsw i32 %v13, 1
   %v32 = icmp eq i32 %v13, %v9
-  %v33 = getelementptr float, float* %v12, i32 1
+  %v33 = getelementptr float, ptr %v12, i32 1
   br i1 %v32, label %b3, label %b2
 
 b3:                                               ; preds = %b2, %b1
@@ -65,18 +60,18 @@ b3:                                               ; preds = %b2, %b1
   br i1 %v35, label %b4, label %b1
 
 b4:                                               ; preds = %b3
-  call void @llvm.lifetime.end.p0i8(i64 4000, i8* %v7) #2
-  call void @llvm.lifetime.end.p0i8(i64 4000, i8* %v6) #2
-  call void @llvm.lifetime.end.p0i8(i64 4000, i8* %v5) #2
-  call void @llvm.lifetime.end.p0i8(i64 4000, i8* %v4) #2
+  call void @llvm.lifetime.end.p0(i64 4000, ptr %v3) #2
+  call void @llvm.lifetime.end.p0(i64 4000, ptr %v2) #2
+  call void @llvm.lifetime.end.p0(i64 4000, ptr %v1) #2
+  call void @llvm.lifetime.end.p0(i64 4000, ptr %v0) #2
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
 
 attributes #0 = { nounwind readnone "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/fpelim-basic.ll b/llvm/test/CodeGen/Hexagon/fpelim-basic.ll
index db926d6943e9b..52a68a9d22f73 100644
--- a/llvm/test/CodeGen/Hexagon/fpelim-basic.ll
+++ b/llvm/test/CodeGen/Hexagon/fpelim-basic.ll
@@ -10,35 +10,34 @@ target triple = "hexagon"
 define i32 @danny(i32 %a0, i32 %a1) local_unnamed_addr #0 {
 b2:
   %v3 = alloca [32 x i32], align 8
-  %v4 = bitcast [32 x i32]* %v3 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 128, i8* nonnull %v4) #3
+  call void @llvm.lifetime.start.p0(i64 128, ptr nonnull %v3) #3
   br label %b5
 
 b5:                                               ; preds = %b5, %b2
   %v6 = phi i32 [ 0, %b2 ], [ %v8, %b5 ]
-  %v7 = getelementptr inbounds [32 x i32], [32 x i32]* %v3, i32 0, i32 %v6
-  store i32 %v6, i32* %v7, align 4
+  %v7 = getelementptr inbounds [32 x i32], ptr %v3, i32 0, i32 %v6
+  store i32 %v6, ptr %v7, align 4
   %v8 = add nuw nsw i32 %v6, 1
   %v9 = icmp eq i32 %v8, 32
   br i1 %v9, label %b10, label %b5
 
 b10:                                              ; preds = %b5
-  %v11 = getelementptr inbounds [32 x i32], [32 x i32]* %v3, i32 0, i32 %a0
-  store i32 %a1, i32* %v11, align 4
+  %v11 = getelementptr inbounds [32 x i32], ptr %v3, i32 0, i32 %a0
+  store i32 %a1, ptr %v11, align 4
   br label %b12
 
 b12:                                              ; preds = %b12, %b10
   %v13 = phi i32 [ 0, %b10 ], [ %v18, %b12 ]
   %v14 = phi i32 [ 0, %b10 ], [ %v17, %b12 ]
-  %v15 = getelementptr inbounds [32 x i32], [32 x i32]* %v3, i32 0, i32 %v13
-  %v16 = load i32, i32* %v15, align 4
+  %v15 = getelementptr inbounds [32 x i32], ptr %v3, i32 0, i32 %v13
+  %v16 = load i32, ptr %v15, align 4
   %v17 = add nsw i32 %v16, %v14
   %v18 = add nuw nsw i32 %v13, 1
   %v19 = icmp eq i32 %v18, 32
   br i1 %v19, label %b20, label %b12
 
 b20:                                              ; preds = %b12
-  call void @llvm.lifetime.end.p0i8(i64 128, i8* nonnull %v4) #3
+  call void @llvm.lifetime.end.p0(i64 128, ptr nonnull %v3) #3
   ret i32 %v17
 }
 
@@ -50,40 +49,39 @@ b20:                                              ; preds = %b12
 define i32 @sammy(i32 %a0, i32 %a1) local_unnamed_addr #1 {
 b2:
   %v3 = alloca [32 x i32], align 8
-  %v4 = bitcast [32 x i32]* %v3 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 128, i8* nonnull %v4) #3
+  call void @llvm.lifetime.start.p0(i64 128, ptr nonnull %v3) #3
   br label %b5
 
 b5:                                               ; preds = %b5, %b2
   %v6 = phi i32 [ 0, %b2 ], [ %v8, %b5 ]
-  %v7 = getelementptr inbounds [32 x i32], [32 x i32]* %v3, i32 0, i32 %v6
-  store i32 %v6, i32* %v7, align 4
+  %v7 = getelementptr inbounds [32 x i32], ptr %v3, i32 0, i32 %v6
+  store i32 %v6, ptr %v7, align 4
   %v8 = add nuw nsw i32 %v6, 1
   %v9 = icmp eq i32 %v8, 32
   br i1 %v9, label %b10, label %b5
 
 b10:                                              ; preds = %b5
-  %v11 = getelementptr inbounds [32 x i32], [32 x i32]* %v3, i32 0, i32 %a0
-  store i32 %a1, i32* %v11, align 4
+  %v11 = getelementptr inbounds [32 x i32], ptr %v3, i32 0, i32 %a0
+  store i32 %a1, ptr %v11, align 4
   br label %b12
 
 b12:                                              ; preds = %b12, %b10
   %v13 = phi i32 [ 0, %b10 ], [ %v18, %b12 ]
   %v14 = phi i32 [ 0, %b10 ], [ %v17, %b12 ]
-  %v15 = getelementptr inbounds [32 x i32], [32 x i32]* %v3, i32 0, i32 %v13
-  %v16 = load i32, i32* %v15, align 4
+  %v15 = getelementptr inbounds [32 x i32], ptr %v3, i32 0, i32 %v13
+  %v16 = load i32, ptr %v15, align 4
   %v17 = add nsw i32 %v16, %v14
   %v18 = add nuw nsw i32 %v13, 1
   %v19 = icmp eq i32 %v18, 32
   br i1 %v19, label %b20, label %b12
 
 b20:                                              ; preds = %b12
-  call void @llvm.lifetime.end.p0i8(i64 128, i8* nonnull %v4) #3
+  call void @llvm.lifetime.end.p0(i64 128, ptr nonnull %v3) #3
   ret i32 %v17
 }
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #2
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #2
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #2
 
 attributes #0 = { nounwind readnone "frame-pointer"="none" "target-cpu"="hexagonv60" }
 attributes #1 = { nounwind readnone "frame-pointer"="all" "target-cpu"="hexagonv60" }

diff  --git a/llvm/test/CodeGen/Hexagon/frame-offset-overflow.ll b/llvm/test/CodeGen/Hexagon/frame-offset-overflow.ll
index a3cff049b3a27..861a54f893f0f 100644
--- a/llvm/test/CodeGen/Hexagon/frame-offset-overflow.ll
+++ b/llvm/test/CodeGen/Hexagon/frame-offset-overflow.ll
@@ -11,135 +11,127 @@
 
 target triple = "hexagon"
 
-define void @fred(i16* noalias nocapture readonly %p0, i32 %p1, i32 %p2, i16* noalias nocapture %p3, i32 %p4) local_unnamed_addr #1 {
+define void @fred(ptr noalias nocapture readonly %p0, i32 %p1, i32 %p2, ptr noalias nocapture %p3, i32 %p4) local_unnamed_addr #1 {
 entry:
   %mul = mul i32 %p4, %p1
-  %add.ptr = getelementptr inbounds i16, i16* %p0, i32 %mul
+  %add.ptr = getelementptr inbounds i16, ptr %p0, i32 %mul
   %add = add nsw i32 %p4, 1
   %rem = srem i32 %add, 5
   %mul1 = mul i32 %rem, %p1
-  %add.ptr2 = getelementptr inbounds i16, i16* %p0, i32 %mul1
-  %add.ptr6 = getelementptr inbounds i16, i16* %p0, i32 0
+  %add.ptr2 = getelementptr inbounds i16, ptr %p0, i32 %mul1
   %add7 = add nsw i32 %p4, 3
   %rem8 = srem i32 %add7, 5
   %mul9 = mul i32 %rem8, %p1
-  %add.ptr10 = getelementptr inbounds i16, i16* %p0, i32 %mul9
-  %add.ptr14 = getelementptr inbounds i16, i16* %p0, i32 0
-  %incdec.ptr18 = getelementptr inbounds i16, i16* %add.ptr14, i32 32
-  %0 = bitcast i16* %incdec.ptr18 to <16 x i32>*
-  %incdec.ptr17 = getelementptr inbounds i16, i16* %add.ptr10, i32 32
-  %1 = bitcast i16* %incdec.ptr17 to <16 x i32>*
-  %incdec.ptr16 = getelementptr inbounds i16, i16* %add.ptr6, i32 32
-  %2 = bitcast i16* %incdec.ptr16 to <16 x i32>*
-  %incdec.ptr15 = getelementptr inbounds i16, i16* %add.ptr2, i32 32
-  %3 = bitcast i16* %incdec.ptr15 to <16 x i32>*
-  %incdec.ptr = getelementptr inbounds i16, i16* %add.ptr, i32 32
-  %4 = bitcast i16* %incdec.ptr to <16 x i32>*
-  %5 = bitcast i16* %p3 to <16 x i32>*
+  %add.ptr10 = getelementptr inbounds i16, ptr %p0, i32 %mul9
+  %incdec.ptr18 = getelementptr inbounds i16, ptr %p0, i32 32
+  %incdec.ptr17 = getelementptr inbounds i16, ptr %add.ptr10, i32 32
+  %incdec.ptr16 = getelementptr inbounds i16, ptr %p0, i32 32
+  %incdec.ptr15 = getelementptr inbounds i16, ptr %add.ptr2, i32 32
+  %incdec.ptr = getelementptr inbounds i16, ptr %add.ptr, i32 32
   br i1 undef, label %for.end.loopexit.unr-lcssa, label %for.body
 
 for.body:                                         ; preds = %for.body, %entry
-  %optr.0102 = phi <16 x i32>* [ %incdec.ptr24.3, %for.body ], [ %5, %entry ]
-  %iptr4.0101 = phi <16 x i32>* [ %incdec.ptr23.3, %for.body ], [ %0, %entry ]
-  %iptr3.0100 = phi <16 x i32>* [ %incdec.ptr22.3, %for.body ], [ %1, %entry ]
-  %iptr2.099 = phi <16 x i32>* [ undef, %for.body ], [ %2, %entry ]
-  %iptr1.098 = phi <16 x i32>* [ %incdec.ptr20.3, %for.body ], [ %3, %entry ]
-  %iptr0.097 = phi <16 x i32>* [ %incdec.ptr19.3, %for.body ], [ %4, %entry ]
-  %dVsumv1.096 = phi <32 x i32> [ %66, %for.body ], [ undef, %entry ]
+  %optr.0102 = phi ptr [ %incdec.ptr24.3, %for.body ], [ %p3, %entry ]
+  %iptr4.0101 = phi ptr [ %incdec.ptr23.3, %for.body ], [ %incdec.ptr18, %entry ]
+  %iptr3.0100 = phi ptr [ %incdec.ptr22.3, %for.body ], [ %incdec.ptr17, %entry ]
+  %iptr2.099 = phi ptr [ undef, %for.body ], [ %incdec.ptr16, %entry ]
+  %iptr1.098 = phi ptr [ %incdec.ptr20.3, %for.body ], [ %incdec.ptr15, %entry ]
+  %iptr0.097 = phi ptr [ %incdec.ptr19.3, %for.body ], [ %incdec.ptr, %entry ]
+  %dVsumv1.096 = phi <32 x i32> [ %60, %for.body ], [ undef, %entry ]
   %niter = phi i32 [ %niter.nsub.3, %for.body ], [ undef, %entry ]
-  %6 = load <16 x i32>, <16 x i32>* %iptr0.097, align 64, !tbaa !1
-  %7 = load <16 x i32>, <16 x i32>* %iptr1.098, align 64, !tbaa !1
-  %8 = load <16 x i32>, <16 x i32>* %iptr2.099, align 64, !tbaa !1
-  %9 = load <16 x i32>, <16 x i32>* %iptr3.0100, align 64, !tbaa !1
-  %10 = load <16 x i32>, <16 x i32>* %iptr4.0101, align 64, !tbaa !1
-  %11 = tail call <32 x i32> @llvm.hexagon.V6.vaddhw(<16 x i32> %6, <16 x i32> %10)
-  %12 = tail call <32 x i32> @llvm.hexagon.V6.vmpyhsat.acc(<32 x i32> %11, <16 x i32> %8, i32 393222)
-  %13 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %9, <16 x i32> %7)
-  %14 = tail call <32 x i32> @llvm.hexagon.V6.vmpahb.acc(<32 x i32> %12, <32 x i32> %13, i32 67372036)
-  %15 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %dVsumv1.096)
-  %16 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %14)
-  %17 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %16, <16 x i32> %15, i32 4)
-  %18 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %14)
-  %19 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %16, <16 x i32> %15, i32 8)
-  %20 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %18, <16 x i32> undef, i32 8)
-  %21 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %17, <16 x i32> %19)
-  %22 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %15, <16 x i32> %19)
-  %23 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %22, <16 x i32> %17, i32 101058054)
-  %24 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %23, <16 x i32> zeroinitializer, i32 67372036)
-  %25 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> undef, <16 x i32> %20)
-  %26 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %25, <16 x i32> undef, i32 101058054)
-  %27 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %26, <16 x i32> %21, i32 67372036)
-  %28 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %27, <16 x i32> %24, i32 8)
-  %incdec.ptr24 = getelementptr inbounds <16 x i32>, <16 x i32>* %optr.0102, i32 1
-  store <16 x i32> %28, <16 x i32>* %optr.0102, align 64, !tbaa !1
-  %incdec.ptr19.1 = getelementptr inbounds <16 x i32>, <16 x i32>* %iptr0.097, i32 2
-  %incdec.ptr23.1 = getelementptr inbounds <16 x i32>, <16 x i32>* %iptr4.0101, i32 2
-  %29 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %14)
-  %30 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %14)
-  %31 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> undef, <16 x i32> %29, i32 4)
-  %32 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> undef, <16 x i32> %30, i32 4)
-  %33 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> undef, <16 x i32> %29, i32 8)
-  %34 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> undef, <16 x i32> %30, i32 8)
-  %35 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %31, <16 x i32> %33)
-  %36 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %29, <16 x i32> %33)
-  %37 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %36, <16 x i32> %31, i32 101058054)
-  %38 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %37, <16 x i32> undef, i32 67372036)
-  %39 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %30, <16 x i32> %34)
-  %40 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %39, <16 x i32> %32, i32 101058054)
-  %41 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %40, <16 x i32> %35, i32 67372036)
-  %42 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %41, <16 x i32> %38, i32 8)
-  %incdec.ptr24.1 = getelementptr inbounds <16 x i32>, <16 x i32>* %optr.0102, i32 2
-  store <16 x i32> %42, <16 x i32>* %incdec.ptr24, align 64, !tbaa !1
-  %incdec.ptr19.2 = getelementptr inbounds <16 x i32>, <16 x i32>* %iptr0.097, i32 3
-  %43 = load <16 x i32>, <16 x i32>* %incdec.ptr19.1, align 64, !tbaa !1
-  %incdec.ptr20.2 = getelementptr inbounds <16 x i32>, <16 x i32>* %iptr1.098, i32 3
-  %incdec.ptr21.2 = getelementptr inbounds <16 x i32>, <16 x i32>* %iptr2.099, i32 3
-  %incdec.ptr22.2 = getelementptr inbounds <16 x i32>, <16 x i32>* %iptr3.0100, i32 3
-  %incdec.ptr23.2 = getelementptr inbounds <16 x i32>, <16 x i32>* %iptr4.0101, i32 3
-  %44 = load <16 x i32>, <16 x i32>* %incdec.ptr23.1, align 64, !tbaa !1
-  %45 = tail call <32 x i32> @llvm.hexagon.V6.vaddhw(<16 x i32> %43, <16 x i32> %44)
-  %46 = tail call <32 x i32> @llvm.hexagon.V6.vmpyhsat.acc(<32 x i32> %45, <16 x i32> undef, i32 393222)
-  %47 = tail call <32 x i32> @llvm.hexagon.V6.vmpahb.acc(<32 x i32> %46, <32 x i32> undef, i32 67372036)
-  %48 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %47)
-  %49 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %48, <16 x i32> undef, i32 4)
-  %50 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %48, <16 x i32> undef, i32 8)
-  %51 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> zeroinitializer, <16 x i32> undef)
-  %52 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %49, <16 x i32> %50)
-  %53 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> undef, <16 x i32> %50)
-  %54 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %53, <16 x i32> %49, i32 101058054)
-  %55 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %54, <16 x i32> %51, i32 67372036)
-  %56 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> undef, <16 x i32> %52, i32 67372036)
-  %57 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %56, <16 x i32> %55, i32 8)
-  %incdec.ptr24.2 = getelementptr inbounds <16 x i32>, <16 x i32>* %optr.0102, i32 3
-  store <16 x i32> %57, <16 x i32>* %incdec.ptr24.1, align 64, !tbaa !1
-  %incdec.ptr19.3 = getelementptr inbounds <16 x i32>, <16 x i32>* %iptr0.097, i32 4
-  %58 = load <16 x i32>, <16 x i32>* %incdec.ptr19.2, align 64, !tbaa !1
-  %incdec.ptr20.3 = getelementptr inbounds <16 x i32>, <16 x i32>* %iptr1.098, i32 4
-  %59 = load <16 x i32>, <16 x i32>* %incdec.ptr20.2, align 64, !tbaa !1
-  %60 = load <16 x i32>, <16 x i32>* %incdec.ptr21.2, align 64, !tbaa !1
-  %incdec.ptr22.3 = getelementptr inbounds <16 x i32>, <16 x i32>* %iptr3.0100, i32 4
-  %61 = load <16 x i32>, <16 x i32>* %incdec.ptr22.2, align 64, !tbaa !1
-  %incdec.ptr23.3 = getelementptr inbounds <16 x i32>, <16 x i32>* %iptr4.0101, i32 4
-  %62 = load <16 x i32>, <16 x i32>* %incdec.ptr23.2, align 64, !tbaa !1
-  %63 = tail call <32 x i32> @llvm.hexagon.V6.vaddhw(<16 x i32> %58, <16 x i32> %62)
-  %64 = tail call <32 x i32> @llvm.hexagon.V6.vmpyhsat.acc(<32 x i32> %63, <16 x i32> %60, i32 393222)
-  %65 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %61, <16 x i32> %59)
-  %66 = tail call <32 x i32> @llvm.hexagon.V6.vmpahb.acc(<32 x i32> %64, <32 x i32> %65, i32 67372036)
-  %67 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %47)
-  %68 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %66)
-  %69 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %68, <16 x i32> undef, i32 4)
-  %70 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %66)
-  %71 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %70, <16 x i32> %67, i32 4)
-  %72 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %70, <16 x i32> %67, i32 8)
-  %73 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %67, <16 x i32> %71)
-  %74 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> undef, <16 x i32> %69, i32 101058054)
-  %75 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %74, <16 x i32> %73, i32 67372036)
-  %76 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %67, <16 x i32> %72)
-  %77 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %76, <16 x i32> %71, i32 101058054)
-  %78 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %77, <16 x i32> undef, i32 67372036)
-  %79 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %78, <16 x i32> %75, i32 8)
-  %incdec.ptr24.3 = getelementptr inbounds <16 x i32>, <16 x i32>* %optr.0102, i32 4
-  store <16 x i32> %79, <16 x i32>* %incdec.ptr24.2, align 64, !tbaa !1
+  %0 = load <16 x i32>, ptr %iptr0.097, align 64, !tbaa !1
+  %1 = load <16 x i32>, ptr %iptr1.098, align 64, !tbaa !1
+  %2 = load <16 x i32>, ptr %iptr2.099, align 64, !tbaa !1
+  %3 = load <16 x i32>, ptr %iptr3.0100, align 64, !tbaa !1
+  %4 = load <16 x i32>, ptr %iptr4.0101, align 64, !tbaa !1
+  %5 = tail call <32 x i32> @llvm.hexagon.V6.vaddhw(<16 x i32> %0, <16 x i32> %4)
+  %6 = tail call <32 x i32> @llvm.hexagon.V6.vmpyhsat.acc(<32 x i32> %5, <16 x i32> %2, i32 393222)
+  %7 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %3, <16 x i32> %1)
+  %8 = tail call <32 x i32> @llvm.hexagon.V6.vmpahb.acc(<32 x i32> %6, <32 x i32> %7, i32 67372036)
+  %9 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %dVsumv1.096)
+  %10 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %8)
+  %11 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %10, <16 x i32> %9, i32 4)
+  %12 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %8)
+  %13 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %10, <16 x i32> %9, i32 8)
+  %14 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %12, <16 x i32> undef, i32 8)
+  %15 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %11, <16 x i32> %13)
+  %16 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %9, <16 x i32> %13)
+  %17 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %16, <16 x i32> %11, i32 101058054)
+  %18 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %17, <16 x i32> zeroinitializer, i32 67372036)
+  %19 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> undef, <16 x i32> %14)
+  %20 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %19, <16 x i32> undef, i32 101058054)
+  %21 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %20, <16 x i32> %15, i32 67372036)
+  %22 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %21, <16 x i32> %18, i32 8)
+  %incdec.ptr24 = getelementptr inbounds <16 x i32>, ptr %optr.0102, i32 1
+  store <16 x i32> %22, ptr %optr.0102, align 64, !tbaa !1
+  %incdec.ptr19.1 = getelementptr inbounds <16 x i32>, ptr %iptr0.097, i32 2
+  %incdec.ptr23.1 = getelementptr inbounds <16 x i32>, ptr %iptr4.0101, i32 2
+  %23 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %8)
+  %24 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %8)
+  %25 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> undef, <16 x i32> %23, i32 4)
+  %26 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> undef, <16 x i32> %24, i32 4)
+  %27 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> undef, <16 x i32> %23, i32 8)
+  %28 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> undef, <16 x i32> %24, i32 8)
+  %29 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %25, <16 x i32> %27)
+  %30 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %23, <16 x i32> %27)
+  %31 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %30, <16 x i32> %25, i32 101058054)
+  %32 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %31, <16 x i32> undef, i32 67372036)
+  %33 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %24, <16 x i32> %28)
+  %34 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %33, <16 x i32> %26, i32 101058054)
+  %35 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %34, <16 x i32> %29, i32 67372036)
+  %36 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %35, <16 x i32> %32, i32 8)
+  %incdec.ptr24.1 = getelementptr inbounds <16 x i32>, ptr %optr.0102, i32 2
+  store <16 x i32> %36, ptr %incdec.ptr24, align 64, !tbaa !1
+  %incdec.ptr19.2 = getelementptr inbounds <16 x i32>, ptr %iptr0.097, i32 3
+  %37 = load <16 x i32>, ptr %incdec.ptr19.1, align 64, !tbaa !1
+  %incdec.ptr20.2 = getelementptr inbounds <16 x i32>, ptr %iptr1.098, i32 3
+  %incdec.ptr21.2 = getelementptr inbounds <16 x i32>, ptr %iptr2.099, i32 3
+  %incdec.ptr22.2 = getelementptr inbounds <16 x i32>, ptr %iptr3.0100, i32 3
+  %incdec.ptr23.2 = getelementptr inbounds <16 x i32>, ptr %iptr4.0101, i32 3
+  %38 = load <16 x i32>, ptr %incdec.ptr23.1, align 64, !tbaa !1
+  %39 = tail call <32 x i32> @llvm.hexagon.V6.vaddhw(<16 x i32> %37, <16 x i32> %38)
+  %40 = tail call <32 x i32> @llvm.hexagon.V6.vmpyhsat.acc(<32 x i32> %39, <16 x i32> undef, i32 393222)
+  %41 = tail call <32 x i32> @llvm.hexagon.V6.vmpahb.acc(<32 x i32> %40, <32 x i32> undef, i32 67372036)
+  %42 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %41)
+  %43 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %42, <16 x i32> undef, i32 4)
+  %44 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %42, <16 x i32> undef, i32 8)
+  %45 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> zeroinitializer, <16 x i32> undef)
+  %46 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %43, <16 x i32> %44)
+  %47 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> undef, <16 x i32> %44)
+  %48 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %47, <16 x i32> %43, i32 101058054)
+  %49 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %48, <16 x i32> %45, i32 67372036)
+  %50 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> undef, <16 x i32> %46, i32 67372036)
+  %51 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %50, <16 x i32> %49, i32 8)
+  %incdec.ptr24.2 = getelementptr inbounds <16 x i32>, ptr %optr.0102, i32 3
+  store <16 x i32> %51, ptr %incdec.ptr24.1, align 64, !tbaa !1
+  %incdec.ptr19.3 = getelementptr inbounds <16 x i32>, ptr %iptr0.097, i32 4
+  %52 = load <16 x i32>, ptr %incdec.ptr19.2, align 64, !tbaa !1
+  %incdec.ptr20.3 = getelementptr inbounds <16 x i32>, ptr %iptr1.098, i32 4
+  %53 = load <16 x i32>, ptr %incdec.ptr20.2, align 64, !tbaa !1
+  %54 = load <16 x i32>, ptr %incdec.ptr21.2, align 64, !tbaa !1
+  %incdec.ptr22.3 = getelementptr inbounds <16 x i32>, ptr %iptr3.0100, i32 4
+  %55 = load <16 x i32>, ptr %incdec.ptr22.2, align 64, !tbaa !1
+  %incdec.ptr23.3 = getelementptr inbounds <16 x i32>, ptr %iptr4.0101, i32 4
+  %56 = load <16 x i32>, ptr %incdec.ptr23.2, align 64, !tbaa !1
+  %57 = tail call <32 x i32> @llvm.hexagon.V6.vaddhw(<16 x i32> %52, <16 x i32> %56)
+  %58 = tail call <32 x i32> @llvm.hexagon.V6.vmpyhsat.acc(<32 x i32> %57, <16 x i32> %54, i32 393222)
+  %59 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %55, <16 x i32> %53)
+  %60 = tail call <32 x i32> @llvm.hexagon.V6.vmpahb.acc(<32 x i32> %58, <32 x i32> %59, i32 67372036)
+  %61 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %41)
+  %62 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %60)
+  %63 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %62, <16 x i32> undef, i32 4)
+  %64 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %60)
+  %65 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %64, <16 x i32> %61, i32 4)
+  %66 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %64, <16 x i32> %61, i32 8)
+  %67 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %61, <16 x i32> %65)
+  %68 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> undef, <16 x i32> %63, i32 101058054)
+  %69 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %68, <16 x i32> %67, i32 67372036)
+  %70 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %61, <16 x i32> %66)
+  %71 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %70, <16 x i32> %65, i32 101058054)
+  %72 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %71, <16 x i32> undef, i32 67372036)
+  %73 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %72, <16 x i32> %69, i32 8)
+  %incdec.ptr24.3 = getelementptr inbounds <16 x i32>, ptr %optr.0102, i32 4
+  store <16 x i32> %73, ptr %incdec.ptr24.2, align 64, !tbaa !1
   %niter.nsub.3 = add i32 %niter, -4
   %niter.ncmp.3 = icmp eq i32 %niter.nsub.3, 0
   br i1 %niter.ncmp.3, label %for.end.loopexit.unr-lcssa, label %for.body

diff  --git a/llvm/test/CodeGen/Hexagon/fsub.ll b/llvm/test/CodeGen/Hexagon/fsub.ll
index e9a1fa3d192bc..a9711f43710b6 100644
--- a/llvm/test/CodeGen/Hexagon/fsub.ll
+++ b/llvm/test/CodeGen/Hexagon/fsub.ll
@@ -8,11 +8,11 @@ entry:
   %a = alloca float, align 4
   %b = alloca float, align 4
   %c = alloca float, align 4
-  store volatile float 0x402ECCCCC0000000, float* %a, align 4
-  store volatile float 0x4022333340000000, float* %b, align 4
-  %0 = load volatile float, float* %b, align 4
-  %1 = load volatile float, float* %a, align 4
+  store volatile float 0x402ECCCCC0000000, ptr %a, align 4
+  store volatile float 0x4022333340000000, ptr %b, align 4
+  %0 = load volatile float, ptr %b, align 4
+  %1 = load volatile float, ptr %a, align 4
   %sub = fsub float %0, %1
-  store float %sub, float* %c, align 4
+  store float %sub, ptr %c, align 4
   ret i32 0
 }

diff  --git a/llvm/test/CodeGen/Hexagon/funnel-shift2.ll b/llvm/test/CodeGen/Hexagon/funnel-shift2.ll
index b3032dbb7efd0..03e6355149b15 100644
--- a/llvm/test/CodeGen/Hexagon/funnel-shift2.ll
+++ b/llvm/test/CodeGen/Hexagon/funnel-shift2.ll
@@ -14,17 +14,17 @@ define dso_local i64 @fshl(i64 %x, i64 %y) {
 entry:
   %x.addr = alloca i64, align 8
   %y.addr = alloca i64, align 8
-  store i64 %x, i64* %x.addr, align 8
-  store i64 %y, i64* %y.addr, align 8
-  %0 = load i64, i64* %x.addr, align 8
+  store i64 %x, ptr %x.addr, align 8
+  store i64 %y, ptr %y.addr, align 8
+  %0 = load i64, ptr %x.addr, align 8
   %shr = lshr i64 %0, 1
-  %1 = load i64, i64* %x.addr, align 8
-  %2 = load i64, i64* %y.addr, align 8
+  %1 = load i64, ptr %x.addr, align 8
+  %2 = load i64, ptr %y.addr, align 8
   %call = call i64 @parity(i64 %1, i64 %2)
   %shl = shl i64 %call, 63
   %or = or i64 %shr, %shl
-  store i64 %or, i64* %x.addr, align 8
-  %3 = load i64, i64* %x.addr, align 8
+  store i64 %or, ptr %x.addr, align 8
+  %3 = load i64, ptr %x.addr, align 8
   ret i64 %3
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/fusedandshift.ll b/llvm/test/CodeGen/Hexagon/fusedandshift.ll
index 9abd366e6916b..e7c48249bf059 100644
--- a/llvm/test/CodeGen/Hexagon/fusedandshift.ll
+++ b/llvm/test/CodeGen/Hexagon/fusedandshift.ll
@@ -4,14 +4,14 @@
 
 ; CHECK: r{{[0-9]+}} = and(#15,lsr(r{{[0-9]+}},#{{[0-9]+}})
 
-define i32 @main(i16* %a, i16* %b) nounwind {
+define i32 @main(ptr %a, ptr %b) nounwind {
   entry:
-  %0 = load i16, i16* %a, align 2
+  %0 = load i16, ptr %a, align 2
   %conv1 = sext i16 %0 to i32
   %shr1 = ashr i32 %conv1, 3
   %and1 = and i32 %shr1, 15
   %conv2 = trunc i32 %and1 to i16
-  store i16 %conv2, i16* %b, align 2
+  store i16 %conv2, ptr %b, align 2
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/generate-const-buildvector32.ll b/llvm/test/CodeGen/Hexagon/generate-const-buildvector32.ll
index 645efc2932ca5..637e622cd2842 100644
--- a/llvm/test/CodeGen/Hexagon/generate-const-buildvector32.ll
+++ b/llvm/test/CodeGen/Hexagon/generate-const-buildvector32.ll
@@ -5,7 +5,7 @@
 define dso_local i32 @main() #0 {
 entry:
   %a = alloca <4 x i8>, align 4
-  store <4 x i8> <i8 10, i8 20, i8 30, i8 40>, <4 x i8>* %a, align 4
+  store <4 x i8> <i8 10, i8 20, i8 30, i8 40>, ptr %a, align 4
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/getBlockAddress.ll b/llvm/test/CodeGen/Hexagon/getBlockAddress.ll
index 923780edaccf4..07d9b19ec4904 100644
--- a/llvm/test/CodeGen/Hexagon/getBlockAddress.ll
+++ b/llvm/test/CodeGen/Hexagon/getBlockAddress.ll
@@ -4,14 +4,14 @@
 ; Function Attrs: nounwind
 define void @f0() #0 {
 b0:
-  call void bitcast (void (...)* @f1 to void (i8*)*)(i8* blockaddress(@f0, %b1))
+  call void @f1(ptr blockaddress(@f0, %b1))
   br label %b1
 
 b1:                                               ; preds = %b2, %b0
   ret void
 
 b2:                                               ; No predecessors!
-  indirectbr i8* undef, [label %b1]
+  indirectbr ptr undef, [label %b1]
 }
 
 declare void @f1(...)

diff  --git a/llvm/test/CodeGen/Hexagon/glob-align-volatile.ll b/llvm/test/CodeGen/Hexagon/glob-align-volatile.ll
index b954584f57d97..9d4cbd9bc7c80 100644
--- a/llvm/test/CodeGen/Hexagon/glob-align-volatile.ll
+++ b/llvm/test/CodeGen/Hexagon/glob-align-volatile.ll
@@ -9,9 +9,8 @@ target triple = "hexagon"
 ; Function Attrs: nounwind optsize
 define i32 @f0(i32 %a0) #0 {
 b0:
-  %v0 = inttoptr i32 %a0 to %s.0*
-  %v1 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 0
-  %v2 = load volatile i32, i32* %v1, align 4, !tbaa !0
+  %v0 = inttoptr i32 %a0 to ptr
+  %v2 = load volatile i32, ptr %v0, align 4, !tbaa !0
   ret i32 %v2
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/global-const-gep.ll b/llvm/test/CodeGen/Hexagon/global-const-gep.ll
index 373cf94fb6cbe..1c163326847a5 100644
--- a/llvm/test/CodeGen/Hexagon/global-const-gep.ll
+++ b/llvm/test/CodeGen/Hexagon/global-const-gep.ll
@@ -7,11 +7,11 @@ target triple = "hexagon-unknown--elf"
 %s.0 = type { i32, i64, [100 x i8] }
 
 @g0 = common global %s.0 zeroinitializer, align 8
- at g1 = global i8* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 2, i32 10), align 4
+ at g1 = global ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 2, i32 10), align 4
 ; CHECK-LABEL: g1:
 ; CHECK: .word g0+26
 
 @g2 = common global [100 x i8] zeroinitializer, align 8
- at g3 = global i8* getelementptr inbounds ([100 x i8], [100 x i8]* @g2, i32 0, i32 10), align 4
+ at g3 = global ptr getelementptr inbounds ([100 x i8], ptr @g2, i32 0, i32 10), align 4
 ; CHECK-LABEL: g3:
 ; CHECK: .word g2+10

diff  --git a/llvm/test/CodeGen/Hexagon/global-ctor-pcrel.ll b/llvm/test/CodeGen/Hexagon/global-ctor-pcrel.ll
index f11b80291679e..9e256bcee0205 100644
--- a/llvm/test/CodeGen/Hexagon/global-ctor-pcrel.ll
+++ b/llvm/test/CodeGen/Hexagon/global-ctor-pcrel.ll
@@ -7,30 +7,29 @@ target triple = "hexagon"
 
 @g0 = global %s.0 zeroinitializer, align 4
 
- at e0 = alias void (%s.0*, i32, i32), void (%s.0*, i32, i32)* @f0
+ at e0 = alias void (ptr, i32, i32), ptr @f0
 
 ; Function Attrs: nounwind
-define void @f0(%s.0* %a0, i32 %a1, i32 %a2) unnamed_addr #0 align 2 {
+define void @f0(ptr %a0, i32 %a1, i32 %a2) unnamed_addr #0 align 2 {
 b0:
-  %v0 = alloca %s.0*, align 4
+  %v0 = alloca ptr, align 4
   %v1 = alloca i32, align 4
   %v2 = alloca i32, align 4
-  store %s.0* %a0, %s.0** %v0, align 4
-  store i32 %a1, i32* %v1, align 4
-  store i32 %a2, i32* %v2, align 4
-  %v3 = load %s.0*, %s.0** %v0
-  %v4 = getelementptr inbounds %s.0, %s.0* %v3, i32 0, i32 0
-  %v5 = load i32, i32* %v2, align 4
-  store i32 %v5, i32* %v4, align 4
-  %v6 = getelementptr inbounds %s.0, %s.0* %v3, i32 0, i32 1
-  %v7 = load i32, i32* %v1, align 4
-  store i32 %v7, i32* %v6, align 4
+  store ptr %a0, ptr %v0, align 4
+  store i32 %a1, ptr %v1, align 4
+  store i32 %a2, ptr %v2, align 4
+  %v3 = load ptr, ptr %v0
+  %v5 = load i32, ptr %v2, align 4
+  store i32 %v5, ptr %v3, align 4
+  %v6 = getelementptr inbounds %s.0, ptr %v3, i32 0, i32 1
+  %v7 = load i32, ptr %v1, align 4
+  store i32 %v7, ptr %v6, align 4
   ret void
 }
 
 define internal void @f1() {
 b0:
-  call void @e0(%s.0* @g0, i32 3, i32 7)
+  call void @e0(ptr @g0, i32 3, i32 7)
   ret void
 }
 
@@ -38,7 +37,7 @@ b0:
 define i32 @f2() #0 {
 b0:
   %v0 = alloca i32, align 4
-  store i32 0, i32* %v0
+  store i32 0, ptr %v0
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/gp-plus-offset-load.ll b/llvm/test/CodeGen/Hexagon/gp-plus-offset-load.ll
index 2514d4109c094..c9a30f74d5368 100644
--- a/llvm/test/CodeGen/Hexagon/gp-plus-offset-load.ll
+++ b/llvm/test/CodeGen/Hexagon/gp-plus-offset-load.ll
@@ -8,14 +8,14 @@
 
 ; CHECK-LABEL: f0:
 ; CHECK: r{{[0-9]+}} = memw(##g0+4)
-define void @f0(i32 %a0, i32 %a1, i32* nocapture %a2) #0 {
+define void @f0(i32 %a0, i32 %a1, ptr nocapture %a2) #0 {
 b0:
   %v0 = icmp sgt i32 %a0, %a1
   br i1 %v0, label %b1, label %b2
 
 b1:                                               ; preds = %b0
-  %v1 = load i32, i32* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 3), align 4
-  store i32 %v1, i32* %a2, align 4
+  %v1 = load i32, ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 3), align 4
+  store i32 %v1, ptr %a2, align 4
   br label %b2
 
 b2:                                               ; preds = %b1, %b0
@@ -24,14 +24,14 @@ b2:                                               ; preds = %b1, %b0
 
 ; CHECK-LABEL: f1:
 ; CHECK: r{{[0-9]+}} = memub(##g0+1)
-define void @f1(i32 %a0, i32 %a1, i8* nocapture %a2) #0 {
+define void @f1(i32 %a0, i32 %a1, ptr nocapture %a2) #0 {
 b0:
   %v0 = icmp sgt i32 %a0, %a1
   br i1 %v0, label %b1, label %b2
 
 b1:                                               ; preds = %b0
-  %v1 = load i8, i8* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 1), align 1
-  store i8 %v1, i8* %a2, align 1
+  %v1 = load i8, ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 1), align 1
+  store i8 %v1, ptr %a2, align 1
   br label %b2
 
 b2:                                               ; preds = %b1, %b0
@@ -40,14 +40,14 @@ b2:                                               ; preds = %b1, %b0
 
 ; CHECK-LABEL: f2:
 ; CHECK: r{{[0-9]+}} = memuh(##g0+2)
-define void @f2(i32 %a0, i32 %a1, i16* %a2) #0 {
+define void @f2(i32 %a0, i32 %a1, ptr %a2) #0 {
 b0:
   %v0 = icmp sgt i32 %a0, %a1
   br i1 %v0, label %b1, label %b2
 
 b1:                                               ; preds = %b0
-  %v1 = load i16, i16* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 2), align 2
-  store i16 %v1, i16* %a2, align 2
+  %v1 = load i16, ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 2), align 2
+  store i16 %v1, ptr %a2, align 2
   br label %b2
 
 b2:                                               ; preds = %b1, %b0

diff  --git a/llvm/test/CodeGen/Hexagon/gp-plus-offset-store.ll b/llvm/test/CodeGen/Hexagon/gp-plus-offset-store.ll
index 91e412f7c1353..8eb8abe0fa61d 100644
--- a/llvm/test/CodeGen/Hexagon/gp-plus-offset-store.ll
+++ b/llvm/test/CodeGen/Hexagon/gp-plus-offset-store.ll
@@ -13,7 +13,7 @@ b0:
   br i1 %v0, label %b1, label %b2
 
 b1:                                               ; preds = %b0
-  store i8 %a2, i8* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 1), align 1
+  store i8 %a2, ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 1), align 1
   br label %b2
 
 b2:                                               ; preds = %b1, %b0
@@ -28,7 +28,7 @@ b0:
   br i1 %v0, label %b1, label %b2
 
 b1:                                               ; preds = %b0
-  store i16 %a2, i16* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 2), align 2
+  store i16 %a2, ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 2), align 2
   br label %b2
 
 b2:                                               ; preds = %b1, %b0

diff  --git a/llvm/test/CodeGen/Hexagon/gp-rel.ll b/llvm/test/CodeGen/Hexagon/gp-rel.ll
index 3ce40bb54704b..0228df9d3f077 100644
--- a/llvm/test/CodeGen/Hexagon/gp-rel.ll
+++ b/llvm/test/CodeGen/Hexagon/gp-rel.ll
@@ -11,19 +11,19 @@
 
 define i32 @f0(i32 %a0) #0 {
 b0:
-  %v0 = load i32, i32* @g0, align 4
-  %v1 = load i32, i32* @g1, align 4
+  %v0 = load i32, ptr @g0, align 4
+  %v1 = load i32, ptr @g1, align 4
   %v2 = add nsw i32 %v1, %v0
   %v3 = icmp eq i32 %v0, %v1
   br i1 %v3, label %b2, label %b1
 
 b1:                                               ; preds = %b0
-  %v4 = load i32, i32* @g2, align 4
+  %v4 = load i32, ptr @g2, align 4
   br label %b3
 
 b2:                                               ; preds = %b0
   %v5 = add nsw i32 %v2, %v0
-  store i32 %v5, i32* @g2, align 4
+  store i32 %v5, ptr @g2, align 4
   br label %b3
 
 b3:                                               ; preds = %b2, %b1

diff  --git a/llvm/test/CodeGen/Hexagon/hasfp-crash1.ll b/llvm/test/CodeGen/Hexagon/hasfp-crash1.ll
index 161b7aba1e5d7..e16bd6fab4aa6 100644
--- a/llvm/test/CodeGen/Hexagon/hasfp-crash1.ll
+++ b/llvm/test/CodeGen/Hexagon/hasfp-crash1.ll
@@ -6,10 +6,10 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-declare i32 @foo0(i32*, i32, i64, i32, i8 zeroext, i8 zeroext, i32) local_unnamed_addr #0
+declare i32 @foo0(ptr, i32, i64, i32, i8 zeroext, i8 zeroext, i32) local_unnamed_addr #0
 
 ; Function Attrs: nounwind
-define i32 @foo1(i32* %a0, i32 %a1, i32 %a2, i32 %a3, i8 zeroext %a4, i8 zeroext %a5, i32 %a6) local_unnamed_addr #0 !dbg !33 {
+define i32 @foo1(ptr %a0, i32 %a1, i32 %a2, i32 %a3, i8 zeroext %a4, i8 zeroext %a5, i32 %a6) local_unnamed_addr #0 !dbg !33 {
 entry:
   tail call void @llvm.dbg.value(metadata i32 %a6, i64 0, metadata !51, metadata !52), !dbg !53
   ret i32 undef, !dbg !54

diff  --git a/llvm/test/CodeGen/Hexagon/hello-world-v55.ll b/llvm/test/CodeGen/Hexagon/hello-world-v55.ll
index 3dfbbde2a59d4..3224d8cd9f0c1 100644
--- a/llvm/test/CodeGen/Hexagon/hello-world-v55.ll
+++ b/llvm/test/CodeGen/Hexagon/hello-world-v55.ll
@@ -6,11 +6,11 @@
 ; Function Attrs: nounwind
 define i32 @f0() #0 {
 b0:
-  %v0 = tail call i32 @puts(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @g0, i32 0, i32 0))
+  %v0 = tail call i32 @puts(ptr @g0)
   ret i32 0
 }
 
 ; Function Attrs: nounwind
-declare i32 @puts(i8* nocapture) #0
+declare i32 @puts(ptr nocapture) #0
 
 attributes #0 = { nounwind "target-cpu"="hexagonv55" }

diff  --git a/llvm/test/CodeGen/Hexagon/hello-world-v60.ll b/llvm/test/CodeGen/Hexagon/hello-world-v60.ll
index 8b905660f8ad9..c2c86c87e6381 100644
--- a/llvm/test/CodeGen/Hexagon/hello-world-v60.ll
+++ b/llvm/test/CodeGen/Hexagon/hello-world-v60.ll
@@ -6,11 +6,11 @@
 ; Function Attrs: nounwind
 define i32 @f0() #0 {
 b0:
-  %v0 = tail call i32 @puts(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @g0, i32 0, i32 0))
+  %v0 = tail call i32 @puts(ptr @g0)
   ret i32 0
 }
 
 ; Function Attrs: nounwind
-declare i32 @puts(i8* nocapture) #0
+declare i32 @puts(ptr nocapture) #0
 
 attributes #0 = { nounwind "target-cpu"="hexagonv60" }

diff  --git a/llvm/test/CodeGen/Hexagon/hexagon-cond-jumpr31.ll b/llvm/test/CodeGen/Hexagon/hexagon-cond-jumpr31.ll
index d79cbd413d9e4..becc6e32219f4 100644
--- a/llvm/test/CodeGen/Hexagon/hexagon-cond-jumpr31.ll
+++ b/llvm/test/CodeGen/Hexagon/hexagon-cond-jumpr31.ll
@@ -5,18 +5,18 @@
 @g0 = common global i8 0, align 1
 @g1 = common global i32 0, align 4
 
-define i32 @f0(i32* nocapture %a0) {
+define i32 @f0(ptr nocapture %a0) {
 b0:
-  %v0 = load i8, i8* @g0, align 1
+  %v0 = load i8, ptr @g0, align 1
   %v1 = icmp eq i8 %v0, 65
   br i1 %v1, label %b1, label %b2
 
 b1:                                               ; preds = %b0
-  %v2 = load i32, i32* %a0, align 4
+  %v2 = load i32, ptr %a0, align 4
   %v3 = add nsw i32 %v2, 9
-  %v4 = load i32, i32* @g1, align 4
+  %v4 = load i32, ptr @g1, align 4
   %v5 = sub i32 %v3, %v4
-  store i32 %v5, i32* %a0, align 4
+  store i32 %v5, ptr %a0, align 4
   br label %b2
 
 b2:                                               ; preds = %b1, %b0

diff  --git a/llvm/test/CodeGen/Hexagon/hexagon-tfr-add.ll b/llvm/test/CodeGen/Hexagon/hexagon-tfr-add.ll
index 1c1645e613cf8..cb37fce4baebf 100644
--- a/llvm/test/CodeGen/Hexagon/hexagon-tfr-add.ll
+++ b/llvm/test/CodeGen/Hexagon/hexagon-tfr-add.ll
@@ -7,7 +7,7 @@ target triple = "hexagon"
 %s.1 = type { i32, [30 x %s.16] }
 %s.2 = type { [10 x %s.27], i8, i8 }
 %s.3 = type { i8, %s.41 }
-%s.4 = type { i8, i8, i8* }
+%s.4 = type { i8, i8, ptr }
 %s.5 = type { %s.22, %s.1, i8, i8, i64, i8, i8, i32, i8, i8, i8, %s.34, i8, i8, i32, i8 }
 %s.6 = type { i64, i8, i8, %s.7, i8, i8, %s.34, %s.34, i8, i8, %s.26 }
 %s.7 = type { i32, [256 x %s.8] }
@@ -49,7 +49,7 @@ target triple = "hexagon"
 %s.43 = type { i8, i8, i8, i8, [9 x i16] }
 %s.44 = type { %s.45, %s.47 }
 %s.45 = type { %s.46, i32, i8 }
-%s.46 = type { %s.46*, %s.46* }
+%s.46 = type { ptr, ptr }
 %s.47 = type { %s.48 }
 %s.48 = type { %s.46, %s.49 }
 %s.49 = type { %s.50 }
@@ -57,11 +57,11 @@ target triple = "hexagon"
 %s.51 = type { i32, i16, i8, i8, i8, i8, i8, [5 x i8] }
 %s.52 = type { i8, %s.53 }
 %s.53 = type { %s.54 }
-%s.54 = type { %s.55*, i8, i32 }
-%s.55 = type { %s.46, i32, i8*, i8*, %s.55*, %s.55*, i32, i8, i8, i16, i32, i8, %s.56, i16, [1 x %s.58], i32 }
+%s.54 = type { ptr, i8, i32 }
+%s.55 = type { %s.46, i32, ptr, ptr, ptr, ptr, i32, i8, i8, i16, i32, i8, %s.56, i16, [1 x %s.58], i32 }
 %s.56 = type { %s.57 }
 %s.57 = type { i8 }
-%s.58 = type { i8*, i32 }
+%s.58 = type { ptr, i32 }
 %s.59 = type { i8, [17 x %s.60] }
 %s.60 = type { i16, i8, [16 x %s.61] }
 %s.61 = type { i8, i8 }
@@ -72,16 +72,16 @@ target triple = "hexagon"
 %s.66 = type { i8, i32, i8 }
 %s.67 = type { i16, i8 }
 %s.68 = type { %s.69 }
-%s.69 = type { i32, i8* }
+%s.69 = type { i32, ptr }
 
 @g0 = external global %s.0, align 8
 @g1 = external constant %s.68, section ".dummy.dummy.dummy.dumm", align 4
 
 ; Function Attrs: optsize
-declare void @f0(%s.68*) #0
+declare void @f0(ptr) #0
 
 ; Function Attrs: nounwind optsize
-declare zeroext i8 @f1(i8*) #1
+declare zeroext i8 @f1(ptr) #1
 
 ; Function Attrs: nounwind optsize
 declare void @f2(i32) #1
@@ -92,29 +92,29 @@ declare void @f2(i32) #1
 ; Function Attrs: nounwind optsize ssp
 define zeroext i8 @f3() #2 {
 b0:
-  %v0 = load i8, i8* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 57), align 2
+  %v0 = load i8, ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 57), align 2
   %v1 = icmp eq i8 %v0, 0
   br i1 %v1, label %b2, label %b1
 
 b1:                                               ; preds = %b0
-  tail call void @f0(%s.68* nonnull @g1) #3
+  tail call void @f0(ptr nonnull @g1) #3
   unreachable
 
 b2:                                               ; preds = %b0
-  %v2 = call zeroext i8 @f1(i8* nonnull undef) #4
+  %v2 = call zeroext i8 @f1(ptr nonnull undef) #4
   br i1 undef, label %b3, label %b8
 
 b3:                                               ; preds = %b2
-  %v3 = load i8, i8* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 1), align 1
+  %v3 = load i8, ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 1), align 1
   %v4 = add i8 %v3, -17
   %v5 = icmp ult i8 %v4, 2
   br i1 %v5, label %b4, label %b7
 
 b4:                                               ; preds = %b3
-  %v6 = load i8, i8* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 167, i32 2), align 2
+  %v6 = load i8, ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 167, i32 2), align 2
   %v7 = sext i8 %v6 to i32
   %v8 = add nsw i32 %v7, 1
-  %v9 = load i8, i8* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 167, i32 0), align 2
+  %v9 = load i8, ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 167, i32 0), align 2
   %v10 = zext i8 %v9 to i32
   %v11 = icmp slt i32 %v8, %v10
   br i1 %v11, label %b6, label %b5
@@ -141,7 +141,7 @@ b11:                                              ; preds = %b10
   unreachable
 
 b12:                                              ; preds = %b10
-  %v12 = load i8, i8* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 1), align 1
+  %v12 = load i8, ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 1), align 1
   %v13 = zext i8 %v12 to i32
   switch i32 %v13, label %b14 [
     i32 17, label %b13

diff  --git a/llvm/test/CodeGen/Hexagon/hexagon-verify-implicit-use.ll b/llvm/test/CodeGen/Hexagon/hexagon-verify-implicit-use.ll
index c5ced77851263..541a359e7fa8c 100644
--- a/llvm/test/CodeGen/Hexagon/hexagon-verify-implicit-use.ll
+++ b/llvm/test/CodeGen/Hexagon/hexagon-verify-implicit-use.ll
@@ -3,44 +3,44 @@
 
 target triple = "hexagon"
 
-%s.0 = type { %s.1* }
-%s.1 = type { %s.2, %s.2**, i32, i32, i8, %s.3 }
-%s.2 = type { i32 (...)**, i32 }
+%s.0 = type { ptr }
+%s.1 = type { %s.2, ptr, i32, i32, i8, %s.3 }
+%s.2 = type { ptr, i32 }
 %s.3 = type { %s.4, %s.6, i32, i32 }
 %s.4 = type { %s.5 }
 %s.5 = type { i8 }
-%s.6 = type { i8*, [12 x i8] }
+%s.6 = type { ptr, [12 x i8] }
 %s.7 = type { %s.2, %s.8 }
-%s.8 = type { %s.9*, %s.9* }
-%s.9 = type { [16 x i16*] }
-%s.10 = type { i32 (...)**, i32, i8, i8, i16, i32, i32, %s.11*, %s.12*, %s.0* }
-%s.11 = type { %s.11*, i32, i32, i8* }
-%s.12 = type { %s.12*, i32, void (i8, %s.10*, i32)* }
+%s.8 = type { ptr, ptr }
+%s.9 = type { [16 x ptr] }
+%s.10 = type { ptr, i32, i8, i8, i16, i32, i32, ptr, ptr, ptr }
+%s.11 = type { ptr, i32, i32, ptr }
+%s.12 = type { ptr, i32, ptr }
 
-define i32 @f0() #0 personality i8* bitcast (i32 (...)* @f2 to i8*) {
+define i32 @f0() #0 personality ptr @f2 {
 b0:
-  %v0 = invoke dereferenceable(4) %s.0* @f1()
+  %v0 = invoke dereferenceable(4) ptr @f1()
           to label %b1 unwind label %b2
 
 b1:                                               ; preds = %b0
-  %v1 = load i32, i32* undef, align 4
+  %v1 = load i32, ptr undef, align 4
   %v2 = icmp eq i32 %v1, 0
   %v3 = zext i1 %v2 to i64
   %v4 = shl nuw nsw i64 %v3, 32
   %v5 = or i64 %v4, 0
-  %v6 = call i64 @f3(%s.7* undef, i64 %v5, i64 4294967296, %s.10* nonnull dereferenceable(32) undef, i8* nonnull dereferenceable(1) undef, i32* nonnull dereferenceable(4) undef)
+  %v6 = call i64 @f3(ptr undef, i64 %v5, i64 4294967296, ptr nonnull dereferenceable(32) undef, ptr nonnull dereferenceable(1) undef, ptr nonnull dereferenceable(4) undef)
   unreachable
 
 b2:                                               ; preds = %b0
-  %v7 = landingpad { i8*, i32 }
+  %v7 = landingpad { ptr, i32 }
           cleanup
-  resume { i8*, i32 } undef
+  resume { ptr, i32 } undef
 }
 
-declare dereferenceable(4) %s.0* @f1()
+declare dereferenceable(4) ptr @f1()
 
 declare i32 @f2(...)
 
-declare i64 @f3(%s.7* nocapture readnone, i64, i64, %s.10* nocapture readonly dereferenceable(32), i8* nocapture dereferenceable(1), i32* nocapture dereferenceable(4)) unnamed_addr align 2
+declare i64 @f3(ptr nocapture readnone, i64, i64, ptr nocapture readonly dereferenceable(32), ptr nocapture dereferenceable(1), ptr nocapture dereferenceable(4)) unnamed_addr align 2
 
 attributes #0 = { "target-cpu"="hexagonv55" }

diff  --git a/llvm/test/CodeGen/Hexagon/hexagon_cfi_offset.ll b/llvm/test/CodeGen/Hexagon/hexagon_cfi_offset.ll
index 6972d35d5938a..23d76dc6f1935 100644
--- a/llvm/test/CodeGen/Hexagon/hexagon_cfi_offset.ll
+++ b/llvm/test/CodeGen/Hexagon/hexagon_cfi_offset.ll
@@ -6,83 +6,71 @@
 ; CHECK: .cfi_offset r17, -12
 ; CHECK: .cfi_offset r16, -16
 
-%s.0 = type { i32 (...)**, i8* }
-%s.1 = type { i8*, void (i8*)*, void ()*, void ()*, %s.1*, i32, i32, i8*, i8*, i8*, i8*, %s.2 }
-%s.2 = type { i64, void (i8, %s.2*)*, i32, i32, [12 x i8] }
-%s.3 = type { %s.1*, i32, i8*, i32 }
+%s.0 = type { ptr, ptr }
+%s.1 = type { ptr, ptr, ptr, ptr, ptr, i32, i32, ptr, ptr, ptr, ptr, %s.2 }
+%s.2 = type { i64, ptr, i32, i32, [12 x i8] }
+%s.3 = type { ptr, i32, ptr, i32 }
 
 ; Function Attrs: noreturn
-define void @f0(i8* %a0, %s.0* %a1, void (i8*)* %a2) #0 {
+define void @f0(ptr %a0, ptr %a1, ptr %a2) #0 {
 b0:
-  %v0 = getelementptr inbounds i8, i8* %a0, i32 -80
-  %v1 = bitcast i8* %v0 to %s.0**
-  store %s.0* %a1, %s.0** %v1, align 16, !tbaa !0
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 -76
-  %v3 = bitcast i8* %v2 to void (i8*)**
-  store void (i8*)* %a2, void (i8*)** %v3, align 4, !tbaa !9
-  %v4 = tail call void ()* @f1(void ()* null) #3
-  %v5 = getelementptr inbounds i8, i8* %a0, i32 -72
-  %v6 = bitcast i8* %v5 to void ()**
-  store void ()* %v4, void ()** %v6, align 8, !tbaa !10
-  %v7 = tail call void ()* @f1(void ()* %v4) #3
-  %v8 = tail call void ()* @f2(void ()* null) #3
-  %v9 = getelementptr inbounds i8, i8* %a0, i32 -68
-  %v10 = bitcast i8* %v9 to void ()**
-  store void ()* %v8, void ()** %v10, align 4, !tbaa !11
-  %v11 = tail call void ()* @f2(void ()* %v8) #3
-  %v12 = getelementptr inbounds i8, i8* %a0, i32 -64
-  %v13 = bitcast i8* %v12 to %s.1**
-  store %s.1* null, %s.1** %v13, align 16, !tbaa !12
-  %v14 = getelementptr inbounds i8, i8* %a0, i32 -60
-  %v15 = bitcast i8* %v14 to i32*
-  store i32 0, i32* %v15, align 4, !tbaa !13
-  %v16 = getelementptr inbounds i8, i8* %a0, i32 -32
-  %v17 = bitcast i8* %v16 to %s.2*
-  %v18 = bitcast i8* %v16 to i64*
-  store i64 4921953907261516544, i64* %v18, align 16, !tbaa !14
-  %v19 = getelementptr inbounds i8, i8* %a0, i32 -24
-  %v20 = bitcast i8* %v19 to void (i8, %s.2*)**
-  store void (i8, %s.2*)* @f3, void (i8, %s.2*)** %v20, align 8, !tbaa !15
-  %v21 = tail call %s.3* @f4() #3
-  %v22 = getelementptr inbounds %s.3, %s.3* %v21, i32 0, i32 1
-  %v23 = load i32, i32* %v22, align 4, !tbaa !16
+  %v0 = getelementptr inbounds i8, ptr %a0, i32 -80
+  store ptr %a1, ptr %v0, align 16, !tbaa !0
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 -76
+  store ptr %a2, ptr %v2, align 4, !tbaa !9
+  %v4 = tail call ptr @f1(ptr null) #3
+  %v5 = getelementptr inbounds i8, ptr %a0, i32 -72
+  store ptr %v4, ptr %v5, align 8, !tbaa !10
+  %v7 = tail call ptr @f1(ptr %v4) #3
+  %v8 = tail call ptr @f2(ptr null) #3
+  %v9 = getelementptr inbounds i8, ptr %a0, i32 -68
+  store ptr %v8, ptr %v9, align 4, !tbaa !11
+  %v11 = tail call ptr @f2(ptr %v8) #3
+  %v12 = getelementptr inbounds i8, ptr %a0, i32 -64
+  store ptr null, ptr %v12, align 16, !tbaa !12
+  %v14 = getelementptr inbounds i8, ptr %a0, i32 -60
+  store i32 0, ptr %v14, align 4, !tbaa !13
+  %v16 = getelementptr inbounds i8, ptr %a0, i32 -32
+  store i64 4921953907261516544, ptr %v16, align 16, !tbaa !14
+  %v19 = getelementptr inbounds i8, ptr %a0, i32 -24
+  store ptr @f3, ptr %v19, align 8, !tbaa !15
+  %v21 = tail call ptr @f4() #3
+  %v22 = getelementptr inbounds %s.3, ptr %v21, i32 0, i32 1
+  %v23 = load i32, ptr %v22, align 4, !tbaa !16
   %v24 = add i32 %v23, 1
-  store i32 %v24, i32* %v22, align 4, !tbaa !16
-  %v25 = tail call zeroext i8 @f5(%s.2* %v17) #4
-  %v26 = tail call i8* @f6(i8* %v16) #3
+  store i32 %v24, ptr %v22, align 4, !tbaa !16
+  %v25 = tail call zeroext i8 @f5(ptr %v16) #4
+  %v26 = tail call ptr @f6(ptr %v16) #3
   tail call void @f7() #5
   unreachable
 }
 
 ; Function Attrs: nounwind
-declare void ()* @f1(void ()*) #1
+declare ptr @f1(ptr) #1
 
 ; Function Attrs: nounwind
-declare void ()* @f2(void ()*) #1
+declare ptr @f2(ptr) #1
 
-define internal void @f3(i8 zeroext %a0, %s.2* %a1) #2 {
+define internal void @f3(i8 zeroext %a0, ptr %a1) #2 {
 b0:
-  %v0 = getelementptr inbounds %s.2, %s.2* %a1, i32 0, i32 0
-  %v1 = load i64, i64* %v0, align 16, !tbaa !18
+  %v1 = load i64, ptr %a1, align 16, !tbaa !18
   %v2 = icmp eq i64 %v1, 4921953907261516544
   br i1 %v2, label %b1, label %b4
 
 b1:                                               ; preds = %b0
-  %v3 = getelementptr inbounds %s.2, %s.2* %a1, i32 1
-  %v4 = bitcast %s.2* %v3 to i8*
-  %v5 = getelementptr inbounds %s.2, %s.2* %a1, i32 -2, i32 3
-  %v6 = getelementptr inbounds i32, i32* %v5, i32 1
-  %v7 = bitcast i32* %v6 to void (i8*)**
-  %v8 = load void (i8*)*, void (i8*)** %v7, align 4, !tbaa !9
-  %v9 = icmp eq void (i8*)* %v8, null
+  %v3 = getelementptr inbounds %s.2, ptr %a1, i32 1
+  %v5 = getelementptr inbounds %s.2, ptr %a1, i32 -2, i32 3
+  %v6 = getelementptr inbounds i32, ptr %v5, i32 1
+  %v8 = load ptr, ptr %v6, align 4, !tbaa !9
+  %v9 = icmp eq ptr %v8, null
   br i1 %v9, label %b3, label %b2
 
 b2:                                               ; preds = %b1
-  tail call void %v8(i8* %v4) #4
+  tail call void %v8(ptr %v3) #4
   br label %b3
 
 b3:                                               ; preds = %b2, %b1
-  tail call void @f8(i8* %v4) #3
+  tail call void @f8(ptr %v3) #3
   br label %b4
 
 b4:                                               ; preds = %b3, %b0
@@ -90,18 +78,18 @@ b4:                                               ; preds = %b3, %b0
 }
 
 ; Function Attrs: nounwind
-declare %s.3* @f4() #1
+declare ptr @f4() #1
 
-declare zeroext i8 @f5(%s.2*) #2
+declare zeroext i8 @f5(ptr) #2
 
 ; Function Attrs: nounwind
-declare i8* @f6(i8*) #1
+declare ptr @f6(ptr) #1
 
 ; Function Attrs: noreturn
 declare void @f7() #0
 
 ; Function Attrs: nounwind
-declare void @f8(i8*) #1
+declare void @f8(ptr) #1
 
 attributes #0 = { noreturn "target-cpu"="hexagonv60" }
 attributes #1 = { nounwind "target-cpu"="hexagonv60" }

diff  --git a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse.ll b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse.ll
index b86cef9cea9ad..d7b18ae078ff0 100644
--- a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse.ll
+++ b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse.ll
@@ -9,53 +9,46 @@ target triple = "hexagon"
 @W = external local_unnamed_addr global i32, align 4
 
 ; Function Attrs: nounwind
-define void @foo(i8* noalias nocapture readonly %src, i8* noalias nocapture %dst, i32 %stride) local_unnamed_addr #0 {
+define void @foo(ptr noalias nocapture readonly %src, ptr noalias nocapture %dst, i32 %stride) local_unnamed_addr #0 {
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %src, i32 %stride
+  %add.ptr = getelementptr inbounds i8, ptr %src, i32 %stride
   %mul = mul nsw i32 %stride, 2
-  %add.ptr1 = getelementptr inbounds i8, i8* %src, i32 %mul
-  %0 = load i32, i32* @W, align 4, !tbaa !1
+  %add.ptr1 = getelementptr inbounds i8, ptr %src, i32 %mul
+  %0 = load i32, ptr @W, align 4, !tbaa !1
   %cmp55 = icmp sgt i32 %0, 0
   br i1 %cmp55, label %for.body.lr.ph, label %for.end
 
 for.body.lr.ph:                                   ; preds = %entry
-  %1 = bitcast i8* %add.ptr1 to <32 x i32>*
-  %2 = load <32 x i32>, <32 x i32>* %1, align 128, !tbaa !5
-  %incdec.ptr4 = getelementptr inbounds i8, i8* %add.ptr1, i32 128
-  %3 = bitcast i8* %incdec.ptr4 to <32 x i32>*
-  %4 = bitcast i8* %add.ptr to <32 x i32>*
-  %5 = load <32 x i32>, <32 x i32>* %4, align 128, !tbaa !5
-  %incdec.ptr2 = getelementptr inbounds i8, i8* %add.ptr, i32 128
-  %6 = bitcast i8* %incdec.ptr2 to <32 x i32>*
-  %7 = bitcast i8* %src to <32 x i32>*
-  %8 = load <32 x i32>, <32 x i32>* %7, align 128, !tbaa !5
-  %incdec.ptr = getelementptr inbounds i8, i8* %src, i32 128
-  %9 = bitcast i8* %incdec.ptr to <32 x i32>*
-  %10 = bitcast i8* %dst to <32 x i32>*
+  %1 = load <32 x i32>, ptr %add.ptr1, align 128, !tbaa !5
+  %incdec.ptr4 = getelementptr inbounds i8, ptr %add.ptr1, i32 128
+  %2 = load <32 x i32>, ptr %add.ptr, align 128, !tbaa !5
+  %incdec.ptr2 = getelementptr inbounds i8, ptr %add.ptr, i32 128
+  %3 = load <32 x i32>, ptr %src, align 128, !tbaa !5
+  %incdec.ptr = getelementptr inbounds i8, ptr %src, i32 128
   br label %for.body
 
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
-  %out.063 = phi <32 x i32>* [ %10, %for.body.lr.ph ], [ %incdec.ptr18, %for.body ]
-  %p2.062 = phi <32 x i32>* [ %3, %for.body.lr.ph ], [ %incdec.ptr10, %for.body ]
-  %p1.061 = phi <32 x i32>* [ %6, %for.body.lr.ph ], [ %incdec.ptr8, %for.body ]
-  %p0.060 = phi <32 x i32>* [ %9, %for.body.lr.ph ], [ %incdec.ptr6, %for.body ]
+  %out.063 = phi ptr [ %dst, %for.body.lr.ph ], [ %incdec.ptr18, %for.body ]
+  %p2.062 = phi ptr [ %incdec.ptr4, %for.body.lr.ph ], [ %incdec.ptr10, %for.body ]
+  %p1.061 = phi ptr [ %incdec.ptr2, %for.body.lr.ph ], [ %incdec.ptr8, %for.body ]
+  %p0.060 = phi ptr [ %incdec.ptr, %for.body.lr.ph ], [ %incdec.ptr6, %for.body ]
   %i.059 = phi i32 [ 0, %for.body.lr.ph ], [ %add, %for.body ]
-  %a.sroa.0.058 = phi <32 x i32> [ %8, %for.body.lr.ph ], [ %11, %for.body ]
-  %b.sroa.0.057 = phi <32 x i32> [ %5, %for.body.lr.ph ], [ %12, %for.body ]
-  %c.sroa.0.056 = phi <32 x i32> [ %2, %for.body.lr.ph ], [ %13, %for.body ]
-  %incdec.ptr6 = getelementptr inbounds <32 x i32>, <32 x i32>* %p0.060, i32 1
-  %11 = load <32 x i32>, <32 x i32>* %p0.060, align 128, !tbaa !5
-  %incdec.ptr8 = getelementptr inbounds <32 x i32>, <32 x i32>* %p1.061, i32 1
-  %12 = load <32 x i32>, <32 x i32>* %p1.061, align 128, !tbaa !5
-  %incdec.ptr10 = getelementptr inbounds <32 x i32>, <32 x i32>* %p2.062, i32 1
-  %13 = load <32 x i32>, <32 x i32>* %p2.062, align 128, !tbaa !5
-  %14 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %a.sroa.0.058, <32 x i32> %b.sroa.0.057)
-  %15 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %14, <32 x i32> %c.sroa.0.056)
-  %16 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %11, <32 x i32> %12)
-  %17 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %16, <32 x i32> %13)
-  %18 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> %17, <32 x i32> %15, i32 1)
-  %incdec.ptr18 = getelementptr inbounds <32 x i32>, <32 x i32>* %out.063, i32 1
-  store <32 x i32> %18, <32 x i32>* %out.063, align 128, !tbaa !5
+  %a.sroa.0.058 = phi <32 x i32> [ %3, %for.body.lr.ph ], [ %4, %for.body ]
+  %b.sroa.0.057 = phi <32 x i32> [ %2, %for.body.lr.ph ], [ %5, %for.body ]
+  %c.sroa.0.056 = phi <32 x i32> [ %1, %for.body.lr.ph ], [ %6, %for.body ]
+  %incdec.ptr6 = getelementptr inbounds <32 x i32>, ptr %p0.060, i32 1
+  %4 = load <32 x i32>, ptr %p0.060, align 128, !tbaa !5
+  %incdec.ptr8 = getelementptr inbounds <32 x i32>, ptr %p1.061, i32 1
+  %5 = load <32 x i32>, ptr %p1.061, align 128, !tbaa !5
+  %incdec.ptr10 = getelementptr inbounds <32 x i32>, ptr %p2.062, i32 1
+  %6 = load <32 x i32>, ptr %p2.062, align 128, !tbaa !5
+  %7 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %a.sroa.0.058, <32 x i32> %b.sroa.0.057)
+  %8 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %7, <32 x i32> %c.sroa.0.056)
+  %9 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %4, <32 x i32> %5)
+  %10 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %9, <32 x i32> %6)
+  %11 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> %10, <32 x i32> %8, i32 1)
+  %incdec.ptr18 = getelementptr inbounds <32 x i32>, ptr %out.063, i32 1
+  store <32 x i32> %11, ptr %out.063, align 128, !tbaa !5
   %add = add nuw nsw i32 %i.059, 128
   %cmp = icmp slt i32 %add, %0
   br i1 %cmp, label %for.body, label %for.end.loopexit

diff  --git a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_commutative.ll b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_commutative.ll
index 0ba9bb74f7bc1..b280830d403a5 100644
--- a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_commutative.ll
+++ b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_commutative.ll
@@ -8,53 +8,46 @@ target triple = "hexagon"
 @g0 = external local_unnamed_addr global i32, align 4
 
 ; Function Attrs: nounwind
-define void @f0(i8* noalias nocapture readonly %a0, i8* noalias nocapture %a1, i32 %a2) local_unnamed_addr #0 {
+define void @f0(ptr noalias nocapture readonly %a0, ptr noalias nocapture %a1, i32 %a2) local_unnamed_addr #0 {
 b0:
-  %v0 = getelementptr inbounds i8, i8* %a0, i32 %a2
+  %v0 = getelementptr inbounds i8, ptr %a0, i32 %a2
   %v1 = mul nsw i32 %a2, 2
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i32, i32* @g0, align 4, !tbaa !0
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i32, ptr @g0, align 4, !tbaa !0
   %v4 = icmp sgt i32 %v3, 0
   br i1 %v4, label %b1, label %b4
 
 b1:                                               ; preds = %b0
-  %v5 = bitcast i8* %v2 to <32 x i32>*
-  %v6 = load <32 x i32>, <32 x i32>* %v5, align 128, !tbaa !4
-  %v7 = getelementptr inbounds i8, i8* %v2, i32 128
-  %v8 = bitcast i8* %v7 to <32 x i32>*
-  %v9 = bitcast i8* %v0 to <32 x i32>*
-  %v10 = load <32 x i32>, <32 x i32>* %v9, align 128, !tbaa !4
-  %v11 = getelementptr inbounds i8, i8* %v0, i32 128
-  %v12 = bitcast i8* %v11 to <32 x i32>*
-  %v13 = bitcast i8* %a0 to <32 x i32>*
-  %v14 = load <32 x i32>, <32 x i32>* %v13, align 128, !tbaa !4
-  %v15 = getelementptr inbounds i8, i8* %a0, i32 128
-  %v16 = bitcast i8* %v15 to <32 x i32>*
-  %v17 = bitcast i8* %a1 to <32 x i32>*
+  %v6 = load <32 x i32>, ptr %v2, align 128, !tbaa !4
+  %v7 = getelementptr inbounds i8, ptr %v2, i32 128
+  %v10 = load <32 x i32>, ptr %v0, align 128, !tbaa !4
+  %v11 = getelementptr inbounds i8, ptr %v0, i32 128
+  %v14 = load <32 x i32>, ptr %a0, align 128, !tbaa !4
+  %v15 = getelementptr inbounds i8, ptr %a0, i32 128
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
-  %v18 = phi <32 x i32>* [ %v17, %b1 ], [ %v37, %b2 ]
-  %v19 = phi <32 x i32>* [ %v8, %b1 ], [ %v30, %b2 ]
-  %v20 = phi <32 x i32>* [ %v12, %b1 ], [ %v28, %b2 ]
-  %v21 = phi <32 x i32>* [ %v16, %b1 ], [ %v26, %b2 ]
+  %v18 = phi ptr [ %a1, %b1 ], [ %v37, %b2 ]
+  %v19 = phi ptr [ %v7, %b1 ], [ %v30, %b2 ]
+  %v20 = phi ptr [ %v11, %b1 ], [ %v28, %b2 ]
+  %v21 = phi ptr [ %v15, %b1 ], [ %v26, %b2 ]
   %v22 = phi i32 [ 0, %b1 ], [ %v38, %b2 ]
   %v23 = phi <32 x i32> [ %v14, %b1 ], [ %v27, %b2 ]
   %v24 = phi <32 x i32> [ %v10, %b1 ], [ %v29, %b2 ]
   %v25 = phi <32 x i32> [ %v6, %b1 ], [ %v31, %b2 ]
-  %v26 = getelementptr inbounds <32 x i32>, <32 x i32>* %v21, i32 1
-  %v27 = load <32 x i32>, <32 x i32>* %v21, align 128, !tbaa !4
-  %v28 = getelementptr inbounds <32 x i32>, <32 x i32>* %v20, i32 1
-  %v29 = load <32 x i32>, <32 x i32>* %v20, align 128, !tbaa !4
-  %v30 = getelementptr inbounds <32 x i32>, <32 x i32>* %v19, i32 1
-  %v31 = load <32 x i32>, <32 x i32>* %v19, align 128, !tbaa !4
+  %v26 = getelementptr inbounds <32 x i32>, ptr %v21, i32 1
+  %v27 = load <32 x i32>, ptr %v21, align 128, !tbaa !4
+  %v28 = getelementptr inbounds <32 x i32>, ptr %v20, i32 1
+  %v29 = load <32 x i32>, ptr %v20, align 128, !tbaa !4
+  %v30 = getelementptr inbounds <32 x i32>, ptr %v19, i32 1
+  %v31 = load <32 x i32>, ptr %v19, align 128, !tbaa !4
   %v32 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v23, <32 x i32> %v24)
   %v33 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v32, <32 x i32> %v25)
   %v34 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v29, <32 x i32> %v27)
   %v35 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v34, <32 x i32> %v31)
   %v36 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> %v35, <32 x i32> %v33, i32 1)
-  %v37 = getelementptr inbounds <32 x i32>, <32 x i32>* %v18, i32 1
-  store <32 x i32> %v36, <32 x i32>* %v18, align 128, !tbaa !4
+  %v37 = getelementptr inbounds <32 x i32>, ptr %v18, i32 1
+  store <32 x i32> %v36, ptr %v18, align 128, !tbaa !4
   %v38 = add nuw nsw i32 %v22, 128
   %v39 = icmp slt i32 %v38, %v3
   br i1 %v39, label %b2, label %b3

diff  --git a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_constant.ll b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_constant.ll
index 205b0786ab8cd..c64404f977754 100644
--- a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_constant.ll
+++ b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_constant.ll
@@ -9,53 +9,46 @@ target triple = "hexagon"
 @W = external local_unnamed_addr global i32, align 4
 
 ; Function Attrs: nounwind
-define void @foo(i8* noalias nocapture readonly %src, i8* noalias nocapture %dst, i32 %stride) local_unnamed_addr #0 {
+define void @foo(ptr noalias nocapture readonly %src, ptr noalias nocapture %dst, i32 %stride) local_unnamed_addr #0 {
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %src, i32 %stride
+  %add.ptr = getelementptr inbounds i8, ptr %src, i32 %stride
   %mul = mul nsw i32 %stride, 2
-  %add.ptr1 = getelementptr inbounds i8, i8* %src, i32 %mul
-  %0 = load i32, i32* @W, align 4, !tbaa !1
+  %add.ptr1 = getelementptr inbounds i8, ptr %src, i32 %mul
+  %0 = load i32, ptr @W, align 4, !tbaa !1
   %cmp55 = icmp sgt i32 %0, 0
   br i1 %cmp55, label %for.body.lr.ph, label %for.end
 
 for.body.lr.ph:                                   ; preds = %entry
-  %1 = bitcast i8* %add.ptr1 to <32 x i32>*
-  %2 = load <32 x i32>, <32 x i32>* %1, align 128, !tbaa !5
-  %incdec.ptr4 = getelementptr inbounds i8, i8* %add.ptr1, i32 128
-  %3 = bitcast i8* %incdec.ptr4 to <32 x i32>*
-  %4 = bitcast i8* %add.ptr to <32 x i32>*
-  %5 = load <32 x i32>, <32 x i32>* %4, align 128, !tbaa !5
-  %incdec.ptr2 = getelementptr inbounds i8, i8* %add.ptr, i32 128
-  %6 = bitcast i8* %incdec.ptr2 to <32 x i32>*
-  %7 = bitcast i8* %src to <32 x i32>*
-  %8 = load <32 x i32>, <32 x i32>* %7, align 128, !tbaa !5
-  %incdec.ptr = getelementptr inbounds i8, i8* %src, i32 128
-  %9 = bitcast i8* %incdec.ptr to <32 x i32>*
-  %10 = bitcast i8* %dst to <32 x i32>*
+  %1 = load <32 x i32>, ptr %add.ptr1, align 128, !tbaa !5
+  %incdec.ptr4 = getelementptr inbounds i8, ptr %add.ptr1, i32 128
+  %2 = load <32 x i32>, ptr %add.ptr, align 128, !tbaa !5
+  %incdec.ptr2 = getelementptr inbounds i8, ptr %add.ptr, i32 128
+  %3 = load <32 x i32>, ptr %src, align 128, !tbaa !5
+  %incdec.ptr = getelementptr inbounds i8, ptr %src, i32 128
   br label %for.body
 
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
-  %out.063 = phi <32 x i32>* [ %10, %for.body.lr.ph ], [ %incdec.ptr18, %for.body ]
-  %p2.062 = phi <32 x i32>* [ %3, %for.body.lr.ph ], [ %incdec.ptr10, %for.body ]
-  %p1.061 = phi <32 x i32>* [ %6, %for.body.lr.ph ], [ %incdec.ptr8, %for.body ]
-  %p0.060 = phi <32 x i32>* [ %9, %for.body.lr.ph ], [ %incdec.ptr6, %for.body ]
+  %out.063 = phi ptr [ %dst, %for.body.lr.ph ], [ %incdec.ptr18, %for.body ]
+  %p2.062 = phi ptr [ %incdec.ptr4, %for.body.lr.ph ], [ %incdec.ptr10, %for.body ]
+  %p1.061 = phi ptr [ %incdec.ptr2, %for.body.lr.ph ], [ %incdec.ptr8, %for.body ]
+  %p0.060 = phi ptr [ %incdec.ptr, %for.body.lr.ph ], [ %incdec.ptr6, %for.body ]
   %i.059 = phi i32 [ 0, %for.body.lr.ph ], [ %add, %for.body ]
-  %a.sroa.0.058 = phi <32 x i32> [ %8, %for.body.lr.ph ], [ %11, %for.body ]
-  %b.sroa.0.057 = phi <32 x i32> [ %5, %for.body.lr.ph ], [ %12, %for.body ]
-  %c.sroa.0.056 = phi <32 x i32> [ %2, %for.body.lr.ph ], [ %13, %for.body ]
-  %incdec.ptr6 = getelementptr inbounds <32 x i32>, <32 x i32>* %p0.060, i32 1
-  %11 = load <32 x i32>, <32 x i32>* %p0.060, align 128, !tbaa !5
-  %incdec.ptr8 = getelementptr inbounds <32 x i32>, <32 x i32>* %p1.061, i32 1
-  %12 = load <32 x i32>, <32 x i32>* %p1.061, align 128, !tbaa !5
-  %incdec.ptr10 = getelementptr inbounds <32 x i32>, <32 x i32>* %p2.062, i32 1
-  %13 = load <32 x i32>, <32 x i32>* %p2.062, align 128, !tbaa !5
-  %14 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> %a.sroa.0.058, <32 x i32> %b.sroa.0.057, i32 4)
-  %15 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %14, <32 x i32> %c.sroa.0.056)
-  %16 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> %11, <32 x i32> %12, i32 5)
-  %17 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %16, <32 x i32> %13)
-  %18 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> %17, <32 x i32> %15, i32 1)
-  %incdec.ptr18 = getelementptr inbounds <32 x i32>, <32 x i32>* %out.063, i32 1
-  store <32 x i32> %18, <32 x i32>* %out.063, align 128, !tbaa !5
+  %a.sroa.0.058 = phi <32 x i32> [ %3, %for.body.lr.ph ], [ %4, %for.body ]
+  %b.sroa.0.057 = phi <32 x i32> [ %2, %for.body.lr.ph ], [ %5, %for.body ]
+  %c.sroa.0.056 = phi <32 x i32> [ %1, %for.body.lr.ph ], [ %6, %for.body ]
+  %incdec.ptr6 = getelementptr inbounds <32 x i32>, ptr %p0.060, i32 1
+  %4 = load <32 x i32>, ptr %p0.060, align 128, !tbaa !5
+  %incdec.ptr8 = getelementptr inbounds <32 x i32>, ptr %p1.061, i32 1
+  %5 = load <32 x i32>, ptr %p1.061, align 128, !tbaa !5
+  %incdec.ptr10 = getelementptr inbounds <32 x i32>, ptr %p2.062, i32 1
+  %6 = load <32 x i32>, ptr %p2.062, align 128, !tbaa !5
+  %7 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> %a.sroa.0.058, <32 x i32> %b.sroa.0.057, i32 4)
+  %8 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %7, <32 x i32> %c.sroa.0.056)
+  %9 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> %4, <32 x i32> %5, i32 5)
+  %10 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %9, <32 x i32> %6)
+  %11 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> %10, <32 x i32> %8, i32 1)
+  %incdec.ptr18 = getelementptr inbounds <32 x i32>, ptr %out.063, i32 1
+  store <32 x i32> %11, ptr %out.063, align 128, !tbaa !5
   %add = add nuw nsw i32 %i.059, 128
   %cmp = icmp slt i32 %add, %0
   br i1 %cmp, label %for.body, label %for.end.loopexit

diff  --git a/llvm/test/CodeGen/Hexagon/hidden-relocation.ll b/llvm/test/CodeGen/Hexagon/hidden-relocation.ll
index 3e54d0f26e4d2..ac15e7263b35d 100644
--- a/llvm/test/CodeGen/Hexagon/hidden-relocation.ll
+++ b/llvm/test/CodeGen/Hexagon/hidden-relocation.ll
@@ -4,20 +4,20 @@
 
 @g0 = hidden global i32 10, align 4
 @g1 = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1
- at g2 = internal global i32* @g0, align 4
+ at g2 = internal global ptr @g0, align 4
 
 ; Function Attrs: nounwind
-declare i32 @f0(i8*, ...) #0
+declare i32 @f0(ptr, ...) #0
 
 ; Function Attrs: nounwind
 define i32 @f1() #0 {
 b0:
   %v0 = alloca i32, align 4
-  store i32 10, i32* @g0, align 4
-  %v1 = load i32*, i32** @g2, align 4
-  %v2 = load i32, i32* %v1, align 4
-  %v3 = call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g1, i32 0, i32 0), i32 %v2)
-  %v4 = load i32, i32* %v0
+  store i32 10, ptr @g0, align 4
+  %v1 = load ptr, ptr @g2, align 4
+  %v2 = load i32, ptr %v1, align 4
+  %v3 = call i32 (ptr, ...) @f0(ptr @g1, i32 %v2)
+  %v4 = load i32, ptr %v0
   ret i32 %v4
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/honor-optsize.ll b/llvm/test/CodeGen/Hexagon/honor-optsize.ll
index 1e30d22b3e007..7bf6aad145a38 100644
--- a/llvm/test/CodeGen/Hexagon/honor-optsize.ll
+++ b/llvm/test/CodeGen/Hexagon/honor-optsize.ll
@@ -5,13 +5,13 @@ target triple = "hexagon"
 ; CHECK: f0:
 ; CHECK:   call __save_r16_through_r21
 ; CHECK:   .size	f0
-define i32 @f0(i8* nocapture %a0) #0 {
+define i32 @f0(ptr nocapture %a0) #0 {
 b0:
-  %v0 = tail call i32 bitcast (i32 (...)* @f1 to i32 ()*)() #0
-  %v1 = tail call i32 bitcast (i32 (...)* @f1 to i32 ()*)() #0
-  %v2 = tail call i32 bitcast (i32 (...)* @f1 to i32 ()*)() #0
-  %v3 = tail call i32 bitcast (i32 (...)* @f1 to i32 ()*)() #0
-  %v4 = load i8, i8* %a0, align 1
+  %v0 = tail call i32 @f1() #0
+  %v1 = tail call i32 @f1() #0
+  %v2 = tail call i32 @f1() #0
+  %v3 = tail call i32 @f1() #0
+  %v4 = load i8, ptr %a0, align 1
   %v5 = icmp eq i8 %v4, 0
   br i1 %v5, label %b4, label %b1
 
@@ -22,10 +22,10 @@ b2:                                               ; preds = %b2, %b1
   %v6 = phi i32 [ %v10, %b2 ], [ 0, %b1 ]
   %v7 = phi i32 [ %v2, %b2 ], [ %v1, %b1 ]
   %v8 = phi i32 [ %v7, %b2 ], [ %v0, %b1 ]
-  %v9 = tail call i32 bitcast (i32 (...)* @f1 to i32 ()*)() #0
+  %v9 = tail call i32 @f1() #0
   %v10 = add nsw i32 %v6, %v8
-  %v11 = tail call i32 bitcast (i32 (...)* @f1 to i32 ()*)() #0
-  %v12 = load i8, i8* %a0, align 1
+  %v11 = tail call i32 @f1() #0
+  %v12 = load i8, ptr %a0, align 1
   %v13 = icmp eq i8 %v12, 0
   br i1 %v13, label %b3, label %b2
 

diff  --git a/llvm/test/CodeGen/Hexagon/hrc-stack-coloring.ll b/llvm/test/CodeGen/Hexagon/hrc-stack-coloring.ll
index 27144381ef19a..9b77337cfc978 100644
--- a/llvm/test/CodeGen/Hexagon/hrc-stack-coloring.ll
+++ b/llvm/test/CodeGen/Hexagon/hrc-stack-coloring.ll
@@ -3,7 +3,7 @@
 
 target triple = "hexagon"
 
-%s.0 = type { %s.1*, %s.2*, %s.3*, i16*, i32*, i8, i8, i8, i8, i8, i8, i16, i16, i16, i32, i32, i32, i32, i16, i8, i8, i8, i8, float, float, float, float, float, float, float, float, float, float, float, [4 x %s.7], [4 x %s.7], [20 x %s.7], [104 x %s.7], [20 x i32], [257 x %s.8], %s.9 }
+%s.0 = type { ptr, ptr, ptr, ptr, ptr, i8, i8, i8, i8, i8, i8, i16, i16, i16, i32, i32, i32, i32, i16, i8, i8, i8, i8, float, float, float, float, float, float, float, float, float, float, float, [4 x %s.7], [4 x %s.7], [20 x %s.7], [104 x %s.7], [20 x i32], [257 x %s.8], %s.9 }
 %s.1 = type { i16, i8, i16, i8, i8, i8, i8, i8 }
 %s.2 = type { i16, i16, i16, i16, i8, i8, i8, i8, i8, i8, i8, i8, i32, i8, i8, [20 x i16], i8, i16 }
 %s.3 = type { i8, i8, i8, i8, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i32, i32, i32, [2 x [2 x i32]], %s.4 }
@@ -23,11 +23,11 @@ target triple = "hexagon"
 ; reduced stack usage from around 568 bytes to 280 bytes.
 ; After r308350 the stack size is ~300.
 ; CHECK: allocframe(r29,#304):raw
-define void @f0(%s.0* %a0, %s.11* %a1, %s.12* %a2) #0 {
+define void @f0(ptr %a0, ptr %a1, ptr %a2) #0 {
 b0:
-  %v0 = alloca %s.0*, align 4
-  %v1 = alloca %s.11*, align 4
-  %v2 = alloca %s.12*, align 4
+  %v0 = alloca ptr, align 4
+  %v1 = alloca ptr, align 4
+  %v2 = alloca ptr, align 4
   %v3 = alloca float, align 4
   %v4 = alloca float, align 4
   %v5 = alloca float, align 4
@@ -59,675 +59,651 @@ b0:
   %v31 = alloca double, align 8
   %v32 = alloca double, align 8
   %v33 = alloca double, align 8
-  store %s.0* %a0, %s.0** %v0, align 4
-  store %s.11* %a1, %s.11** %v1, align 4
-  store %s.12* %a2, %s.12** %v2, align 4
-  store double 1.000000e+00, double* %v32, align 8
-  %v34 = load %s.11*, %s.11** %v1, align 4
-  %v35 = getelementptr inbounds %s.11, %s.11* %v34, i32 0
-  %v36 = getelementptr inbounds %s.11, %s.11* %v35, i32 0, i32 0
-  %v37 = load i64, i64* %v36, align 8
+  store ptr %a0, ptr %v0, align 4
+  store ptr %a1, ptr %v1, align 4
+  store ptr %a2, ptr %v2, align 4
+  store double 1.000000e+00, ptr %v32, align 8
+  %v34 = load ptr, ptr %v1, align 4
+  %v37 = load i64, ptr %v34, align 8
   %v38 = sitofp i64 %v37 to double
-  %v39 = load double, double* %v32, align 8
+  %v39 = load double, ptr %v32, align 8
   %v40 = fmul double %v38, %v39
-  store double %v40, double* %v13, align 8
-  %v41 = load %s.11*, %s.11** %v1, align 4
-  %v42 = getelementptr inbounds %s.11, %s.11* %v41, i32 1
-  %v43 = getelementptr inbounds %s.11, %s.11* %v42, i32 0, i32 0
-  %v44 = load i64, i64* %v43, align 8
+  store double %v40, ptr %v13, align 8
+  %v41 = load ptr, ptr %v1, align 4
+  %v42 = getelementptr inbounds %s.11, ptr %v41, i32 1
+  %v44 = load i64, ptr %v42, align 8
   %v45 = sitofp i64 %v44 to double
-  %v46 = load double, double* %v32, align 8
+  %v46 = load double, ptr %v32, align 8
   %v47 = fmul double %v45, %v46
-  store double %v47, double* %v14, align 8
-  %v48 = load %s.11*, %s.11** %v1, align 4
-  %v49 = getelementptr inbounds %s.11, %s.11* %v48, i32 1
-  %v50 = getelementptr inbounds %s.11, %s.11* %v49, i32 0, i32 1
-  %v51 = load i64, i64* %v50, align 8
+  store double %v47, ptr %v14, align 8
+  %v48 = load ptr, ptr %v1, align 4
+  %v49 = getelementptr inbounds %s.11, ptr %v48, i32 1
+  %v50 = getelementptr inbounds %s.11, ptr %v49, i32 0, i32 1
+  %v51 = load i64, ptr %v50, align 8
   %v52 = sitofp i64 %v51 to double
-  %v53 = load double, double* %v32, align 8
+  %v53 = load double, ptr %v32, align 8
   %v54 = fmul double %v52, %v53
-  store double %v54, double* %v15, align 8
-  %v55 = load %s.11*, %s.11** %v1, align 4
-  %v56 = getelementptr inbounds %s.11, %s.11* %v55, i32 2
-  %v57 = getelementptr inbounds %s.11, %s.11* %v56, i32 0, i32 0
-  %v58 = load i64, i64* %v57, align 8
+  store double %v54, ptr %v15, align 8
+  %v55 = load ptr, ptr %v1, align 4
+  %v56 = getelementptr inbounds %s.11, ptr %v55, i32 2
+  %v58 = load i64, ptr %v56, align 8
   %v59 = sitofp i64 %v58 to double
-  %v60 = load double, double* %v32, align 8
+  %v60 = load double, ptr %v32, align 8
   %v61 = fmul double %v59, %v60
-  store double %v61, double* %v16, align 8
-  %v62 = load %s.11*, %s.11** %v1, align 4
-  %v63 = getelementptr inbounds %s.11, %s.11* %v62, i32 2
-  %v64 = getelementptr inbounds %s.11, %s.11* %v63, i32 0, i32 1
-  %v65 = load i64, i64* %v64, align 8
+  store double %v61, ptr %v16, align 8
+  %v62 = load ptr, ptr %v1, align 4
+  %v63 = getelementptr inbounds %s.11, ptr %v62, i32 2
+  %v64 = getelementptr inbounds %s.11, ptr %v63, i32 0, i32 1
+  %v65 = load i64, ptr %v64, align 8
   %v66 = sitofp i64 %v65 to double
-  %v67 = load double, double* %v32, align 8
+  %v67 = load double, ptr %v32, align 8
   %v68 = fmul double %v66, %v67
-  store double %v68, double* %v17, align 8
-  %v69 = load %s.11*, %s.11** %v1, align 4
-  %v70 = getelementptr inbounds %s.11, %s.11* %v69, i32 3
-  %v71 = getelementptr inbounds %s.11, %s.11* %v70, i32 0, i32 0
-  %v72 = load i64, i64* %v71, align 8
+  store double %v68, ptr %v17, align 8
+  %v69 = load ptr, ptr %v1, align 4
+  %v70 = getelementptr inbounds %s.11, ptr %v69, i32 3
+  %v72 = load i64, ptr %v70, align 8
   %v73 = sitofp i64 %v72 to double
-  %v74 = load double, double* %v32, align 8
+  %v74 = load double, ptr %v32, align 8
   %v75 = fmul double %v73, %v74
-  store double %v75, double* %v18, align 8
-  %v76 = load %s.11*, %s.11** %v1, align 4
-  %v77 = getelementptr inbounds %s.11, %s.11* %v76, i32 3
-  %v78 = getelementptr inbounds %s.11, %s.11* %v77, i32 0, i32 1
-  %v79 = load i64, i64* %v78, align 8
+  store double %v75, ptr %v18, align 8
+  %v76 = load ptr, ptr %v1, align 4
+  %v77 = getelementptr inbounds %s.11, ptr %v76, i32 3
+  %v78 = getelementptr inbounds %s.11, ptr %v77, i32 0, i32 1
+  %v79 = load i64, ptr %v78, align 8
   %v80 = sitofp i64 %v79 to double
-  %v81 = load double, double* %v32, align 8
+  %v81 = load double, ptr %v32, align 8
   %v82 = fmul double %v80, %v81
-  store double %v82, double* %v19, align 8
-  %v83 = load double, double* %v13, align 8
-  %v84 = load double, double* %v13, align 8
+  store double %v82, ptr %v19, align 8
+  %v83 = load double, ptr %v13, align 8
+  %v84 = load double, ptr %v13, align 8
   %v85 = fmul double %v83, %v84
-  %v86 = load double, double* %v14, align 8
-  %v87 = load double, double* %v14, align 8
+  %v86 = load double, ptr %v14, align 8
+  %v87 = load double, ptr %v14, align 8
   %v88 = fmul double %v86, %v87
   %v89 = fsub double %v85, %v88
-  %v90 = load double, double* %v15, align 8
-  %v91 = load double, double* %v15, align 8
+  %v90 = load double, ptr %v15, align 8
+  %v91 = load double, ptr %v15, align 8
   %v92 = fmul double %v90, %v91
   %v93 = fsub double %v89, %v92
-  store double %v93, double* %v20, align 8
-  %v94 = load double, double* %v13, align 8
-  %v95 = load double, double* %v14, align 8
+  store double %v93, ptr %v20, align 8
+  %v94 = load double, ptr %v13, align 8
+  %v95 = load double, ptr %v14, align 8
   %v96 = fmul double %v94, %v95
-  %v97 = load double, double* %v16, align 8
-  %v98 = load double, double* %v14, align 8
+  %v97 = load double, ptr %v16, align 8
+  %v98 = load double, ptr %v14, align 8
   %v99 = fmul double %v97, %v98
   %v100 = fsub double %v96, %v99
-  %v101 = load double, double* %v17, align 8
-  %v102 = load double, double* %v15, align 8
+  %v101 = load double, ptr %v17, align 8
+  %v102 = load double, ptr %v15, align 8
   %v103 = fmul double %v101, %v102
   %v104 = fsub double %v100, %v103
-  store double %v104, double* %v21, align 8
-  %v105 = load double, double* %v13, align 8
-  %v106 = load double, double* %v15, align 8
+  store double %v104, ptr %v21, align 8
+  %v105 = load double, ptr %v13, align 8
+  %v106 = load double, ptr %v15, align 8
   %v107 = fmul double %v105, %v106
-  %v108 = load double, double* %v16, align 8
-  %v109 = load double, double* %v15, align 8
+  %v108 = load double, ptr %v16, align 8
+  %v109 = load double, ptr %v15, align 8
   %v110 = fmul double %v108, %v109
   %v111 = fadd double %v107, %v110
-  %v112 = load double, double* %v17, align 8
-  %v113 = load double, double* %v14, align 8
+  %v112 = load double, ptr %v17, align 8
+  %v113 = load double, ptr %v14, align 8
   %v114 = fmul double %v112, %v113
   %v115 = fsub double %v111, %v114
-  store double %v115, double* %v22, align 8
-  %v116 = load double, double* %v13, align 8
-  %v117 = load double, double* %v16, align 8
+  store double %v115, ptr %v22, align 8
+  %v116 = load double, ptr %v13, align 8
+  %v117 = load double, ptr %v16, align 8
   %v118 = fmul double %v116, %v117
-  %v119 = load double, double* %v18, align 8
-  %v120 = load double, double* %v14, align 8
+  %v119 = load double, ptr %v18, align 8
+  %v120 = load double, ptr %v14, align 8
   %v121 = fmul double %v119, %v120
   %v122 = fsub double %v118, %v121
-  %v123 = load double, double* %v19, align 8
-  %v124 = load double, double* %v15, align 8
+  %v123 = load double, ptr %v19, align 8
+  %v124 = load double, ptr %v15, align 8
   %v125 = fmul double %v123, %v124
   %v126 = fsub double %v122, %v125
-  store double %v126, double* %v23, align 8
-  %v127 = load double, double* %v13, align 8
-  %v128 = load double, double* %v17, align 8
+  store double %v126, ptr %v23, align 8
+  %v127 = load double, ptr %v13, align 8
+  %v128 = load double, ptr %v17, align 8
   %v129 = fmul double %v127, %v128
-  %v130 = load double, double* %v18, align 8
-  %v131 = load double, double* %v15, align 8
+  %v130 = load double, ptr %v18, align 8
+  %v131 = load double, ptr %v15, align 8
   %v132 = fmul double %v130, %v131
   %v133 = fadd double %v129, %v132
-  %v134 = load double, double* %v19, align 8
-  %v135 = load double, double* %v14, align 8
+  %v134 = load double, ptr %v19, align 8
+  %v135 = load double, ptr %v14, align 8
   %v136 = fmul double %v134, %v135
   %v137 = fsub double %v133, %v136
-  store double %v137, double* %v24, align 8
-  %v138 = load double, double* %v14, align 8
-  %v139 = load double, double* %v14, align 8
+  store double %v137, ptr %v24, align 8
+  %v138 = load double, ptr %v14, align 8
+  %v139 = load double, ptr %v14, align 8
   %v140 = fmul double %v138, %v139
-  %v141 = load double, double* %v15, align 8
-  %v142 = load double, double* %v15, align 8
+  %v141 = load double, ptr %v15, align 8
+  %v142 = load double, ptr %v15, align 8
   %v143 = fmul double %v141, %v142
   %v144 = fsub double %v140, %v143
-  %v145 = load double, double* %v16, align 8
-  %v146 = load double, double* %v13, align 8
+  %v145 = load double, ptr %v16, align 8
+  %v146 = load double, ptr %v13, align 8
   %v147 = fmul double %v145, %v146
   %v148 = fsub double %v144, %v147
-  store double %v148, double* %v25, align 8
-  %v149 = load double, double* %v14, align 8
-  %v150 = load double, double* %v15, align 8
+  store double %v148, ptr %v25, align 8
+  %v149 = load double, ptr %v14, align 8
+  %v150 = load double, ptr %v15, align 8
   %v151 = fmul double %v149, %v150
   %v152 = fmul double %v151, 2.000000e+00
-  %v153 = load double, double* %v17, align 8
-  %v154 = load double, double* %v13, align 8
+  %v153 = load double, ptr %v17, align 8
+  %v154 = load double, ptr %v13, align 8
   %v155 = fmul double %v153, %v154
   %v156 = fsub double %v152, %v155
-  store double %v156, double* %v26, align 8
-  %v157 = load double, double* %v14, align 8
-  %v158 = load double, double* %v16, align 8
+  store double %v156, ptr %v26, align 8
+  %v157 = load double, ptr %v14, align 8
+  %v158 = load double, ptr %v16, align 8
   %v159 = fmul double %v157, %v158
-  %v160 = load double, double* %v15, align 8
-  %v161 = load double, double* %v17, align 8
+  %v160 = load double, ptr %v15, align 8
+  %v161 = load double, ptr %v17, align 8
   %v162 = fmul double %v160, %v161
   %v163 = fsub double %v159, %v162
-  %v164 = load double, double* %v18, align 8
-  %v165 = load double, double* %v13, align 8
+  %v164 = load double, ptr %v18, align 8
+  %v165 = load double, ptr %v13, align 8
   %v166 = fmul double %v164, %v165
   %v167 = fsub double %v163, %v166
-  store double %v167, double* %v27, align 8
-  %v168 = load double, double* %v14, align 8
-  %v169 = load double, double* %v17, align 8
+  store double %v167, ptr %v27, align 8
+  %v168 = load double, ptr %v14, align 8
+  %v169 = load double, ptr %v17, align 8
   %v170 = fmul double %v168, %v169
-  %v171 = load double, double* %v15, align 8
-  %v172 = load double, double* %v16, align 8
+  %v171 = load double, ptr %v15, align 8
+  %v172 = load double, ptr %v16, align 8
   %v173 = fmul double %v171, %v172
   %v174 = fadd double %v170, %v173
-  %v175 = load double, double* %v19, align 8
-  %v176 = load double, double* %v13, align 8
+  %v175 = load double, ptr %v19, align 8
+  %v176 = load double, ptr %v13, align 8
   %v177 = fmul double %v175, %v176
   %v178 = fsub double %v174, %v177
-  store double %v178, double* %v28, align 8
-  %v179 = load double, double* %v16, align 8
-  %v180 = load double, double* %v16, align 8
+  store double %v178, ptr %v28, align 8
+  %v179 = load double, ptr %v16, align 8
+  %v180 = load double, ptr %v16, align 8
   %v181 = fmul double %v179, %v180
-  %v182 = load double, double* %v17, align 8
-  %v183 = load double, double* %v17, align 8
+  %v182 = load double, ptr %v17, align 8
+  %v183 = load double, ptr %v17, align 8
   %v184 = fmul double %v182, %v183
   %v185 = fsub double %v181, %v184
-  %v186 = load double, double* %v18, align 8
-  %v187 = load double, double* %v14, align 8
+  %v186 = load double, ptr %v18, align 8
+  %v187 = load double, ptr %v14, align 8
   %v188 = fmul double %v186, %v187
   %v189 = fsub double %v185, %v188
-  %v190 = load double, double* %v19, align 8
-  %v191 = load double, double* %v15, align 8
+  %v190 = load double, ptr %v19, align 8
+  %v191 = load double, ptr %v15, align 8
   %v192 = fmul double %v190, %v191
   %v193 = fadd double %v189, %v192
-  store double %v193, double* %v29, align 8
-  %v194 = load double, double* %v16, align 8
-  %v195 = load double, double* %v17, align 8
+  store double %v193, ptr %v29, align 8
+  %v194 = load double, ptr %v16, align 8
+  %v195 = load double, ptr %v17, align 8
   %v196 = fmul double %v194, %v195
   %v197 = fmul double %v196, 2.000000e+00
-  %v198 = load double, double* %v18, align 8
-  %v199 = load double, double* %v15, align 8
+  %v198 = load double, ptr %v18, align 8
+  %v199 = load double, ptr %v15, align 8
   %v200 = fmul double %v198, %v199
   %v201 = fsub double %v197, %v200
-  %v202 = load double, double* %v19, align 8
-  %v203 = load double, double* %v14, align 8
+  %v202 = load double, ptr %v19, align 8
+  %v203 = load double, ptr %v14, align 8
   %v204 = fmul double %v202, %v203
   %v205 = fsub double %v201, %v204
-  store double %v205, double* %v30, align 8
-  %v206 = load double, double* %v20, align 8
-  %v207 = load double, double* %v20, align 8
+  store double %v205, ptr %v30, align 8
+  %v206 = load double, ptr %v20, align 8
+  %v207 = load double, ptr %v20, align 8
   %v208 = fmul double %v206, %v207
-  %v209 = load double, double* %v21, align 8
-  %v210 = load double, double* %v21, align 8
+  %v209 = load double, ptr %v21, align 8
+  %v210 = load double, ptr %v21, align 8
   %v211 = fmul double %v209, %v210
   %v212 = fsub double %v208, %v211
-  %v213 = load double, double* %v22, align 8
-  %v214 = load double, double* %v22, align 8
+  %v213 = load double, ptr %v22, align 8
+  %v214 = load double, ptr %v22, align 8
   %v215 = fmul double %v213, %v214
   %v216 = fsub double %v212, %v215
-  %v217 = load double, double* %v23, align 8
-  %v218 = load double, double* %v25, align 8
+  %v217 = load double, ptr %v23, align 8
+  %v218 = load double, ptr %v25, align 8
   %v219 = fmul double %v217, %v218
   %v220 = fmul double %v219, 2.000000e+00
   %v221 = fadd double %v216, %v220
-  %v222 = load double, double* %v24, align 8
-  %v223 = load double, double* %v26, align 8
+  %v222 = load double, ptr %v24, align 8
+  %v223 = load double, ptr %v26, align 8
   %v224 = fmul double %v222, %v223
   %v225 = fmul double %v224, 2.000000e+00
   %v226 = fadd double %v221, %v225
-  %v227 = load double, double* %v27, align 8
-  %v228 = load double, double* %v27, align 8
+  %v227 = load double, ptr %v27, align 8
+  %v228 = load double, ptr %v27, align 8
   %v229 = fmul double %v227, %v228
   %v230 = fsub double %v226, %v229
-  %v231 = load double, double* %v28, align 8
-  %v232 = load double, double* %v28, align 8
+  %v231 = load double, ptr %v28, align 8
+  %v232 = load double, ptr %v28, align 8
   %v233 = fmul double %v231, %v232
   %v234 = fsub double %v230, %v233
-  %v235 = load double, double* %v29, align 8
-  %v236 = load double, double* %v29, align 8
+  %v235 = load double, ptr %v29, align 8
+  %v236 = load double, ptr %v29, align 8
   %v237 = fmul double %v235, %v236
   %v238 = fadd double %v234, %v237
-  %v239 = load double, double* %v30, align 8
-  %v240 = load double, double* %v30, align 8
+  %v239 = load double, ptr %v30, align 8
+  %v240 = load double, ptr %v30, align 8
   %v241 = fmul double %v239, %v240
   %v242 = fadd double %v238, %v241
-  store double %v242, double* %v31, align 8
-  %v243 = load double, double* %v31, align 8
+  store double %v242, ptr %v31, align 8
+  %v243 = load double, ptr %v31, align 8
   %v244 = call double @f1(double %v243) #1
-  %v245 = load double, double* %v32, align 8
+  %v245 = load double, ptr %v32, align 8
   %v246 = fcmp olt double %v244, %v245
   br i1 %v246, label %b1, label %b2
 
 b1:                                               ; preds = %b0
-  %v247 = load %s.0*, %s.0** %v0, align 4
-  %v248 = getelementptr inbounds %s.0, %s.0* %v247, i32 0, i32 2
-  %v249 = load %s.3*, %s.3** %v248, align 4
-  %v250 = getelementptr inbounds %s.3, %s.3* %v249, i32 0, i32 0
-  store i8 3, i8* %v250, align 1
+  %v247 = load ptr, ptr %v0, align 4
+  %v248 = getelementptr inbounds %s.0, ptr %v247, i32 0, i32 2
+  %v249 = load ptr, ptr %v248, align 4
+  store i8 3, ptr %v249, align 1
   br label %b3
 
 b2:                                               ; preds = %b0
-  %v251 = load double, double* %v32, align 8
-  %v252 = load double, double* %v31, align 8
+  %v251 = load double, ptr %v32, align 8
+  %v252 = load double, ptr %v31, align 8
   %v253 = fdiv double %v251, %v252
-  store double %v253, double* %v32, align 8
-  %v254 = load double, double* %v13, align 8
-  %v255 = load double, double* %v20, align 8
+  store double %v253, ptr %v32, align 8
+  %v254 = load double, ptr %v13, align 8
+  %v255 = load double, ptr %v20, align 8
   %v256 = fmul double %v254, %v255
-  %v257 = load double, double* %v14, align 8
-  %v258 = load double, double* %v21, align 8
+  %v257 = load double, ptr %v14, align 8
+  %v258 = load double, ptr %v21, align 8
   %v259 = fmul double %v257, %v258
   %v260 = fsub double %v256, %v259
-  %v261 = load double, double* %v15, align 8
-  %v262 = load double, double* %v22, align 8
+  %v261 = load double, ptr %v15, align 8
+  %v262 = load double, ptr %v22, align 8
   %v263 = fmul double %v261, %v262
   %v264 = fsub double %v260, %v263
-  %v265 = load double, double* %v16, align 8
-  %v266 = load double, double* %v25, align 8
+  %v265 = load double, ptr %v16, align 8
+  %v266 = load double, ptr %v25, align 8
   %v267 = fmul double %v265, %v266
   %v268 = fadd double %v264, %v267
-  %v269 = load double, double* %v17, align 8
-  %v270 = load double, double* %v26, align 8
+  %v269 = load double, ptr %v17, align 8
+  %v270 = load double, ptr %v26, align 8
   %v271 = fmul double %v269, %v270
   %v272 = fadd double %v268, %v271
-  store double %v272, double* %v33, align 8
-  %v273 = load double, double* %v33, align 8
-  %v274 = load double, double* %v32, align 8
+  store double %v272, ptr %v33, align 8
+  %v273 = load double, ptr %v33, align 8
+  %v274 = load double, ptr %v32, align 8
   %v275 = fmul double %v273, %v274
   %v276 = fptrunc double %v275 to float
-  store float %v276, float* %v3, align 4
-  %v277 = load double, double* %v14, align 8
+  store float %v276, ptr %v3, align 4
+  %v277 = load double, ptr %v14, align 8
   %v278 = fsub double -0.000000e+00, %v277
-  %v279 = load double, double* %v20, align 8
+  %v279 = load double, ptr %v20, align 8
   %v280 = fmul double %v278, %v279
-  %v281 = load double, double* %v16, align 8
-  %v282 = load double, double* %v21, align 8
+  %v281 = load double, ptr %v16, align 8
+  %v282 = load double, ptr %v21, align 8
   %v283 = fmul double %v281, %v282
   %v284 = fadd double %v280, %v283
-  %v285 = load double, double* %v17, align 8
-  %v286 = load double, double* %v22, align 8
+  %v285 = load double, ptr %v17, align 8
+  %v286 = load double, ptr %v22, align 8
   %v287 = fmul double %v285, %v286
   %v288 = fadd double %v284, %v287
-  %v289 = load double, double* %v18, align 8
-  %v290 = load double, double* %v25, align 8
+  %v289 = load double, ptr %v18, align 8
+  %v290 = load double, ptr %v25, align 8
   %v291 = fmul double %v289, %v290
   %v292 = fsub double %v288, %v291
-  %v293 = load double, double* %v19, align 8
-  %v294 = load double, double* %v26, align 8
+  %v293 = load double, ptr %v19, align 8
+  %v294 = load double, ptr %v26, align 8
   %v295 = fmul double %v293, %v294
   %v296 = fsub double %v292, %v295
-  store double %v296, double* %v33, align 8
-  %v297 = load double, double* %v33, align 8
-  %v298 = load double, double* %v32, align 8
+  store double %v296, ptr %v33, align 8
+  %v297 = load double, ptr %v33, align 8
+  %v298 = load double, ptr %v32, align 8
   %v299 = fmul double %v297, %v298
   %v300 = fptrunc double %v299 to float
-  store float %v300, float* %v4, align 4
-  %v301 = load double, double* %v15, align 8
+  store float %v300, ptr %v4, align 4
+  %v301 = load double, ptr %v15, align 8
   %v302 = fsub double -0.000000e+00, %v301
-  %v303 = load double, double* %v20, align 8
+  %v303 = load double, ptr %v20, align 8
   %v304 = fmul double %v302, %v303
-  %v305 = load double, double* %v16, align 8
-  %v306 = load double, double* %v22, align 8
+  %v305 = load double, ptr %v16, align 8
+  %v306 = load double, ptr %v22, align 8
   %v307 = fmul double %v305, %v306
   %v308 = fsub double %v304, %v307
-  %v309 = load double, double* %v17, align 8
-  %v310 = load double, double* %v21, align 8
+  %v309 = load double, ptr %v17, align 8
+  %v310 = load double, ptr %v21, align 8
   %v311 = fmul double %v309, %v310
   %v312 = fadd double %v308, %v311
-  %v313 = load double, double* %v18, align 8
-  %v314 = load double, double* %v26, align 8
+  %v313 = load double, ptr %v18, align 8
+  %v314 = load double, ptr %v26, align 8
   %v315 = fmul double %v313, %v314
   %v316 = fadd double %v312, %v315
-  %v317 = load double, double* %v19, align 8
-  %v318 = load double, double* %v25, align 8
+  %v317 = load double, ptr %v19, align 8
+  %v318 = load double, ptr %v25, align 8
   %v319 = fmul double %v317, %v318
   %v320 = fsub double %v316, %v319
-  store double %v320, double* %v33, align 8
-  %v321 = load double, double* %v33, align 8
-  %v322 = load double, double* %v32, align 8
+  store double %v320, ptr %v33, align 8
+  %v321 = load double, ptr %v33, align 8
+  %v322 = load double, ptr %v32, align 8
   %v323 = fmul double %v321, %v322
   %v324 = fptrunc double %v323 to float
-  store float %v324, float* %v5, align 4
-  %v325 = load double, double* %v16, align 8
-  %v326 = load double, double* %v29, align 8
+  store float %v324, ptr %v5, align 4
+  %v325 = load double, ptr %v16, align 8
+  %v326 = load double, ptr %v29, align 8
   %v327 = fmul double %v325, %v326
-  %v328 = load double, double* %v17, align 8
-  %v329 = load double, double* %v30, align 8
+  %v328 = load double, ptr %v17, align 8
+  %v329 = load double, ptr %v30, align 8
   %v330 = fmul double %v328, %v329
   %v331 = fadd double %v327, %v330
-  %v332 = load double, double* %v14, align 8
-  %v333 = load double, double* %v27, align 8
+  %v332 = load double, ptr %v14, align 8
+  %v333 = load double, ptr %v27, align 8
   %v334 = fmul double %v332, %v333
   %v335 = fsub double %v331, %v334
-  %v336 = load double, double* %v15, align 8
-  %v337 = load double, double* %v28, align 8
+  %v336 = load double, ptr %v15, align 8
+  %v337 = load double, ptr %v28, align 8
   %v338 = fmul double %v336, %v337
   %v339 = fsub double %v335, %v338
-  %v340 = load double, double* %v13, align 8
-  %v341 = load double, double* %v25, align 8
+  %v340 = load double, ptr %v13, align 8
+  %v341 = load double, ptr %v25, align 8
   %v342 = fmul double %v340, %v341
   %v343 = fadd double %v339, %v342
-  store double %v343, double* %v33, align 8
-  %v344 = load double, double* %v33, align 8
-  %v345 = load double, double* %v32, align 8
+  store double %v343, ptr %v33, align 8
+  %v344 = load double, ptr %v33, align 8
+  %v345 = load double, ptr %v32, align 8
   %v346 = fmul double %v344, %v345
   %v347 = fptrunc double %v346 to float
-  store float %v347, float* %v6, align 4
-  %v348 = load double, double* %v16, align 8
-  %v349 = load double, double* %v30, align 8
+  store float %v347, ptr %v6, align 4
+  %v348 = load double, ptr %v16, align 8
+  %v349 = load double, ptr %v30, align 8
   %v350 = fmul double %v348, %v349
-  %v351 = load double, double* %v17, align 8
-  %v352 = load double, double* %v29, align 8
+  %v351 = load double, ptr %v17, align 8
+  %v352 = load double, ptr %v29, align 8
   %v353 = fmul double %v351, %v352
   %v354 = fsub double %v350, %v353
-  %v355 = load double, double* %v14, align 8
-  %v356 = load double, double* %v28, align 8
+  %v355 = load double, ptr %v14, align 8
+  %v356 = load double, ptr %v28, align 8
   %v357 = fmul double %v355, %v356
   %v358 = fsub double %v354, %v357
-  %v359 = load double, double* %v15, align 8
-  %v360 = load double, double* %v27, align 8
+  %v359 = load double, ptr %v15, align 8
+  %v360 = load double, ptr %v27, align 8
   %v361 = fmul double %v359, %v360
   %v362 = fadd double %v358, %v361
-  %v363 = load double, double* %v13, align 8
-  %v364 = load double, double* %v26, align 8
+  %v363 = load double, ptr %v13, align 8
+  %v364 = load double, ptr %v26, align 8
   %v365 = fmul double %v363, %v364
   %v366 = fadd double %v362, %v365
-  store double %v366, double* %v33, align 8
-  %v367 = load double, double* %v33, align 8
-  %v368 = load double, double* %v32, align 8
+  store double %v366, ptr %v33, align 8
+  %v367 = load double, ptr %v33, align 8
+  %v368 = load double, ptr %v32, align 8
   %v369 = fmul double %v367, %v368
   %v370 = fptrunc double %v369 to float
-  store float %v370, float* %v7, align 4
-  %v371 = load double, double* %v14, align 8
+  store float %v370, ptr %v7, align 4
+  %v371 = load double, ptr %v14, align 8
   %v372 = fsub double -0.000000e+00, %v371
-  %v373 = load double, double* %v29, align 8
+  %v373 = load double, ptr %v29, align 8
   %v374 = fmul double %v372, %v373
-  %v375 = load double, double* %v15, align 8
-  %v376 = load double, double* %v30, align 8
+  %v375 = load double, ptr %v15, align 8
+  %v376 = load double, ptr %v30, align 8
   %v377 = fmul double %v375, %v376
   %v378 = fsub double %v374, %v377
-  %v379 = load double, double* %v13, align 8
-  %v380 = load double, double* %v27, align 8
+  %v379 = load double, ptr %v13, align 8
+  %v380 = load double, ptr %v27, align 8
   %v381 = fmul double %v379, %v380
   %v382 = fadd double %v378, %v381
-  %v383 = load double, double* %v14, align 8
-  %v384 = load double, double* %v25, align 8
+  %v383 = load double, ptr %v14, align 8
+  %v384 = load double, ptr %v25, align 8
   %v385 = fmul double %v383, %v384
   %v386 = fsub double %v382, %v385
-  %v387 = load double, double* %v15, align 8
-  %v388 = load double, double* %v26, align 8
+  %v387 = load double, ptr %v15, align 8
+  %v388 = load double, ptr %v26, align 8
   %v389 = fmul double %v387, %v388
   %v390 = fadd double %v386, %v389
-  store double %v390, double* %v33, align 8
-  %v391 = load double, double* %v33, align 8
-  %v392 = load double, double* %v32, align 8
+  store double %v390, ptr %v33, align 8
+  %v391 = load double, ptr %v33, align 8
+  %v392 = load double, ptr %v32, align 8
   %v393 = fmul double %v391, %v392
   %v394 = fptrunc double %v393 to float
-  store float %v394, float* %v8, align 4
-  %v395 = load double, double* %v14, align 8
+  store float %v394, ptr %v8, align 4
+  %v395 = load double, ptr %v14, align 8
   %v396 = fsub double -0.000000e+00, %v395
-  %v397 = load double, double* %v30, align 8
+  %v397 = load double, ptr %v30, align 8
   %v398 = fmul double %v396, %v397
-  %v399 = load double, double* %v15, align 8
-  %v400 = load double, double* %v29, align 8
+  %v399 = load double, ptr %v15, align 8
+  %v400 = load double, ptr %v29, align 8
   %v401 = fmul double %v399, %v400
   %v402 = fadd double %v398, %v401
-  %v403 = load double, double* %v13, align 8
-  %v404 = load double, double* %v28, align 8
+  %v403 = load double, ptr %v13, align 8
+  %v404 = load double, ptr %v28, align 8
   %v405 = fmul double %v403, %v404
   %v406 = fadd double %v402, %v405
-  %v407 = load double, double* %v14, align 8
-  %v408 = load double, double* %v26, align 8
+  %v407 = load double, ptr %v14, align 8
+  %v408 = load double, ptr %v26, align 8
   %v409 = fmul double %v407, %v408
   %v410 = fsub double %v406, %v409
-  %v411 = load double, double* %v15, align 8
-  %v412 = load double, double* %v25, align 8
+  %v411 = load double, ptr %v15, align 8
+  %v412 = load double, ptr %v25, align 8
   %v413 = fmul double %v411, %v412
   %v414 = fsub double %v410, %v413
-  store double %v414, double* %v33, align 8
-  %v415 = load double, double* %v33, align 8
-  %v416 = load double, double* %v32, align 8
+  store double %v414, ptr %v33, align 8
+  %v415 = load double, ptr %v33, align 8
+  %v416 = load double, ptr %v32, align 8
   %v417 = fmul double %v415, %v416
   %v418 = fptrunc double %v417 to float
-  store float %v418, float* %v9, align 4
-  %v419 = load double, double* %v13, align 8
-  %v420 = load double, double* %v20, align 8
+  store float %v418, ptr %v9, align 4
+  %v419 = load double, ptr %v13, align 8
+  %v420 = load double, ptr %v20, align 8
   %v421 = fmul double %v419, %v420
-  %v422 = load double, double* %v16, align 8
-  %v423 = load double, double* %v23, align 8
+  %v422 = load double, ptr %v16, align 8
+  %v423 = load double, ptr %v23, align 8
   %v424 = fmul double %v422, %v423
   %v425 = fsub double %v421, %v424
-  %v426 = load double, double* %v17, align 8
-  %v427 = load double, double* %v24, align 8
+  %v426 = load double, ptr %v17, align 8
+  %v427 = load double, ptr %v24, align 8
   %v428 = fmul double %v426, %v427
   %v429 = fsub double %v425, %v428
-  %v430 = load double, double* %v18, align 8
-  %v431 = load double, double* %v27, align 8
+  %v430 = load double, ptr %v18, align 8
+  %v431 = load double, ptr %v27, align 8
   %v432 = fmul double %v430, %v431
   %v433 = fadd double %v429, %v432
-  %v434 = load double, double* %v19, align 8
-  %v435 = load double, double* %v28, align 8
+  %v434 = load double, ptr %v19, align 8
+  %v435 = load double, ptr %v28, align 8
   %v436 = fmul double %v434, %v435
   %v437 = fadd double %v433, %v436
-  store double %v437, double* %v33, align 8
-  %v438 = load double, double* %v33, align 8
-  %v439 = load double, double* %v32, align 8
+  store double %v437, ptr %v33, align 8
+  %v438 = load double, ptr %v33, align 8
+  %v439 = load double, ptr %v32, align 8
   %v440 = fmul double %v438, %v439
   %v441 = fptrunc double %v440 to float
-  store float %v441, float* %v10, align 4
-  %v442 = load double, double* %v18, align 8
+  store float %v441, ptr %v10, align 4
+  %v442 = load double, ptr %v18, align 8
   %v443 = fsub double -0.000000e+00, %v442
-  %v444 = load double, double* %v29, align 8
+  %v444 = load double, ptr %v29, align 8
   %v445 = fmul double %v443, %v444
-  %v446 = load double, double* %v19, align 8
-  %v447 = load double, double* %v30, align 8
+  %v446 = load double, ptr %v19, align 8
+  %v447 = load double, ptr %v30, align 8
   %v448 = fmul double %v446, %v447
   %v449 = fsub double %v445, %v448
-  %v450 = load double, double* %v14, align 8
-  %v451 = load double, double* %v23, align 8
+  %v450 = load double, ptr %v14, align 8
+  %v451 = load double, ptr %v23, align 8
   %v452 = fmul double %v450, %v451
   %v453 = fadd double %v449, %v452
-  %v454 = load double, double* %v15, align 8
-  %v455 = load double, double* %v24, align 8
+  %v454 = load double, ptr %v15, align 8
+  %v455 = load double, ptr %v24, align 8
   %v456 = fmul double %v454, %v455
   %v457 = fadd double %v453, %v456
-  %v458 = load double, double* %v13, align 8
-  %v459 = load double, double* %v21, align 8
+  %v458 = load double, ptr %v13, align 8
+  %v459 = load double, ptr %v21, align 8
   %v460 = fmul double %v458, %v459
   %v461 = fsub double %v457, %v460
-  store double %v461, double* %v33, align 8
-  %v462 = load double, double* %v33, align 8
-  %v463 = load double, double* %v32, align 8
+  store double %v461, ptr %v33, align 8
+  %v462 = load double, ptr %v33, align 8
+  %v463 = load double, ptr %v32, align 8
   %v464 = fmul double %v462, %v463
   %v465 = fptrunc double %v464 to float
-  store float %v465, float* %v11, align 4
-  %v466 = load double, double* %v18, align 8
+  store float %v465, ptr %v11, align 4
+  %v466 = load double, ptr %v18, align 8
   %v467 = fsub double -0.000000e+00, %v466
-  %v468 = load double, double* %v30, align 8
+  %v468 = load double, ptr %v30, align 8
   %v469 = fmul double %v467, %v468
-  %v470 = load double, double* %v19, align 8
-  %v471 = load double, double* %v29, align 8
+  %v470 = load double, ptr %v19, align 8
+  %v471 = load double, ptr %v29, align 8
   %v472 = fmul double %v470, %v471
   %v473 = fadd double %v469, %v472
-  %v474 = load double, double* %v14, align 8
-  %v475 = load double, double* %v24, align 8
+  %v474 = load double, ptr %v14, align 8
+  %v475 = load double, ptr %v24, align 8
   %v476 = fmul double %v474, %v475
   %v477 = fadd double %v473, %v476
-  %v478 = load double, double* %v15, align 8
-  %v479 = load double, double* %v23, align 8
+  %v478 = load double, ptr %v15, align 8
+  %v479 = load double, ptr %v23, align 8
   %v480 = fmul double %v478, %v479
   %v481 = fsub double %v477, %v480
-  %v482 = load double, double* %v13, align 8
-  %v483 = load double, double* %v22, align 8
+  %v482 = load double, ptr %v13, align 8
+  %v483 = load double, ptr %v22, align 8
   %v484 = fmul double %v482, %v483
   %v485 = fsub double %v481, %v484
-  store double %v485, double* %v33, align 8
-  %v486 = load double, double* %v33, align 8
-  %v487 = load double, double* %v32, align 8
+  store double %v485, ptr %v33, align 8
+  %v486 = load double, ptr %v33, align 8
+  %v487 = load double, ptr %v32, align 8
   %v488 = fmul double %v486, %v487
   %v489 = fptrunc double %v488 to float
-  store float %v489, float* %v12, align 4
-  %v490 = load float, float* %v3, align 4
-  %v491 = load %s.12*, %s.12** %v2, align 4
-  %v492 = getelementptr inbounds %s.12, %s.12* %v491, i32 0
-  %v493 = getelementptr inbounds %s.12, %s.12* %v492, i32 0, i32 0
-  store float %v490, float* %v493, align 4
-  %v494 = load %s.12*, %s.12** %v2, align 4
-  %v495 = getelementptr inbounds %s.12, %s.12* %v494, i32 0
-  %v496 = getelementptr inbounds %s.12, %s.12* %v495, i32 0, i32 1
-  store float 0.000000e+00, float* %v496, align 4
-  %v497 = load float, float* %v4, align 4
-  %v498 = load %s.12*, %s.12** %v2, align 4
-  %v499 = getelementptr inbounds %s.12, %s.12* %v498, i32 1
-  %v500 = getelementptr inbounds %s.12, %s.12* %v499, i32 0, i32 0
-  store float %v497, float* %v500, align 4
-  %v501 = load float, float* %v5, align 4
-  %v502 = load %s.12*, %s.12** %v2, align 4
-  %v503 = getelementptr inbounds %s.12, %s.12* %v502, i32 1
-  %v504 = getelementptr inbounds %s.12, %s.12* %v503, i32 0, i32 1
-  store float %v501, float* %v504, align 4
-  %v505 = load float, float* %v6, align 4
-  %v506 = load %s.12*, %s.12** %v2, align 4
-  %v507 = getelementptr inbounds %s.12, %s.12* %v506, i32 2
-  %v508 = getelementptr inbounds %s.12, %s.12* %v507, i32 0, i32 0
-  store float %v505, float* %v508, align 4
-  %v509 = load float, float* %v7, align 4
-  %v510 = load %s.12*, %s.12** %v2, align 4
-  %v511 = getelementptr inbounds %s.12, %s.12* %v510, i32 2
-  %v512 = getelementptr inbounds %s.12, %s.12* %v511, i32 0, i32 1
-  store float %v509, float* %v512, align 4
-  %v513 = load float, float* %v8, align 4
-  %v514 = load %s.12*, %s.12** %v2, align 4
-  %v515 = getelementptr inbounds %s.12, %s.12* %v514, i32 3
-  %v516 = getelementptr inbounds %s.12, %s.12* %v515, i32 0, i32 0
-  store float %v513, float* %v516, align 4
-  %v517 = load float, float* %v9, align 4
-  %v518 = load %s.12*, %s.12** %v2, align 4
-  %v519 = getelementptr inbounds %s.12, %s.12* %v518, i32 3
-  %v520 = getelementptr inbounds %s.12, %s.12* %v519, i32 0, i32 1
-  store float %v517, float* %v520, align 4
-  %v521 = load float, float* %v4, align 4
-  %v522 = load %s.12*, %s.12** %v2, align 4
-  %v523 = getelementptr inbounds %s.12, %s.12* %v522, i32 4
-  %v524 = getelementptr inbounds %s.12, %s.12* %v523, i32 0, i32 0
-  store float %v521, float* %v524, align 4
-  %v525 = load float, float* %v5, align 4
+  store float %v489, ptr %v12, align 4
+  %v490 = load float, ptr %v3, align 4
+  %v491 = load ptr, ptr %v2, align 4
+  store float %v490, ptr %v491, align 4
+  %v494 = load ptr, ptr %v2, align 4
+  %v496 = getelementptr inbounds %s.12, ptr %v494, i32 0, i32 1
+  store float 0.000000e+00, ptr %v496, align 4
+  %v497 = load float, ptr %v4, align 4
+  %v498 = load ptr, ptr %v2, align 4
+  %v499 = getelementptr inbounds %s.12, ptr %v498, i32 1
+  store float %v497, ptr %v499, align 4
+  %v501 = load float, ptr %v5, align 4
+  %v502 = load ptr, ptr %v2, align 4
+  %v503 = getelementptr inbounds %s.12, ptr %v502, i32 1
+  %v504 = getelementptr inbounds %s.12, ptr %v503, i32 0, i32 1
+  store float %v501, ptr %v504, align 4
+  %v505 = load float, ptr %v6, align 4
+  %v506 = load ptr, ptr %v2, align 4
+  %v507 = getelementptr inbounds %s.12, ptr %v506, i32 2
+  store float %v505, ptr %v507, align 4
+  %v509 = load float, ptr %v7, align 4
+  %v510 = load ptr, ptr %v2, align 4
+  %v511 = getelementptr inbounds %s.12, ptr %v510, i32 2
+  %v512 = getelementptr inbounds %s.12, ptr %v511, i32 0, i32 1
+  store float %v509, ptr %v512, align 4
+  %v513 = load float, ptr %v8, align 4
+  %v514 = load ptr, ptr %v2, align 4
+  %v515 = getelementptr inbounds %s.12, ptr %v514, i32 3
+  store float %v513, ptr %v515, align 4
+  %v517 = load float, ptr %v9, align 4
+  %v518 = load ptr, ptr %v2, align 4
+  %v519 = getelementptr inbounds %s.12, ptr %v518, i32 3
+  %v520 = getelementptr inbounds %s.12, ptr %v519, i32 0, i32 1
+  store float %v517, ptr %v520, align 4
+  %v521 = load float, ptr %v4, align 4
+  %v522 = load ptr, ptr %v2, align 4
+  %v523 = getelementptr inbounds %s.12, ptr %v522, i32 4
+  store float %v521, ptr %v523, align 4
+  %v525 = load float, ptr %v5, align 4
   %v526 = fsub float -0.000000e+00, %v525
-  %v527 = load %s.12*, %s.12** %v2, align 4
-  %v528 = getelementptr inbounds %s.12, %s.12* %v527, i32 4
-  %v529 = getelementptr inbounds %s.12, %s.12* %v528, i32 0, i32 1
-  store float %v526, float* %v529, align 4
-  %v530 = load float, float* %v10, align 4
-  %v531 = load %s.12*, %s.12** %v2, align 4
-  %v532 = getelementptr inbounds %s.12, %s.12* %v531, i32 5
-  %v533 = getelementptr inbounds %s.12, %s.12* %v532, i32 0, i32 0
-  store float %v530, float* %v533, align 4
-  %v534 = load %s.12*, %s.12** %v2, align 4
-  %v535 = getelementptr inbounds %s.12, %s.12* %v534, i32 5
-  %v536 = getelementptr inbounds %s.12, %s.12* %v535, i32 0, i32 1
-  store float 0.000000e+00, float* %v536, align 4
-  %v537 = load float, float* %v11, align 4
-  %v538 = load %s.12*, %s.12** %v2, align 4
-  %v539 = getelementptr inbounds %s.12, %s.12* %v538, i32 6
-  %v540 = getelementptr inbounds %s.12, %s.12* %v539, i32 0, i32 0
-  store float %v537, float* %v540, align 4
-  %v541 = load float, float* %v12, align 4
-  %v542 = load %s.12*, %s.12** %v2, align 4
-  %v543 = getelementptr inbounds %s.12, %s.12* %v542, i32 6
-  %v544 = getelementptr inbounds %s.12, %s.12* %v543, i32 0, i32 1
-  store float %v541, float* %v544, align 4
-  %v545 = load float, float* %v6, align 4
-  %v546 = load %s.12*, %s.12** %v2, align 4
-  %v547 = getelementptr inbounds %s.12, %s.12* %v546, i32 7
-  %v548 = getelementptr inbounds %s.12, %s.12* %v547, i32 0, i32 0
-  store float %v545, float* %v548, align 4
-  %v549 = load float, float* %v7, align 4
-  %v550 = load %s.12*, %s.12** %v2, align 4
-  %v551 = getelementptr inbounds %s.12, %s.12* %v550, i32 7
-  %v552 = getelementptr inbounds %s.12, %s.12* %v551, i32 0, i32 1
-  store float %v549, float* %v552, align 4
-  %v553 = load float, float* %v6, align 4
-  %v554 = load %s.12*, %s.12** %v2, align 4
-  %v555 = getelementptr inbounds %s.12, %s.12* %v554, i32 8
-  %v556 = getelementptr inbounds %s.12, %s.12* %v555, i32 0, i32 0
-  store float %v553, float* %v556, align 4
-  %v557 = load float, float* %v7, align 4
+  %v527 = load ptr, ptr %v2, align 4
+  %v528 = getelementptr inbounds %s.12, ptr %v527, i32 4
+  %v529 = getelementptr inbounds %s.12, ptr %v528, i32 0, i32 1
+  store float %v526, ptr %v529, align 4
+  %v530 = load float, ptr %v10, align 4
+  %v531 = load ptr, ptr %v2, align 4
+  %v532 = getelementptr inbounds %s.12, ptr %v531, i32 5
+  store float %v530, ptr %v532, align 4
+  %v534 = load ptr, ptr %v2, align 4
+  %v535 = getelementptr inbounds %s.12, ptr %v534, i32 5
+  %v536 = getelementptr inbounds %s.12, ptr %v535, i32 0, i32 1
+  store float 0.000000e+00, ptr %v536, align 4
+  %v537 = load float, ptr %v11, align 4
+  %v538 = load ptr, ptr %v2, align 4
+  %v539 = getelementptr inbounds %s.12, ptr %v538, i32 6
+  store float %v537, ptr %v539, align 4
+  %v541 = load float, ptr %v12, align 4
+  %v542 = load ptr, ptr %v2, align 4
+  %v543 = getelementptr inbounds %s.12, ptr %v542, i32 6
+  %v544 = getelementptr inbounds %s.12, ptr %v543, i32 0, i32 1
+  store float %v541, ptr %v544, align 4
+  %v545 = load float, ptr %v6, align 4
+  %v546 = load ptr, ptr %v2, align 4
+  %v547 = getelementptr inbounds %s.12, ptr %v546, i32 7
+  store float %v545, ptr %v547, align 4
+  %v549 = load float, ptr %v7, align 4
+  %v550 = load ptr, ptr %v2, align 4
+  %v551 = getelementptr inbounds %s.12, ptr %v550, i32 7
+  %v552 = getelementptr inbounds %s.12, ptr %v551, i32 0, i32 1
+  store float %v549, ptr %v552, align 4
+  %v553 = load float, ptr %v6, align 4
+  %v554 = load ptr, ptr %v2, align 4
+  %v555 = getelementptr inbounds %s.12, ptr %v554, i32 8
+  store float %v553, ptr %v555, align 4
+  %v557 = load float, ptr %v7, align 4
   %v558 = fsub float -0.000000e+00, %v557
-  %v559 = load %s.12*, %s.12** %v2, align 4
-  %v560 = getelementptr inbounds %s.12, %s.12* %v559, i32 8
-  %v561 = getelementptr inbounds %s.12, %s.12* %v560, i32 0, i32 1
-  store float %v558, float* %v561, align 4
-  %v562 = load float, float* %v11, align 4
-  %v563 = load %s.12*, %s.12** %v2, align 4
-  %v564 = getelementptr inbounds %s.12, %s.12* %v563, i32 9
-  %v565 = getelementptr inbounds %s.12, %s.12* %v564, i32 0, i32 0
-  store float %v562, float* %v565, align 4
-  %v566 = load float, float* %v12, align 4
+  %v559 = load ptr, ptr %v2, align 4
+  %v560 = getelementptr inbounds %s.12, ptr %v559, i32 8
+  %v561 = getelementptr inbounds %s.12, ptr %v560, i32 0, i32 1
+  store float %v558, ptr %v561, align 4
+  %v562 = load float, ptr %v11, align 4
+  %v563 = load ptr, ptr %v2, align 4
+  %v564 = getelementptr inbounds %s.12, ptr %v563, i32 9
+  store float %v562, ptr %v564, align 4
+  %v566 = load float, ptr %v12, align 4
   %v567 = fsub float -0.000000e+00, %v566
-  %v568 = load %s.12*, %s.12** %v2, align 4
-  %v569 = getelementptr inbounds %s.12, %s.12* %v568, i32 9
-  %v570 = getelementptr inbounds %s.12, %s.12* %v569, i32 0, i32 1
-  store float %v567, float* %v570, align 4
-  %v571 = load float, float* %v10, align 4
-  %v572 = load %s.12*, %s.12** %v2, align 4
-  %v573 = getelementptr inbounds %s.12, %s.12* %v572, i32 10
-  %v574 = getelementptr inbounds %s.12, %s.12* %v573, i32 0, i32 0
-  store float %v571, float* %v574, align 4
-  %v575 = load %s.12*, %s.12** %v2, align 4
-  %v576 = getelementptr inbounds %s.12, %s.12* %v575, i32 10
-  %v577 = getelementptr inbounds %s.12, %s.12* %v576, i32 0, i32 1
-  store float 0.000000e+00, float* %v577, align 4
-  %v578 = load float, float* %v4, align 4
-  %v579 = load %s.12*, %s.12** %v2, align 4
-  %v580 = getelementptr inbounds %s.12, %s.12* %v579, i32 11
-  %v581 = getelementptr inbounds %s.12, %s.12* %v580, i32 0, i32 0
-  store float %v578, float* %v581, align 4
-  %v582 = load float, float* %v5, align 4
-  %v583 = load %s.12*, %s.12** %v2, align 4
-  %v584 = getelementptr inbounds %s.12, %s.12* %v583, i32 11
-  %v585 = getelementptr inbounds %s.12, %s.12* %v584, i32 0, i32 1
-  store float %v582, float* %v585, align 4
-  %v586 = load float, float* %v8, align 4
-  %v587 = load %s.12*, %s.12** %v2, align 4
-  %v588 = getelementptr inbounds %s.12, %s.12* %v587, i32 12
-  %v589 = getelementptr inbounds %s.12, %s.12* %v588, i32 0, i32 0
-  store float %v586, float* %v589, align 4
-  %v590 = load float, float* %v9, align 4
+  %v568 = load ptr, ptr %v2, align 4
+  %v569 = getelementptr inbounds %s.12, ptr %v568, i32 9
+  %v570 = getelementptr inbounds %s.12, ptr %v569, i32 0, i32 1
+  store float %v567, ptr %v570, align 4
+  %v571 = load float, ptr %v10, align 4
+  %v572 = load ptr, ptr %v2, align 4
+  %v573 = getelementptr inbounds %s.12, ptr %v572, i32 10
+  store float %v571, ptr %v573, align 4
+  %v575 = load ptr, ptr %v2, align 4
+  %v576 = getelementptr inbounds %s.12, ptr %v575, i32 10
+  %v577 = getelementptr inbounds %s.12, ptr %v576, i32 0, i32 1
+  store float 0.000000e+00, ptr %v577, align 4
+  %v578 = load float, ptr %v4, align 4
+  %v579 = load ptr, ptr %v2, align 4
+  %v580 = getelementptr inbounds %s.12, ptr %v579, i32 11
+  store float %v578, ptr %v580, align 4
+  %v582 = load float, ptr %v5, align 4
+  %v583 = load ptr, ptr %v2, align 4
+  %v584 = getelementptr inbounds %s.12, ptr %v583, i32 11
+  %v585 = getelementptr inbounds %s.12, ptr %v584, i32 0, i32 1
+  store float %v582, ptr %v585, align 4
+  %v586 = load float, ptr %v8, align 4
+  %v587 = load ptr, ptr %v2, align 4
+  %v588 = getelementptr inbounds %s.12, ptr %v587, i32 12
+  store float %v586, ptr %v588, align 4
+  %v590 = load float, ptr %v9, align 4
   %v591 = fsub float -0.000000e+00, %v590
-  %v592 = load %s.12*, %s.12** %v2, align 4
-  %v593 = getelementptr inbounds %s.12, %s.12* %v592, i32 12
-  %v594 = getelementptr inbounds %s.12, %s.12* %v593, i32 0, i32 1
-  store float %v591, float* %v594, align 4
-  %v595 = load float, float* %v6, align 4
-  %v596 = load %s.12*, %s.12** %v2, align 4
-  %v597 = getelementptr inbounds %s.12, %s.12* %v596, i32 13
-  %v598 = getelementptr inbounds %s.12, %s.12* %v597, i32 0, i32 0
-  store float %v595, float* %v598, align 4
-  %v599 = load float, float* %v7, align 4
+  %v592 = load ptr, ptr %v2, align 4
+  %v593 = getelementptr inbounds %s.12, ptr %v592, i32 12
+  %v594 = getelementptr inbounds %s.12, ptr %v593, i32 0, i32 1
+  store float %v591, ptr %v594, align 4
+  %v595 = load float, ptr %v6, align 4
+  %v596 = load ptr, ptr %v2, align 4
+  %v597 = getelementptr inbounds %s.12, ptr %v596, i32 13
+  store float %v595, ptr %v597, align 4
+  %v599 = load float, ptr %v7, align 4
   %v600 = fsub float -0.000000e+00, %v599
-  %v601 = load %s.12*, %s.12** %v2, align 4
-  %v602 = getelementptr inbounds %s.12, %s.12* %v601, i32 13
-  %v603 = getelementptr inbounds %s.12, %s.12* %v602, i32 0, i32 1
-  store float %v600, float* %v603, align 4
-  %v604 = load float, float* %v4, align 4
-  %v605 = load %s.12*, %s.12** %v2, align 4
-  %v606 = getelementptr inbounds %s.12, %s.12* %v605, i32 14
-  %v607 = getelementptr inbounds %s.12, %s.12* %v606, i32 0, i32 0
-  store float %v604, float* %v607, align 4
-  %v608 = load float, float* %v5, align 4
+  %v601 = load ptr, ptr %v2, align 4
+  %v602 = getelementptr inbounds %s.12, ptr %v601, i32 13
+  %v603 = getelementptr inbounds %s.12, ptr %v602, i32 0, i32 1
+  store float %v600, ptr %v603, align 4
+  %v604 = load float, ptr %v4, align 4
+  %v605 = load ptr, ptr %v2, align 4
+  %v606 = getelementptr inbounds %s.12, ptr %v605, i32 14
+  store float %v604, ptr %v606, align 4
+  %v608 = load float, ptr %v5, align 4
   %v609 = fsub float -0.000000e+00, %v608
-  %v610 = load %s.12*, %s.12** %v2, align 4
-  %v611 = getelementptr inbounds %s.12, %s.12* %v610, i32 14
-  %v612 = getelementptr inbounds %s.12, %s.12* %v611, i32 0, i32 1
-  store float %v609, float* %v612, align 4
-  %v613 = load float, float* %v3, align 4
-  %v614 = load %s.12*, %s.12** %v2, align 4
-  %v615 = getelementptr inbounds %s.12, %s.12* %v614, i32 15
-  %v616 = getelementptr inbounds %s.12, %s.12* %v615, i32 0, i32 0
-  store float %v613, float* %v616, align 4
-  %v617 = load %s.12*, %s.12** %v2, align 4
-  %v618 = getelementptr inbounds %s.12, %s.12* %v617, i32 15
-  %v619 = getelementptr inbounds %s.12, %s.12* %v618, i32 0, i32 1
-  store float 0.000000e+00, float* %v619, align 4
+  %v610 = load ptr, ptr %v2, align 4
+  %v611 = getelementptr inbounds %s.12, ptr %v610, i32 14
+  %v612 = getelementptr inbounds %s.12, ptr %v611, i32 0, i32 1
+  store float %v609, ptr %v612, align 4
+  %v613 = load float, ptr %v3, align 4
+  %v614 = load ptr, ptr %v2, align 4
+  %v615 = getelementptr inbounds %s.12, ptr %v614, i32 15
+  store float %v613, ptr %v615, align 4
+  %v617 = load ptr, ptr %v2, align 4
+  %v618 = getelementptr inbounds %s.12, ptr %v617, i32 15
+  %v619 = getelementptr inbounds %s.12, ptr %v618, i32 0, i32 1
+  store float 0.000000e+00, ptr %v619, align 4
   br label %b3
 
 b3:                                               ; preds = %b2, %b1

diff --git a/llvm/test/CodeGen/Hexagon/hvx-bitcast-v64i1.ll b/llvm/test/CodeGen/Hexagon/hvx-bitcast-v64i1.ll
index cb135f72448fe..c4d2564a86a38 100644
--- a/llvm/test/CodeGen/Hexagon/hvx-bitcast-v64i1.ll
+++ b/llvm/test/CodeGen/Hexagon/hvx-bitcast-v64i1.ll
@@ -11,13 +11,13 @@ b0:
   br i1 undef, label %b2, label %b1
 
 b1:                                               ; preds = %b0
-  %v0 = load i8, i8* undef, align 1
+  %v0 = load i8, ptr undef, align 1
   %v1 = zext i8 %v0 to i32
   %v2 = add nsw i32 %v1, -1
   %v3 = insertelement <64 x i32> undef, i32 %v2, i32 0
   %v4 = shufflevector <64 x i32> %v3, <64 x i32> undef, <64 x i32> zeroinitializer
   %v5 = icmp ule <64 x i32> undef, %v4
-  %v6 = call <64 x i8> @llvm.masked.load.v64i8.p0v64i8(<64 x i8>* nonnull undef, i32 1, <64 x i1> %v5, <64 x i8> undef)
+  %v6 = call <64 x i8> @llvm.masked.load.v64i8.p0(ptr nonnull undef, i32 1, <64 x i1> %v5, <64 x i8> undef)
   %v7 = lshr <64 x i8> %v6, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
   %v8 = and <64 x i8> %v7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %v9 = zext <64 x i8> %v8 to <64 x i32>
@@ -31,9 +31,8 @@ b1:                                               ; preds = %b0
   %v17 = add <64 x i32> %v16, undef
   %v18 = add <64 x i32> %v17, undef
   %v19 = extractelement <64 x i32> %v18, i32 0
-  %v20 = getelementptr inbounds i8, i8* null, i32 2160
-  %v21 = bitcast i8* %v20 to i32*
-  store i32 %v19, i32* %v21, align 4
+  %v20 = getelementptr inbounds i8, ptr null, i32 2160
+  store i32 %v19, ptr %v20, align 4
   br label %b2
 
 b2:                                               ; preds = %b1, %b0
@@ -41,7 +40,7 @@ b2:                                               ; preds = %b1, %b0
 }
 
 ; Function Attrs: argmemonly nounwind readonly willreturn
-declare <64 x i8> @llvm.masked.load.v64i8.p0v64i8(<64 x i8>*, i32 immarg, <64 x i1>, <64 x i8>) #1
+declare <64 x i8> @llvm.masked.load.v64i8.p0(ptr, i32 immarg, <64 x i1>, <64 x i8>) #1
 
 attributes #0 = { "target-features"="+hvx-length64b,+hvxv67,+v67,-long-calls" }
 attributes #1 = { argmemonly nounwind readonly willreturn }

diff --git a/llvm/test/CodeGen/Hexagon/hvx-byte-store-double.ll b/llvm/test/CodeGen/Hexagon/hvx-byte-store-double.ll
index 07ac087c1bdbf..d737510b28802 100644
--- a/llvm/test/CodeGen/Hexagon/hvx-byte-store-double.ll
+++ b/llvm/test/CodeGen/Hexagon/hvx-byte-store-double.ll
@@ -5,53 +5,53 @@
 ; CHECK-LABEL: f0:
 ; CHECK: if (q{{[0-3]}}) vmem(r{{[0-9]+}}+#0) = v{{[0-9]+}}
 
-define void @f0(<32 x i32> %a0, i8* %a1, <32 x i32> %a2) local_unnamed_addr {
+define void @f0(<32 x i32> %a0, ptr %a1, <32 x i32> %a2) local_unnamed_addr {
 b0:
   %v0 = tail call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a0, i32 -1)
-  tail call void @llvm.hexagon.V6.vS32b.qpred.ai.128B(<128 x i1> %v0, i8* %a1, <32 x i32> %a2)
+  tail call void @llvm.hexagon.V6.vS32b.qpred.ai.128B(<128 x i1> %v0, ptr %a1, <32 x i32> %a2)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.hexagon.V6.vS32b.qpred.ai.128B(<128 x i1>, i8*, <32 x i32>) #0
+declare void @llvm.hexagon.V6.vS32b.qpred.ai.128B(<128 x i1>, ptr, <32 x i32>) #0
 
 ; CHECK-LABEL: f1:
 ; CHECK: if (!q{{[0-3]}}) vmem(r{{[0-9]+}}+#0) = v{{[0-9]+}}
 
-define void @f1(<32 x i32> %a0, i8* %a1, <32 x i32> %a2) local_unnamed_addr {
+define void @f1(<32 x i32> %a0, ptr %a1, <32 x i32> %a2) local_unnamed_addr {
 b0:
   %v0 = tail call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a0, i32 -1)
-  tail call void @llvm.hexagon.V6.vS32b.nqpred.ai.128B(<128 x i1> %v0, i8* %a1, <32 x i32> %a2)
+  tail call void @llvm.hexagon.V6.vS32b.nqpred.ai.128B(<128 x i1> %v0, ptr %a1, <32 x i32> %a2)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.hexagon.V6.vS32b.nqpred.ai.128B(<128 x i1>, i8*, <32 x i32>) #0
+declare void @llvm.hexagon.V6.vS32b.nqpred.ai.128B(<128 x i1>, ptr, <32 x i32>) #0
 
 ; CHECK-LABEL: f2:
 ; CHECK: if (q{{[0-3]}}) vmem(r{{[0-9]+}}+#0):nt = v{{[0-9]+}}
 
-define void @f2(<32 x i32> %a0, i8* %a1, <32 x i32> %a2) local_unnamed_addr {
+define void @f2(<32 x i32> %a0, ptr %a1, <32 x i32> %a2) local_unnamed_addr {
 b0:
   %v0 = tail call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a0, i32 -1)
-  tail call void @llvm.hexagon.V6.vS32b.nt.qpred.ai.128B(<128 x i1> %v0, i8* %a1, <32 x i32> %a2)
+  tail call void @llvm.hexagon.V6.vS32b.nt.qpred.ai.128B(<128 x i1> %v0, ptr %a1, <32 x i32> %a2)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.hexagon.V6.vS32b.nt.qpred.ai.128B(<128 x i1>, i8*, <32 x i32>) #0
+declare void @llvm.hexagon.V6.vS32b.nt.qpred.ai.128B(<128 x i1>, ptr, <32 x i32>) #0
 
 ; CHECK-LABEL: f3:
 ; CHECK: if (!q{{[0-3]}}) vmem(r{{[0-9]+}}+#0):nt = v{{[0-9]+}}
 
-define void @f3(<32 x i32> %a0, i8* %a1, <32 x i32> %a2) local_unnamed_addr {
+define void @f3(<32 x i32> %a0, ptr %a1, <32 x i32> %a2) local_unnamed_addr {
 b0:
   %v0 = tail call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a0, i32 -1)
-  tail call void @llvm.hexagon.V6.vS32b.nt.nqpred.ai.128B(<128 x i1> %v0, i8* %a1, <32 x i32> %a2)
+  tail call void @llvm.hexagon.V6.vS32b.nt.nqpred.ai.128B(<128 x i1> %v0, ptr %a1, <32 x i32> %a2)
   ret void
 }
 
-declare void @llvm.hexagon.V6.vS32b.nt.nqpred.ai.128B(<128 x i1>, i8*, <32 x i32>) #0
+declare void @llvm.hexagon.V6.vS32b.nt.nqpred.ai.128B(<128 x i1>, ptr, <32 x i32>) #0
 declare <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32>, i32) #1
 
 attributes #0 = { argmemonly nounwind }

diff --git a/llvm/test/CodeGen/Hexagon/hvx-byte-store.ll b/llvm/test/CodeGen/Hexagon/hvx-byte-store.ll
index 78c5a1161ca89..170ec02dfa289 100644
--- a/llvm/test/CodeGen/Hexagon/hvx-byte-store.ll
+++ b/llvm/test/CodeGen/Hexagon/hvx-byte-store.ll
@@ -5,53 +5,53 @@
 ; CHECK-LABEL: f0:
 ; CHECK: if (q{{[0-3]}}) vmem(r{{[0-9]+}}+#0) = v{{[0-9]+}}
 
-define void @f0(<16 x i32> %a0, i8* %a1, <16 x i32> %a2) local_unnamed_addr {
+define void @f0(<16 x i32> %a0, ptr %a1, <16 x i32> %a2) local_unnamed_addr {
 b0:
   %v0 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a0, i32 -1)
-  tail call void @llvm.hexagon.V6.vS32b.qpred.ai(<64 x i1> %v0, i8* %a1, <16 x i32> %a2)
+  tail call void @llvm.hexagon.V6.vS32b.qpred.ai(<64 x i1> %v0, ptr %a1, <16 x i32> %a2)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.hexagon.V6.vS32b.qpred.ai(<64 x i1>, i8*, <16 x i32>) #0
+declare void @llvm.hexagon.V6.vS32b.qpred.ai(<64 x i1>, ptr, <16 x i32>) #0
 
 ; CHECK-LABEL: f1:
 ; CHECK: if (!q{{[0-3]}}) vmem(r{{[0-9]+}}+#0) = v{{[0-9]+}}
 
-define void @f1(<16 x i32> %a0, i8* %a1, <16 x i32> %a2) local_unnamed_addr {
+define void @f1(<16 x i32> %a0, ptr %a1, <16 x i32> %a2) local_unnamed_addr {
 b0:
   %v0 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a0, i32 -1)
-  tail call void @llvm.hexagon.V6.vS32b.nqpred.ai(<64 x i1> %v0, i8* %a1, <16 x i32> %a2)
+  tail call void @llvm.hexagon.V6.vS32b.nqpred.ai(<64 x i1> %v0, ptr %a1, <16 x i32> %a2)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.hexagon.V6.vS32b.nqpred.ai(<64 x i1>, i8*, <16 x i32>) #0
+declare void @llvm.hexagon.V6.vS32b.nqpred.ai(<64 x i1>, ptr, <16 x i32>) #0
 
 ; CHECK-LABEL: f2:
 ; CHECK: if (q{{[0-3]}}) vmem(r{{[0-9]+}}+#0):nt = v{{[0-9]+}}
 
-define void @f2(<16 x i32> %a0, i8* %a1, <16 x i32> %a2) local_unnamed_addr {
+define void @f2(<16 x i32> %a0, ptr %a1, <16 x i32> %a2) local_unnamed_addr {
 b0:
   %v0 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a0, i32 -1)
-  tail call void @llvm.hexagon.V6.vS32b.nt.qpred.ai(<64 x i1> %v0, i8* %a1, <16 x i32> %a2)
+  tail call void @llvm.hexagon.V6.vS32b.nt.qpred.ai(<64 x i1> %v0, ptr %a1, <16 x i32> %a2)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.hexagon.V6.vS32b.nt.qpred.ai(<64 x i1>, i8*, <16 x i32>) #0
+declare void @llvm.hexagon.V6.vS32b.nt.qpred.ai(<64 x i1>, ptr, <16 x i32>) #0
 
 ; CHECK-LABEL: f3:
 ; CHECK: if (!q{{[0-3]}}) vmem(r{{[0-9]+}}+#0):nt = v{{[0-9]+}}
 
-define void @f3(<16 x i32> %a0, i8* %a1, <16 x i32> %a2) local_unnamed_addr {
+define void @f3(<16 x i32> %a0, ptr %a1, <16 x i32> %a2) local_unnamed_addr {
 b0:
   %v0 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a0, i32 -1)
-  tail call void @llvm.hexagon.V6.vS32b.nt.nqpred.ai(<64 x i1> %v0, i8* %a1, <16 x i32> %a2)
+  tail call void @llvm.hexagon.V6.vS32b.nt.nqpred.ai(<64 x i1> %v0, ptr %a1, <16 x i32> %a2)
   ret void
 }
 
-declare void @llvm.hexagon.V6.vS32b.nt.nqpred.ai(<64 x i1>, i8*, <16 x i32>) #0
+declare void @llvm.hexagon.V6.vS32b.nt.nqpred.ai(<64 x i1>, ptr, <16 x i32>) #0
 declare <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32>, i32) #1
 
 attributes #0 = { argmemonly nounwind }

diff --git a/llvm/test/CodeGen/Hexagon/hvx-dbl-dual-output.ll b/llvm/test/CodeGen/Hexagon/hvx-dbl-dual-output.ll
index 6127473852726..8cd4265eb64f6 100644
--- a/llvm/test/CodeGen/Hexagon/hvx-dbl-dual-output.ll
+++ b/llvm/test/CodeGen/Hexagon/hvx-dbl-dual-output.ll
@@ -3,7 +3,7 @@
 
 ; Test that we compile the HVX dual output intrinsics.
 
-define inreg <32 x i32> @f0(<32 x i32> %a0, <32 x i32> %a1, <32 x i32>* %a2) #0 {
+define inreg <32 x i32> @f0(<32 x i32> %a0, <32 x i32> %a1, ptr %a2) #0 {
 ; CHECK-LABEL: f0:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -18,14 +18,14 @@ define inreg <32 x i32> @f0(<32 x i32> %a0, <32 x i32> %a1, <32 x i32>* %a2) #0
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
 b0:
-  %v0 = load <32 x i32>, <32 x i32>* %a2, align 128
+  %v0 = load <32 x i32>, ptr %a2, align 128
   %v1 = tail call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %v0, i32 -1)
   %v2 = tail call { <32 x i32>, <128 x i1> } @llvm.hexagon.V6.vaddcarry.128B(<32 x i32> %a0, <32 x i32> %a1, <128 x i1> %v1)
   %v3 = extractvalue { <32 x i32>, <128 x i1> } %v2, 0
   ret <32 x i32> %v3
 }
 
-define inreg <32 x i32> @f1(<32 x i32> %a0, <32 x i32> %a1, <32 x i32>* %a2) #0 {
+define inreg <32 x i32> @f1(<32 x i32> %a0, <32 x i32> %a1, ptr %a2) #0 {
 ; CHECK-LABEL: f1:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -40,7 +40,7 @@ define inreg <32 x i32> @f1(<32 x i32> %a0, <32 x i32> %a1, <32 x i32>* %a2) #0
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
 b0:
-  %v0 = load <32 x i32>, <32 x i32>* %a2, align 128
+  %v0 = load <32 x i32>, ptr %a2, align 128
   %v1 = tail call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %v0, i32 -1)
   %v2 = tail call { <32 x i32>, <128 x i1> } @llvm.hexagon.V6.vsubcarry.128B(<32 x i32> %a0, <32 x i32> %a1, <128 x i1> %v1)
   %v3 = extractvalue { <32 x i32>, <128 x i1> } %v2, 0

diff --git a/llvm/test/CodeGen/Hexagon/hvx-double-vzero.ll b/llvm/test/CodeGen/Hexagon/hvx-double-vzero.ll
index 0c1e4cae4d565..110eb6cdb0a44 100644
--- a/llvm/test/CodeGen/Hexagon/hvx-double-vzero.ll
+++ b/llvm/test/CodeGen/Hexagon/hvx-double-vzero.ll
@@ -5,11 +5,10 @@
 
 ; CHECK-LABEL: f0:
 ; CHECK: [[VREG1:v([0-9]+)]] = vxor([[VREG1]],[[VREG1]])
-define void @f0(i16** nocapture %a0) #0 {
+define void @f0(ptr nocapture %a0) #0 {
 b0:
-  %v0 = bitcast i16** %a0 to <32 x i32>*
   %v1 = tail call <32 x i32> @llvm.hexagon.V6.vd0.128B()
-  store <32 x i32> %v1, <32 x i32>* %v0, align 64
+  store <32 x i32> %v1, ptr %a0, align 64
   ret void
 }
 
@@ -18,11 +17,10 @@ declare <32 x i32> @llvm.hexagon.V6.vd0.128B() #1
 
 ; CHECK-LABEL: f1:
 ; CHECK: [[VREG2:v([0-9]+):([0-9]+).w]] = vsub([[VREG2]],[[VREG2]])
-define void @f1(i16** nocapture %a0) #0 {
+define void @f1(ptr nocapture %a0) #0 {
 b0:
-  %v0 = bitcast i16** %a0 to <64 x i32>*
   %v1 = tail call <64 x i32> @llvm.hexagon.V6.vdd0.128B()
-  store <64 x i32> %v1, <64 x i32>* %v0, align 128
+  store <64 x i32> %v1, ptr %a0, align 128
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/hvx-dual-output.ll b/llvm/test/CodeGen/Hexagon/hvx-dual-output.ll
index 8c1e19385e23a..cb4158d37fdb2 100644
--- a/llvm/test/CodeGen/Hexagon/hvx-dual-output.ll
+++ b/llvm/test/CodeGen/Hexagon/hvx-dual-output.ll
@@ -3,7 +3,7 @@
 
 ; Test that we compile the HVX dual output intrinsics.
 
-define inreg <16 x i32> @f0(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %a2) #0 {
+define inreg <16 x i32> @f0(<16 x i32> %a0, <16 x i32> %a1, ptr %a2) #0 {
 ; CHECK-LABEL: f0:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -18,14 +18,14 @@ define inreg <16 x i32> @f0(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %a2) #0
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
 b0:
-  %v0 = load <16 x i32>, <16 x i32>* %a2, align 64
+  %v0 = load <16 x i32>, ptr %a2, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call { <16 x i32>, <64 x i1> } @llvm.hexagon.V6.vaddcarry(<16 x i32> %a0, <16 x i32> %a1, <64 x i1> %v1)
   %v3 = extractvalue { <16 x i32>, <64 x i1> } %v2, 0
   ret <16 x i32> %v3
 }
 
-define inreg <16 x i32> @f1(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %a2) #0 {
+define inreg <16 x i32> @f1(<16 x i32> %a0, <16 x i32> %a1, ptr %a2) #0 {
 ; CHECK-LABEL: f1:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -40,7 +40,7 @@ define inreg <16 x i32> @f1(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %a2) #0
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
 b0:
-  %v0 = load <16 x i32>, <16 x i32>* %a2, align 64
+  %v0 = load <16 x i32>, ptr %a2, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call { <16 x i32>, <64 x i1> } @llvm.hexagon.V6.vsubcarry(<16 x i32> %a0, <16 x i32> %a1, <64 x i1> %v1)
   %v3 = extractvalue { <16 x i32>, <64 x i1> } %v2, 0

diff --git a/llvm/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll b/llvm/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll
index 22c2400f4c4f8..ab7bf1b4b0e86 100644
--- a/llvm/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll
+++ b/llvm/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll
@@ -7,19 +7,17 @@
 
 %s.0 = type { i32 }
 
-define void @f0(%s.0* noalias %a0, %s.0* noalias %a1) #0 align 2 {
+define void @f0(ptr noalias %a0, ptr noalias %a1) #0 align 2 {
 b0:
   br i1 undef, label %b1, label %b2
 
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ %v7, %b1 ], [ 0, %b0 ]
   %v1 = mul nuw nsw i32 %v0, 64
-  %v2 = getelementptr %s.0, %s.0* %a0, i32 %v1
-  %v3 = getelementptr %s.0, %s.0* %a1, i32 %v1
-  %v4 = bitcast %s.0* %v2 to <64 x i32>*
-  %v5 = load <64 x i32>, <64 x i32>* %v4, align 256
-  %v6 = bitcast %s.0* %v3 to <64 x i32>*
-  store <64 x i32> %v5, <64 x i32>* %v6, align 256
+  %v2 = getelementptr %s.0, ptr %a0, i32 %v1
+  %v3 = getelementptr %s.0, ptr %a1, i32 %v1
+  %v5 = load <64 x i32>, ptr %v2, align 256
+  store <64 x i32> %v5, ptr %v3, align 256
   %v7 = add nuw nsw i32 %v0, 1
   br i1 undef, label %b1, label %b2
 

diff --git a/llvm/test/CodeGen/Hexagon/hvx-nontemporal.ll b/llvm/test/CodeGen/Hexagon/hvx-nontemporal.ll
index 38e597df1ba8e..5054197fdf09c 100644
--- a/llvm/test/CodeGen/Hexagon/hvx-nontemporal.ll
+++ b/llvm/test/CodeGen/Hexagon/hvx-nontemporal.ll
@@ -2,21 +2,21 @@
 target triple = "hexagon"
 
 ; Function Attrs: norecurse nounwind
-define void @test(<32 x i32>* nocapture readonly %x, <32 x i32>* nocapture readnone %y, <32 x i32>* nocapture %a, <32 x i32>* nocapture %b) #0 {
+define void @test(ptr nocapture readonly %x, ptr nocapture readnone %y, ptr nocapture %a, ptr nocapture %b) #0 {
 entry:
 ; CHECK: v0 = vmem(r0+#7):nt
-  %add.ptr = getelementptr inbounds <32 x i32>, <32 x i32>* %x, i32 7
-  %0 = load <32 x i32>, <32 x i32>* %add.ptr, align 128, !tbaa !1, !nontemporal !4
+  %add.ptr = getelementptr inbounds <32 x i32>, ptr %x, i32 7
+  %0 = load <32 x i32>, ptr %add.ptr, align 128, !tbaa !1, !nontemporal !4
 
 ; CHECK: v1.cur = vmem(r2+#0):nt
-  %1 = load <32 x i32>, <32 x i32>* %a, align 128, !tbaa !1, !nontemporal !4
+  %1 = load <32 x i32>, ptr %a, align 128, !tbaa !1, !nontemporal !4
 
 ; CHECK: vmem(r3+#3):nt = v1
-  %add.ptr2 = getelementptr inbounds <32 x i32>, <32 x i32>* %b, i32 3
-  store <32 x i32> %1, <32 x i32>* %add.ptr2, align 128, !tbaa !1, !nontemporal !4
+  %add.ptr2 = getelementptr inbounds <32 x i32>, ptr %b, i32 3
+  store <32 x i32> %1, ptr %add.ptr2, align 128, !tbaa !1, !nontemporal !4
 
 ; CHECK: vmem(r2+#0):nt = v0
-  store <32 x i32> %0, <32 x i32>* %a, align 128, !tbaa !1, !nontemporal !4
+  store <32 x i32> %0, ptr %a, align 128, !tbaa !1, !nontemporal !4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/hvx-reuse-fi-base.ll b/llvm/test/CodeGen/Hexagon/hvx-reuse-fi-base.ll
index 51a279ee4ab50..6000b9bc56b91 100644
--- a/llvm/test/CodeGen/Hexagon/hvx-reuse-fi-base.ll
+++ b/llvm/test/CodeGen/Hexagon/hvx-reuse-fi-base.ll
@@ -10,7 +10,7 @@ target triple = "hexagon"
 
 declare dso_local void @f0() #0
 
-declare dso_local void @f1(i8*, ...) #0
+declare dso_local void @f1(ptr, ...) #0
 
 ; Function Attrs: nounwind readnone
 declare <32 x i32> @llvm.hexagon.V6.vandqrt.128B(<128 x i1>, i32) #1
@@ -168,37 +168,37 @@ b0:
   %v1 = call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 1)
   %v2 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %v1, i32 16843009)
   %v3 = call <32 x i32> @llvm.hexagon.V6.vandqrt.128B(<128 x i1> %v2, i32 -1)
-  store <32 x i32> %v3, <32 x i32>* %v0, align 128
+  store <32 x i32> %v3, ptr %v0, align 128
   %v4 = call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 2)
   %v5 = call <64 x i32> @llvm.hexagon.V6.vaddubh.128B(<32 x i32> undef, <32 x i32> %v4)
   %v6 = call <64 x i32> @llvm.hexagon.V6.vrmpyubi.128B(<64 x i32> %v5, i32 -2147483648, i32 0)
-  store <64 x i32> %v6, <64 x i32>* @g0, align 128
-  call void (i8*, ...) @f1(i8* getelementptr inbounds ([110 x i8], [110 x i8]* @g1, i32 0, i32 0)) #2
+  store <64 x i32> %v6, ptr @g0, align 128
+  call void (ptr, ...) @f1(ptr @g1) #2
   %v7 = call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 1)
   %v8 = call <64 x i32> @llvm.hexagon.V6.vaddubh.128B(<32 x i32> %v7, <32 x i32> undef)
   %v9 = call <64 x i32> @llvm.hexagon.V6.vrmpyubi.128B(<64 x i32> %v8, i32 -1, i32 0)
-  store <64 x i32> %v9, <64 x i32>* @g0, align 128
-  call void (i8*, ...) @f1(i8* getelementptr inbounds ([102 x i8], [102 x i8]* @g2, i32 0, i32 0)) #2
+  store <64 x i32> %v9, ptr @g0, align 128
+  call void (ptr, ...) @f1(ptr @g2) #2
   %v10 = call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 1)
   %v11 = call <64 x i32> @llvm.hexagon.V6.vaddubh.128B(<32 x i32> %v10, <32 x i32> undef)
   %v12 = call <64 x i32> @llvm.hexagon.V6.vrmpyubi.128B(<64 x i32> %v11, i32 2147483647, i32 1)
-  store <64 x i32> %v12, <64 x i32>* @g0, align 128
-  call void (i8*, ...) @f1(i8* getelementptr inbounds ([110 x i8], [110 x i8]* @g3, i32 0, i32 0)) #2
+  store <64 x i32> %v12, ptr @g0, align 128
+  call void (ptr, ...) @f1(ptr @g3) #2
   %v13 = call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 2)
   %v14 = call <64 x i32> @llvm.hexagon.V6.vaddubh.128B(<32 x i32> undef, <32 x i32> %v13)
   %v15 = call <64 x i32> @llvm.hexagon.V6.vrmpyubi.128B(<64 x i32> %v14, i32 -2147483648, i32 1)
-  store <64 x i32> %v15, <64 x i32>* @g0, align 128
+  store <64 x i32> %v15, ptr @g0, align 128
   call void @f0() #2
   %v16 = call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 2)
   %v17 = call <64 x i32> @llvm.hexagon.V6.vaddubh.128B(<32 x i32> undef, <32 x i32> %v16)
   %v18 = call <64 x i32> @llvm.hexagon.V6.vrmpyubi.128B(<64 x i32> %v17, i32 0, i32 1)
-  store <64 x i32> %v18, <64 x i32>* @g0, align 128
+  store <64 x i32> %v18, ptr @g0, align 128
   call void @f0() #2
   %v19 = call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 1)
   %v20 = call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 2)
   %v21 = call <64 x i32> @llvm.hexagon.V6.vaddubh.128B(<32 x i32> %v19, <32 x i32> %v20)
   %v22 = call <64 x i32> @llvm.hexagon.V6.vrmpyubi.128B(<64 x i32> %v21, i32 1, i32 1)
-  store <64 x i32> %v22, <64 x i32>* @g0, align 128
+  store <64 x i32> %v22, ptr @g0, align 128
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/hvx-vzero.ll b/llvm/test/CodeGen/Hexagon/hvx-vzero.ll
index 71d316aab13ae..3550f0929ff46 100644
--- a/llvm/test/CodeGen/Hexagon/hvx-vzero.ll
+++ b/llvm/test/CodeGen/Hexagon/hvx-vzero.ll
@@ -6,11 +6,10 @@
 
 ; CHECK-LABEL: f0:
 ; CHECK: [[VREG1:v([0-9]+)]] = vxor([[VREG1]],[[VREG1]])
-define void @f0(i16** nocapture %a0) #0 {
+define void @f0(ptr nocapture %a0) #0 {
 b0:
-  %v0 = bitcast i16** %a0 to <16 x i32>*
   %v1 = tail call <16 x i32> @llvm.hexagon.V6.vd0()
-  store <16 x i32> %v1, <16 x i32>* %v0, align 64
+  store <16 x i32> %v1, ptr %a0, align 64
   ret void
 }
 
@@ -19,11 +18,10 @@ declare <16 x i32> @llvm.hexagon.V6.vd0() #1
 
 ; CHECK-LABEL: f1:
 ; CHECK: [[VREG2:v([0-9]+):([0-9]+).w]] = vsub([[VREG2]],[[VREG2]])
-define void @f1(i16** nocapture %a0) #0 {
+define void @f1(ptr nocapture %a0) #0 {
 b0:
-  %v0 = bitcast i16** %a0 to <32 x i32>*
   %v1 = tail call <32 x i32> @llvm.hexagon.V6.vdd0()
-  store <32 x i32> %v1, <32 x i32>* %v0, align 128
+  store <32 x i32> %v1, ptr %a0, align 128
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/hwloop-cleanup.ll b/llvm/test/CodeGen/Hexagon/hwloop-cleanup.ll
index 71e1bf10fe64b..29cd88f334ff1 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop-cleanup.ll
+++ b/llvm/test/CodeGen/Hexagon/hwloop-cleanup.ll
@@ -9,7 +9,7 @@
 ; CHECK-NOT: cmp.eq
 ; CHECK: endloop0
 
-define i32 @f0(i32* nocapture %a0, i32 %a1) #0 {
+define i32 @f0(ptr nocapture %a0, i32 %a1) #0 {
 b0:
   %v0 = icmp sgt i32 %a1, 0
   br i1 %v0, label %b1, label %b4
@@ -19,13 +19,13 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ %v5, %b2 ], [ 0, %b1 ]
-  %v2 = phi i32* [ %v8, %b2 ], [ %a0, %b1 ]
+  %v2 = phi ptr [ %v8, %b2 ], [ %a0, %b1 ]
   %v3 = phi i32 [ %v6, %b2 ], [ 0, %b1 ]
-  %v4 = load i32, i32* %v2, align 4
+  %v4 = load i32, ptr %v2, align 4
   %v5 = add nsw i32 %v4, %v1
   %v6 = add nsw i32 %v3, 1
   %v7 = icmp eq i32 %v6, %a1
-  %v8 = getelementptr i32, i32* %v2, i32 1
+  %v8 = getelementptr i32, ptr %v2, i32 1
   br i1 %v7, label %b3, label %b2
 
 b3:                                               ; preds = %b2
@@ -44,19 +44,19 @@ b4:                                               ; preds = %b3, %b0
 ; CHECK-NOT: cmp.eq
 ; CHECK: endloop0
 
-define i32 @f1(i32* nocapture %a0) #0 {
+define i32 @f1(ptr nocapture %a0) #0 {
 b0:
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ 0, %b0 ], [ %v4, %b1 ]
-  %v1 = phi i32* [ %a0, %b0 ], [ %v7, %b1 ]
+  %v1 = phi ptr [ %a0, %b0 ], [ %v7, %b1 ]
   %v2 = phi i32 [ 0, %b0 ], [ %v5, %b1 ]
-  %v3 = load i32, i32* %v1, align 4
+  %v3 = load i32, ptr %v1, align 4
   %v4 = add nsw i32 %v3, %v0
   %v5 = add nsw i32 %v2, 1
   %v6 = icmp eq i32 %v5, 40
-  %v7 = getelementptr i32, i32* %v1, i32 1
+  %v7 = getelementptr i32, ptr %v1, i32 1
   br i1 %v6, label %b2, label %b1
 
 b2:                                               ; preds = %b1
@@ -70,17 +70,17 @@ b2:                                               ; preds = %b1
 ; CHECK-NOT: cmp.eq
 ; CHECK: endloop0
 
-define i32 @f2(i32* nocapture %a0) #1 {
+define i32 @f2(ptr nocapture %a0) #1 {
 b0:
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
-  %v0 = phi i32* [ %a0, %b0 ], [ %v4, %b1 ]
+  %v0 = phi ptr [ %a0, %b0 ], [ %v4, %b1 ]
   %v1 = phi i32 [ 0, %b0 ], [ %v2, %b1 ]
-  store i32 %v1, i32* %v0, align 4
+  store i32 %v1, ptr %v0, align 4
   %v2 = add nsw i32 %v1, 1
   %v3 = icmp eq i32 %v2, 40
-  %v4 = getelementptr i32, i32* %v0, i32 1
+  %v4 = getelementptr i32, ptr %v0, i32 1
   br i1 %v3, label %b2, label %b1
 
 b2:                                               ; preds = %b1

diff --git a/llvm/test/CodeGen/Hexagon/hwloop-const.ll b/llvm/test/CodeGen/Hexagon/hwloop-const.ll
index eb105a33768a3..58160dec4498a 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop-const.ll
+++ b/llvm/test/CodeGen/Hexagon/hwloop-const.ll
@@ -12,10 +12,10 @@ b0:
 
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ 0, %b0 ], [ %v3, %b1 ]
-  %v1 = getelementptr inbounds [25000 x i32], [25000 x i32]* @g0, i32 0, i32 %v0
-  store i32 %v0, i32* %v1, align 4
-  %v2 = getelementptr inbounds [25000 x i32], [25000 x i32]* @g1, i32 0, i32 %v0
-  store i32 %v0, i32* %v2, align 4
+  %v1 = getelementptr inbounds [25000 x i32], ptr @g0, i32 0, i32 %v0
+  store i32 %v0, ptr %v1, align 4
+  %v2 = getelementptr inbounds [25000 x i32], ptr @g1, i32 0, i32 %v0
+  store i32 %v0, ptr %v2, align 4
   %v3 = add nsw i32 %v0, 1
   %v4 = icmp eq i32 %v3, 25000
   br i1 %v4, label %b2, label %b1

diff --git a/llvm/test/CodeGen/Hexagon/hwloop-crit-edge.ll b/llvm/test/CodeGen/Hexagon/hwloop-crit-edge.ll
index af64a63c29060..d004fbbcbdd18 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop-crit-edge.ll
+++ b/llvm/test/CodeGen/Hexagon/hwloop-crit-edge.ll
@@ -7,13 +7,13 @@
 ; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}})
 ; CHECK: endloop0
 
-define void @test(i32* nocapture %pFL, i16 signext %nBS, i16* nocapture readonly %pHT) #0 {
+define void @test(ptr nocapture %pFL, i16 signext %nBS, ptr nocapture readonly %pHT) #0 {
 entry:
-  %0 = load i32, i32* %pFL, align 4
+  %0 = load i32, ptr %pFL, align 4
   %1 = tail call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 %0, i32 246)
   %2 = tail call i64 @llvm.hexagon.S2.asl.r.p(i64 %1, i32 -13)
   %3 = tail call i32 @llvm.hexagon.A2.sat(i64 %2)
-  store i32 %3, i32* %pFL, align 4
+  store i32 %3, ptr %pFL, align 4
   %cmp16 = icmp sgt i16 %nBS, 0
   br i1 %cmp16, label %for.body.lr.ph, label %for.end
 
@@ -23,22 +23,22 @@ for.body.lr.ph:
 
 for.body:
   %5 = phi i32 [ %3, %for.body.lr.ph ], [ %.pre, %for.body.for.body_crit_edge ]
-  %arrayidx3.phi = phi i32* [ %pFL, %for.body.lr.ph ], [ %arrayidx3.inc, %for.body.for.body_crit_edge ]
-  %arrayidx5.phi = phi i16* [ %pHT, %for.body.lr.ph ], [ %arrayidx5.inc, %for.body.for.body_crit_edge ]
+  %arrayidx3.phi = phi ptr [ %pFL, %for.body.lr.ph ], [ %arrayidx3.inc, %for.body.for.body_crit_edge ]
+  %arrayidx5.phi = phi ptr [ %pHT, %for.body.lr.ph ], [ %arrayidx5.inc, %for.body.for.body_crit_edge ]
   %i.017.pmt = phi i32 [ 1, %for.body.lr.ph ], [ %phitmp, %for.body.for.body_crit_edge ]
-  %6 = load i16, i16* %arrayidx5.phi, align 2
+  %6 = load i16, ptr %arrayidx5.phi, align 2
   %conv6 = sext i16 %6 to i32
   %7 = tail call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 %5, i32 %conv6)
   %8 = tail call i64 @llvm.hexagon.S2.asl.r.p(i64 %7, i32 -13)
   %9 = tail call i32 @llvm.hexagon.A2.sat(i64 %8)
-  store i32 %9, i32* %arrayidx3.phi, align 4
+  store i32 %9, ptr %arrayidx3.phi, align 4
   %exitcond = icmp eq i32 %i.017.pmt, %4
-  %arrayidx3.inc = getelementptr i32, i32* %arrayidx3.phi, i32 1
+  %arrayidx3.inc = getelementptr i32, ptr %arrayidx3.phi, i32 1
   br i1 %exitcond, label %for.end.loopexit, label %for.body.for.body_crit_edge
 
 for.body.for.body_crit_edge:
-  %arrayidx5.inc = getelementptr i16, i16* %arrayidx5.phi, i32 1
-  %.pre = load i32, i32* %arrayidx3.inc, align 4
+  %arrayidx5.inc = getelementptr i16, ptr %arrayidx5.phi, i32 1
+  %.pre = load i32, ptr %arrayidx3.inc, align 4
   %phitmp = add i32 %i.017.pmt, 1
   br label %for.body
 

diff --git a/llvm/test/CodeGen/Hexagon/hwloop-dbg.ll b/llvm/test/CodeGen/Hexagon/hwloop-dbg.ll
index 443e4b59e9dd6..1739b6188f4da 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop-dbg.ll
+++ b/llvm/test/CodeGen/Hexagon/hwloop-dbg.ll
@@ -7,25 +7,25 @@
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
 target triple = "hexagon"
 
-define void @f0(i32* nocapture %a0, i32* nocapture %a1) #0 !dbg !4 {
+define void @f0(ptr nocapture %a0, ptr nocapture %a1) #0 !dbg !4 {
 b0:
-  call void @llvm.dbg.value(metadata i32* %a0, metadata !10, metadata !DIExpression()), !dbg !14
-  call void @llvm.dbg.value(metadata i32* %a1, metadata !11, metadata !DIExpression()), !dbg !15
+  call void @llvm.dbg.value(metadata ptr %a0, metadata !10, metadata !DIExpression()), !dbg !14
+  call void @llvm.dbg.value(metadata ptr %a1, metadata !11, metadata !DIExpression()), !dbg !15
   call void @llvm.dbg.value(metadata i32 0, metadata !12, metadata !DIExpression()), !dbg !16
   br label %b1, !dbg !16
 
 b1:                                               ; preds = %b1, %b0
-  %v0 = phi i32* [ %a0, %b0 ], [ %v7, %b1 ]
+  %v0 = phi ptr [ %a0, %b0 ], [ %v7, %b1 ]
   %v1 = phi i32 [ 0, %b0 ], [ %v5, %b1 ]
-  %v2 = phi i32* [ %a1, %b0 ], [ %v3, %b1 ]
-  %v3 = getelementptr inbounds i32, i32* %v2, i32 1, !dbg !18
-  call void @llvm.dbg.value(metadata i32* %v3, metadata !11, metadata !DIExpression()), !dbg !18
-  %v4 = load i32, i32* %v2, align 4, !dbg !18
-  store i32 %v4, i32* %v0, align 4, !dbg !18
+  %v2 = phi ptr [ %a1, %b0 ], [ %v3, %b1 ]
+  %v3 = getelementptr inbounds i32, ptr %v2, i32 1, !dbg !18
+  call void @llvm.dbg.value(metadata ptr %v3, metadata !11, metadata !DIExpression()), !dbg !18
+  %v4 = load i32, ptr %v2, align 4, !dbg !18
+  store i32 %v4, ptr %v0, align 4, !dbg !18
   %v5 = add nsw i32 %v1, 1, !dbg !20
   call void @llvm.dbg.value(metadata i32 %v5, metadata !12, metadata !DIExpression()), !dbg !20
   %v6 = icmp eq i32 %v5, 10, !dbg !16
-  %v7 = getelementptr i32, i32* %v0, i32 1
+  %v7 = getelementptr i32, ptr %v0, i32 1
   br i1 %v6, label %b2, label %b1, !dbg !16
 
 b2:                                               ; preds = %b1

diff --git a/llvm/test/CodeGen/Hexagon/hwloop-le.ll b/llvm/test/CodeGen/Hexagon/hwloop-le.ll
index d78b234d4ecea..4d6e7bd9b110a 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop-le.ll
+++ b/llvm/test/CodeGen/Hexagon/hwloop-le.ll
@@ -3,7 +3,7 @@
 ; CHECK-LABEL: f0:
 ; CHECK: loop0
 ; a < b
-define void @f0(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f0(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp sle i32 28395, %a2
   br i1 %v0, label %b1, label %b3
@@ -13,12 +13,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ 28395, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 1
   %v8 = icmp sle i32 %v7, %a2
   br i1 %v8, label %b2, label %b3
@@ -30,7 +30,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f1:
 ; CHECK: loop0
 ; a < b
-define void @f1(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f1(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp sle i32 9073, %a2
   br i1 %v0, label %b1, label %b3
@@ -40,12 +40,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ 9073, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 2
   %v8 = icmp sle i32 %v7, %a2
   br i1 %v8, label %b2, label %b3
@@ -57,7 +57,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f2:
 ; CHECK: loop0
 ; a < b
-define void @f2(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f2(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp sle i32 21956, %a2
   br i1 %v0, label %b1, label %b3
@@ -67,12 +67,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ 21956, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 4
   %v8 = icmp sle i32 %v7, %a2
   br i1 %v8, label %b2, label %b3
@@ -84,7 +84,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f3:
 ; CHECK: loop0
 ; a < b
-define void @f3(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f3(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp sle i32 16782, %a2
   br i1 %v0, label %b1, label %b3
@@ -94,12 +94,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ 16782, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 8
   %v8 = icmp sle i32 %v7, %a2
   br i1 %v8, label %b2, label %b3
@@ -111,7 +111,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f4:
 ; CHECK: loop0
 ; a < b
-define void @f4(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f4(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp sle i32 19097, %a2
   br i1 %v0, label %b1, label %b3
@@ -121,12 +121,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ 19097, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 16
   %v8 = icmp sle i32 %v7, %a2
   br i1 %v8, label %b2, label %b3
@@ -138,7 +138,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f5:
 ; CHECK: loop0
 ; a < b
-define void @f5(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f5(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp sle i32 %a1, 14040
   br i1 %v0, label %b1, label %b3
@@ -148,12 +148,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ %a1, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 1
   %v8 = icmp sle i32 %v7, 14040
   br i1 %v8, label %b2, label %b3
@@ -165,7 +165,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f6:
 ; CHECK: loop0
 ; a < b
-define void @f6(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f6(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp sle i32 %a1, 13710
   br i1 %v0, label %b1, label %b3
@@ -175,12 +175,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ %a1, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 2
   %v8 = icmp sle i32 %v7, 13710
   br i1 %v8, label %b2, label %b3
@@ -192,7 +192,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f7:
 ; CHECK: loop0
 ; a < b
-define void @f7(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f7(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp sle i32 %a1, 9920
   br i1 %v0, label %b1, label %b3
@@ -202,12 +202,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ %a1, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 4
   %v8 = icmp sle i32 %v7, 9920
   br i1 %v8, label %b2, label %b3
@@ -219,7 +219,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f8:
 ; CHECK: loop0
 ; a < b
-define void @f8(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f8(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp sle i32 %a1, 18924
   br i1 %v0, label %b1, label %b3
@@ -229,12 +229,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ %a1, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 8
   %v8 = icmp sle i32 %v7, 18924
   br i1 %v8, label %b2, label %b3
@@ -246,7 +246,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f9:
 ; CHECK: loop0
 ; a < b
-define void @f9(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f9(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp sle i32 %a1, 11812
   br i1 %v0, label %b1, label %b3
@@ -256,12 +256,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ %a1, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 16
   %v8 = icmp sle i32 %v7, 11812
   br i1 %v8, label %b2, label %b3
@@ -273,7 +273,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f10:
 ; CHECK: loop0
 ; a < b
-define void @f10(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f10(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp sle i32 %a1, %a2
   br i1 %v0, label %b1, label %b3
@@ -283,12 +283,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ %a1, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 1
   %v8 = icmp sle i32 %v7, %a2
   br i1 %v8, label %b2, label %b3
@@ -300,7 +300,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f11:
 ; CHECK: loop0
 ; a < b
-define void @f11(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f11(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp sle i32 %a1, %a2
   br i1 %v0, label %b1, label %b3
@@ -310,12 +310,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ %a1, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 2
   %v8 = icmp sle i32 %v7, %a2
   br i1 %v8, label %b2, label %b3
@@ -327,7 +327,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f12:
 ; CHECK: loop0
 ; a < b
-define void @f12(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f12(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp sle i32 %a1, %a2
   br i1 %v0, label %b1, label %b3
@@ -337,12 +337,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ %a1, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 4
   %v8 = icmp sle i32 %v7, %a2
   br i1 %v8, label %b2, label %b3
@@ -354,7 +354,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f13:
 ; CHECK: loop0
 ; a < b
-define void @f13(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f13(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp sle i32 %a1, %a2
   br i1 %v0, label %b1, label %b3
@@ -364,12 +364,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ %a1, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 8
   %v8 = icmp sle i32 %v7, %a2
   br i1 %v8, label %b2, label %b3
@@ -381,7 +381,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f14:
 ; CHECK: loop0
 ; a < b
-define void @f14(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f14(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp sle i32 %a1, %a2
   br i1 %v0, label %b1, label %b3
@@ -391,12 +391,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ %a1, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 16
   %v8 = icmp sle i32 %v7, %a2
   br i1 %v8, label %b2, label %b3

diff  --git a/llvm/test/CodeGen/Hexagon/hwloop-long.ll b/llvm/test/CodeGen/Hexagon/hwloop-long.ll
index d6b6ef3336f08..eb9627b846f51 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop-long.ll
+++ b/llvm/test/CodeGen/Hexagon/hwloop-long.ll
@@ -6,15 +6,15 @@
 ; signed GT case
 ; CHECK-LABEL: f0:
 ; CHECK: loop0
-define i32 @f0(i32* nocapture %a0) #0 {
+define i32 @f0(ptr nocapture %a0) #0 {
 b0:
   br label %b1
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ 0, %b0 ], [ %v5, %b1 ]
   %v1 = phi i64 [ 0, %b0 ], [ %v6, %b1 ]
   %v2 = trunc i64 %v1 to i32
-  %v3 = getelementptr inbounds i32, i32* %a0, i32 %v2
-  %v4 = load i32, i32* %v3, align 4
+  %v3 = getelementptr inbounds i32, ptr %a0, i32 %v2
+  %v4 = load i32, ptr %v3, align 4
   %v5 = add nsw i32 %v4, %v0
   %v6 = add nsw i64 %v1, 1
   %v7 = icmp slt i64 %v6, 8
@@ -27,15 +27,15 @@ b2:                                               ; preds = %b1
 ; unsigned signed GT case
 ; CHECK-LABEL: f1:
 ; CHECK: loop0
-define i32 @f1(i32* nocapture %a0) #0 {
+define i32 @f1(ptr nocapture %a0) #0 {
 b0:
   br label %b1
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ 0, %b0 ], [ %v5, %b1 ]
   %v1 = phi i64 [ 0, %b0 ], [ %v6, %b1 ]
   %v2 = trunc i64 %v1 to i32
-  %v3 = getelementptr inbounds i32, i32* %a0, i32 %v2
-  %v4 = load i32, i32* %v3, align 4
+  %v3 = getelementptr inbounds i32, ptr %a0, i32 %v2
+  %v4 = load i32, ptr %v3, align 4
   %v5 = add nsw i32 %v4, %v0
   %v6 = add i64 %v1, 1
   %v7 = icmp ult i64 %v6, 8
@@ -48,7 +48,7 @@ b2:                                               ; preds = %b1
 ; EQ case
 ; CHECK-LABEL: f2:
 ; CHECK: loop0
-define i32 @f2(i32* nocapture %a0) #0 {
+define i32 @f2(ptr nocapture %a0) #0 {
 b0:
   br label %b1
 
@@ -56,8 +56,8 @@ b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ 0, %b0 ], [ %v5, %b1 ]
   %v1 = phi i64 [ 0, %b0 ], [ %v6, %b1 ]
   %v2 = trunc i64 %v1 to i32
-  %v3 = getelementptr inbounds i32, i32* %a0, i32 %v2
-  %v4 = load i32, i32* %v3, align 4
+  %v3 = getelementptr inbounds i32, ptr %a0, i32 %v2
+  %v4 = load i32, ptr %v3, align 4
   %v5 = add nsw i32 %v4, %v0
   %v6 = add nsw i64 %v1, 1
   %v7 = icmp eq i64 %v6, 8

diff  --git a/llvm/test/CodeGen/Hexagon/hwloop-loop1.ll b/llvm/test/CodeGen/Hexagon/hwloop-loop1.ll
index af908b602297a..47aa32a60e5bf 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop-loop1.ll
+++ b/llvm/test/CodeGen/Hexagon/hwloop-loop1.ll
@@ -11,29 +11,26 @@ define i32 @main() #0 {
 entry:
   %array = alloca [100 x i32], align 8
   %doublearray = alloca [100 x [100 x i32]], align 8
-  %0 = bitcast [100 x i32]* %array to i8*
-  call void @llvm.lifetime.start.p0i8(i64 400, i8* %0) #1
-  %1 = bitcast [100 x [100 x i32]]* %doublearray to i8*
-  call void @llvm.lifetime.start.p0i8(i64 40000, i8* %1) #1
-  %arrayidx1 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %doublearray, i32 0, i32 10, i32 10
-  %arrayidx2.gep = getelementptr [100 x i32], [100 x i32]* %array, i32 0, i32 0
+  call void @llvm.lifetime.start.p0(i64 400, ptr %array) #1
+  call void @llvm.lifetime.start.p0(i64 40000, ptr %doublearray) #1
+  %arrayidx1 = getelementptr inbounds [100 x [100 x i32]], ptr %doublearray, i32 0, i32 10, i32 10
   br label %for.body
 
 for.body:
-  %2 = phi i32 [ undef, %entry ], [ %.pre, %for.body.for.body_crit_edge ]
+  %0 = phi i32 [ undef, %entry ], [ %.pre, %for.body.for.body_crit_edge ]
   %sum.031 = phi i32 [ undef, %entry ], [ %add, %for.body.for.body_crit_edge ]
-  %arrayidx2.phi = phi i32* [ %arrayidx2.gep, %entry ], [ %arrayidx2.inc, %for.body.for.body_crit_edge ]
+  %arrayidx2.phi = phi ptr [ %array, %entry ], [ %arrayidx2.inc, %for.body.for.body_crit_edge ]
   %i.030 = phi i32 [ 1, %entry ], [ %phitmp, %for.body.for.body_crit_edge ]
-  %add = add nsw i32 %2, %sum.031
+  %add = add nsw i32 %0, %sum.031
   %exitcond33 = icmp eq i32 %i.030, 100
-  %arrayidx2.inc = getelementptr i32, i32* %arrayidx2.phi, i32 1
+  %arrayidx2.inc = getelementptr i32, ptr %arrayidx2.phi, i32 1
   br i1 %exitcond33, label %for.cond7.preheader.preheader, label %for.body.for.body_crit_edge
 
 for.cond7.preheader.preheader:
   br label %for.cond7.preheader
 
 for.body.for.body_crit_edge:
-  %.pre = load i32, i32* %arrayidx2.inc, align 4
+  %.pre = load i32, ptr %arrayidx2.inc, align 4
   %phitmp = add i32 %i.030, 1
   br label %for.body
 
@@ -43,8 +40,8 @@ for.cond7.preheader:
 
 for.body9:
   %j.028 = phi i32 [ 0, %for.cond7.preheader ], [ %inc13, %for.body9 ]
-  %arrayidx11 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %doublearray, i32 0, i32 %i.129, i32 %j.028
-  store i32 %add, i32* %arrayidx11, align 4
+  %arrayidx11 = getelementptr inbounds [100 x [100 x i32]], ptr %doublearray, i32 0, i32 %i.129, i32 %j.028
+  store i32 %add, ptr %arrayidx11, align 4
   %inc13 = add nsw i32 %j.028, 1
   %exitcond = icmp eq i32 %inc13, 100
   br i1 %exitcond, label %for.inc15, label %for.body9
@@ -55,12 +52,12 @@ for.inc15:
   br i1 %exitcond32, label %for.end17, label %for.cond7.preheader
 
 for.end17:
-  %3 = load i32, i32* %arrayidx1, align 8
-  call void @llvm.lifetime.end.p0i8(i64 40000, i8* %1) #1
-  call void @llvm.lifetime.end.p0i8(i64 400, i8* %0) #1
-  ret i32 %3
+  %1 = load i32, ptr %arrayidx1, align 8
+  call void @llvm.lifetime.end.p0(i64 40000, ptr %doublearray) #1
+  call void @llvm.lifetime.end.p0(i64 400, ptr %array) #1
+  ret i32 %1
 }
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
 
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1

diff  --git a/llvm/test/CodeGen/Hexagon/hwloop-lt.ll b/llvm/test/CodeGen/Hexagon/hwloop-lt.ll
index 8919f265abfe3..dc9d25e8071c5 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop-lt.ll
+++ b/llvm/test/CodeGen/Hexagon/hwloop-lt.ll
@@ -3,7 +3,7 @@
 ; CHECK-LABEL: @test_pos1_ir_slt
 ; CHECK: loop0
 ; a < b
-define void @test_pos1_ir_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos1_ir_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
 entry:
   %cmp3 = icmp slt i32 8531, %b
   br i1 %cmp3, label %for.body.lr.ph, label %for.end
@@ -13,12 +13,12 @@ for.body.lr.ph:
 
 for.body:
   %i.04 = phi i32 [ 8531, %for.body.lr.ph ], [ %inc, %for.body ]
-  %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+  %0 = load i8, ptr %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
-  store i8 %conv1, i8* %arrayidx, align 1
+  store i8 %conv1, ptr %arrayidx, align 1
   %inc = add nsw i32 %i.04, 1
   %cmp = icmp slt i32 %inc, %b
   br i1 %cmp, label %for.body, label %for.end
@@ -30,7 +30,7 @@ for.end:
 ; CHECK-LABEL: @test_pos2_ir_slt
 ; CHECK: loop0
 ; a < b
-define void @test_pos2_ir_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos2_ir_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
 entry:
   %cmp3 = icmp slt i32 9152, %b
   br i1 %cmp3, label %for.body.lr.ph, label %for.end
@@ -40,12 +40,12 @@ for.body.lr.ph:
 
 for.body:
   %i.04 = phi i32 [ 9152, %for.body.lr.ph ], [ %inc, %for.body ]
-  %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+  %0 = load i8, ptr %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
-  store i8 %conv1, i8* %arrayidx, align 1
+  store i8 %conv1, ptr %arrayidx, align 1
   %inc = add nsw i32 %i.04, 2
   %cmp = icmp slt i32 %inc, %b
   br i1 %cmp, label %for.body, label %for.end
@@ -57,7 +57,7 @@ for.end:
 ; CHECK-LABEL: @test_pos4_ir_slt
 ; CHECK: loop0
 ; a < b
-define void @test_pos4_ir_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos4_ir_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
 entry:
   %cmp3 = icmp slt i32 18851, %b
   br i1 %cmp3, label %for.body.lr.ph, label %for.end
@@ -67,12 +67,12 @@ for.body.lr.ph:
 
 for.body:
   %i.04 = phi i32 [ 18851, %for.body.lr.ph ], [ %inc, %for.body ]
-  %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+  %0 = load i8, ptr %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
-  store i8 %conv1, i8* %arrayidx, align 1
+  store i8 %conv1, ptr %arrayidx, align 1
   %inc = add nsw i32 %i.04, 4
   %cmp = icmp slt i32 %inc, %b
   br i1 %cmp, label %for.body, label %for.end
@@ -84,7 +84,7 @@ for.end:
 ; CHECK-LABEL: @test_pos8_ir_slt
 ; CHECK: loop0
 ; a < b
-define void @test_pos8_ir_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos8_ir_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
 entry:
   %cmp3 = icmp slt i32 25466, %b
   br i1 %cmp3, label %for.body.lr.ph, label %for.end
@@ -94,12 +94,12 @@ for.body.lr.ph:
 
 for.body:
   %i.04 = phi i32 [ 25466, %for.body.lr.ph ], [ %inc, %for.body ]
-  %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+  %0 = load i8, ptr %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
-  store i8 %conv1, i8* %arrayidx, align 1
+  store i8 %conv1, ptr %arrayidx, align 1
   %inc = add nsw i32 %i.04, 8
   %cmp = icmp slt i32 %inc, %b
   br i1 %cmp, label %for.body, label %for.end
@@ -111,7 +111,7 @@ for.end:
 ; CHECK-LABEL: @test_pos16_ir_slt
 ; CHECK: loop0
 ; a < b
-define void @test_pos16_ir_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos16_ir_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
 entry:
   %cmp3 = icmp slt i32 9295, %b
   br i1 %cmp3, label %for.body.lr.ph, label %for.end
@@ -121,12 +121,12 @@ for.body.lr.ph:
 
 for.body:
   %i.04 = phi i32 [ 9295, %for.body.lr.ph ], [ %inc, %for.body ]
-  %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+  %0 = load i8, ptr %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
-  store i8 %conv1, i8* %arrayidx, align 1
+  store i8 %conv1, ptr %arrayidx, align 1
   %inc = add nsw i32 %i.04, 16
   %cmp = icmp slt i32 %inc, %b
   br i1 %cmp, label %for.body, label %for.end
@@ -138,7 +138,7 @@ for.end:
 ; CHECK-LABEL: @test_pos1_ri_slt
 ; CHECK: loop0
 ; a < b
-define void @test_pos1_ri_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos1_ri_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
 entry:
   %cmp3 = icmp slt i32 %a, 31236
   br i1 %cmp3, label %for.body.lr.ph, label %for.end
@@ -148,12 +148,12 @@ for.body.lr.ph:
 
 for.body:
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
-  %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+  %0 = load i8, ptr %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
-  store i8 %conv1, i8* %arrayidx, align 1
+  store i8 %conv1, ptr %arrayidx, align 1
   %inc = add nsw i32 %i.04, 1
   %cmp = icmp slt i32 %inc, 31236
   br i1 %cmp, label %for.body, label %for.end
@@ -165,7 +165,7 @@ for.end:
 ; CHECK-LABEL: @test_pos2_ri_slt
 ; CHECK: loop0
 ; a < b
-define void @test_pos2_ri_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos2_ri_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
 entry:
   %cmp3 = icmp slt i32 %a, 22653
   br i1 %cmp3, label %for.body.lr.ph, label %for.end
@@ -175,12 +175,12 @@ for.body.lr.ph:
 
 for.body:
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
-  %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+  %0 = load i8, ptr %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
-  store i8 %conv1, i8* %arrayidx, align 1
+  store i8 %conv1, ptr %arrayidx, align 1
   %inc = add nsw i32 %i.04, 2
   %cmp = icmp slt i32 %inc, 22653
   br i1 %cmp, label %for.body, label %for.end
@@ -192,7 +192,7 @@ for.end:
 ; CHECK-LABEL: @test_pos4_ri_slt
 ; CHECK: loop0
 ; a < b
-define void @test_pos4_ri_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos4_ri_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
 entry:
   %cmp3 = icmp slt i32 %a, 1431
   br i1 %cmp3, label %for.body.lr.ph, label %for.end
@@ -202,12 +202,12 @@ for.body.lr.ph:
 
 for.body:
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
-  %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+  %0 = load i8, ptr %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
-  store i8 %conv1, i8* %arrayidx, align 1
+  store i8 %conv1, ptr %arrayidx, align 1
   %inc = add nsw i32 %i.04, 4
   %cmp = icmp slt i32 %inc, 1431
   br i1 %cmp, label %for.body, label %for.end
@@ -219,7 +219,7 @@ for.end:
 ; CHECK-LABEL: @test_pos8_ri_slt
 ; CHECK: loop0
 ; a < b
-define void @test_pos8_ri_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos8_ri_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
 entry:
   %cmp3 = icmp slt i32 %a, 22403
   br i1 %cmp3, label %for.body.lr.ph, label %for.end
@@ -229,12 +229,12 @@ for.body.lr.ph:
 
 for.body:
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
-  %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+  %0 = load i8, ptr %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
-  store i8 %conv1, i8* %arrayidx, align 1
+  store i8 %conv1, ptr %arrayidx, align 1
   %inc = add nsw i32 %i.04, 8
   %cmp = icmp slt i32 %inc, 22403
   br i1 %cmp, label %for.body, label %for.end
@@ -246,7 +246,7 @@ for.end:
 ; CHECK-LABEL: @test_pos16_ri_slt
 ; CHECK: loop0
 ; a < b
-define void @test_pos16_ri_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos16_ri_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
 entry:
   %cmp3 = icmp slt i32 %a, 21715
   br i1 %cmp3, label %for.body.lr.ph, label %for.end
@@ -256,12 +256,12 @@ for.body.lr.ph:
 
 for.body:
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
-  %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+  %0 = load i8, ptr %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
-  store i8 %conv1, i8* %arrayidx, align 1
+  store i8 %conv1, ptr %arrayidx, align 1
   %inc = add nsw i32 %i.04, 16
   %cmp = icmp slt i32 %inc, 21715
   br i1 %cmp, label %for.body, label %for.end
@@ -273,7 +273,7 @@ for.end:
 ; CHECK-LABEL: @test_pos1_rr_slt
 ; CHECK: loop0
 ; a < b
-define void @test_pos1_rr_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos1_rr_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
 entry:
   %cmp3 = icmp slt i32 %a, %b
   br i1 %cmp3, label %for.body.lr.ph, label %for.end
@@ -283,12 +283,12 @@ for.body.lr.ph:
 
 for.body:
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
-  %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+  %0 = load i8, ptr %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
-  store i8 %conv1, i8* %arrayidx, align 1
+  store i8 %conv1, ptr %arrayidx, align 1
   %inc = add nsw i32 %i.04, 1
   %cmp = icmp slt i32 %inc, %b
   br i1 %cmp, label %for.body, label %for.end
@@ -300,7 +300,7 @@ for.end:
 ; CHECK-LABEL: @test_pos2_rr_slt
 ; CHECK: loop0
 ; a < b
-define void @test_pos2_rr_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos2_rr_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
 entry:
   %cmp3 = icmp slt i32 %a, %b
   br i1 %cmp3, label %for.body.lr.ph, label %for.end
@@ -310,12 +310,12 @@ for.body.lr.ph:
 
 for.body:
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
-  %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+  %0 = load i8, ptr %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
-  store i8 %conv1, i8* %arrayidx, align 1
+  store i8 %conv1, ptr %arrayidx, align 1
   %inc = add nsw i32 %i.04, 2
   %cmp = icmp slt i32 %inc, %b
   br i1 %cmp, label %for.body, label %for.end
@@ -327,7 +327,7 @@ for.end:
 ; CHECK-LABEL: @test_pos4_rr_slt
 ; CHECK: loop0
 ; a < b
-define void @test_pos4_rr_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos4_rr_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
 entry:
   %cmp3 = icmp slt i32 %a, %b
   br i1 %cmp3, label %for.body.lr.ph, label %for.end
@@ -337,12 +337,12 @@ for.body.lr.ph:
 
 for.body:
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
-  %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+  %0 = load i8, ptr %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
-  store i8 %conv1, i8* %arrayidx, align 1
+  store i8 %conv1, ptr %arrayidx, align 1
   %inc = add nsw i32 %i.04, 4
   %cmp = icmp slt i32 %inc, %b
   br i1 %cmp, label %for.body, label %for.end
@@ -354,7 +354,7 @@ for.end:
 ; CHECK-LABEL: @test_pos8_rr_slt
 ; CHECK: loop0
 ; a < b
-define void @test_pos8_rr_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos8_rr_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
 entry:
   %cmp3 = icmp slt i32 %a, %b
   br i1 %cmp3, label %for.body.lr.ph, label %for.end
@@ -364,12 +364,12 @@ for.body.lr.ph:
 
 for.body:
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
-  %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+  %0 = load i8, ptr %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
-  store i8 %conv1, i8* %arrayidx, align 1
+  store i8 %conv1, ptr %arrayidx, align 1
   %inc = add nsw i32 %i.04, 8
   %cmp = icmp slt i32 %inc, %b
   br i1 %cmp, label %for.body, label %for.end
@@ -381,7 +381,7 @@ for.end:
 ; CHECK-LABEL: @test_pos16_rr_slt
 ; CHECK: loop0
 ; a < b
-define void @test_pos16_rr_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos16_rr_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
 entry:
   %cmp3 = icmp slt i32 %a, %b
   br i1 %cmp3, label %for.body.lr.ph, label %for.end
@@ -391,12 +391,12 @@ for.body.lr.ph:
 
 for.body:
   %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
-  %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
-  %0 = load i8, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+  %0 = load i8, ptr %arrayidx, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 1
   %conv1 = trunc i32 %add to i8
-  store i8 %conv1, i8* %arrayidx, align 1
+  store i8 %conv1, ptr %arrayidx, align 1
   %inc = add nsw i32 %i.04, 16
   %cmp = icmp slt i32 %inc, %b
   br i1 %cmp, label %for.body, label %for.end

diff  --git a/llvm/test/CodeGen/Hexagon/hwloop-lt1.ll b/llvm/test/CodeGen/Hexagon/hwloop-lt1.ll
index cf97fffce40aa..614d23c389cd5 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop-lt1.ll
+++ b/llvm/test/CodeGen/Hexagon/hwloop-lt1.ll
@@ -19,14 +19,14 @@ polly.loop_body:                                  ; preds = %entry, %polly.loop_
   %p_vector_iv14 = or i32 %polly.loopiv16, 1
   %p_vector_iv3 = add i32 %p_vector_iv14, 1
   %p_vector_iv415 = or i32 %polly.loopiv16, 3
-  %p_arrayidx = getelementptr [400 x i8], [400 x i8]* @A, i32 0, i32 %polly.loopiv16
-  %p_arrayidx5 = getelementptr [400 x i8], [400 x i8]* @A, i32 0, i32 %p_vector_iv14
-  %p_arrayidx6 = getelementptr [400 x i8], [400 x i8]* @A, i32 0, i32 %p_vector_iv3
-  %p_arrayidx7 = getelementptr [400 x i8], [400 x i8]* @A, i32 0, i32 %p_vector_iv415
-  store i8 123, i8* %p_arrayidx, align 1
-  store i8 123, i8* %p_arrayidx5, align 1
-  store i8 123, i8* %p_arrayidx6, align 1
-  store i8 123, i8* %p_arrayidx7, align 1
+  %p_arrayidx = getelementptr [400 x i8], ptr @A, i32 0, i32 %polly.loopiv16
+  %p_arrayidx5 = getelementptr [400 x i8], ptr @A, i32 0, i32 %p_vector_iv14
+  %p_arrayidx6 = getelementptr [400 x i8], ptr @A, i32 0, i32 %p_vector_iv3
+  %p_arrayidx7 = getelementptr [400 x i8], ptr @A, i32 0, i32 %p_vector_iv415
+  store i8 123, ptr %p_arrayidx, align 1
+  store i8 123, ptr %p_arrayidx5, align 1
+  store i8 123, ptr %p_arrayidx6, align 1
+  store i8 123, ptr %p_arrayidx7, align 1
   %0 = icmp slt i32 %polly.next_loopiv, 400
   br i1 %0, label %polly.loop_body, label %polly.loop_after
 }

diff  --git a/llvm/test/CodeGen/Hexagon/hwloop-missed.ll b/llvm/test/CodeGen/Hexagon/hwloop-missed.ll
index bcc8006522948..e0e01884ad1d8 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop-missed.ll
+++ b/llvm/test/CodeGen/Hexagon/hwloop-missed.ll
@@ -10,7 +10,7 @@
 
 @g = external global i32
 
-define void @test(i32* nocapture %a, i32* nocapture %b, i32 %n) nounwind {
+define void @test(ptr nocapture %a, ptr nocapture %b, i32 %n) nounwind {
 entry:
   %tobool = icmp eq i32 %n, 0
   br i1 %tobool, label %for.body4.preheader, label %for.body.preheader
@@ -19,13 +19,13 @@ for.body.preheader:
   br label %for.body
 
 for.body:
-  %arrayidx.phi = phi i32* [ %arrayidx.inc, %for.body ], [ %a, %for.body.preheader ]
+  %arrayidx.phi = phi ptr [ %arrayidx.inc, %for.body ], [ %a, %for.body.preheader ]
   %i.014 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
-  %0 = load i32, i32* @g, align 4
-  store i32 %0, i32* %arrayidx.phi, align 4
+  %0 = load i32, ptr @g, align 4
+  store i32 %0, ptr %arrayidx.phi, align 4
   %inc = add nsw i32 %i.014, 1
   %exitcond15 = icmp eq i32 %inc, 3
-  %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1
+  %arrayidx.inc = getelementptr i32, ptr %arrayidx.phi, i32 1
   br i1 %exitcond15, label %for.body4.preheader.loopexit, label %for.body
 
 for.body4.preheader.loopexit:
@@ -35,13 +35,13 @@ for.body4.preheader:
   br label %for.body4
 
 for.body4:
-  %arrayidx5.phi = phi i32* [ %arrayidx5.inc, %for.body4 ], [ %b, %for.body4.preheader ]
+  %arrayidx5.phi = phi ptr [ %arrayidx5.inc, %for.body4 ], [ %b, %for.body4.preheader ]
   %i1.013 = phi i32 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ]
-  %1 = load i32, i32* @g, align 4
-  store i32 %1, i32* %arrayidx5.phi, align 4
+  %1 = load i32, ptr @g, align 4
+  store i32 %1, ptr %arrayidx5.phi, align 4
   %inc7 = add nsw i32 %i1.013, 1
   %exitcond = icmp eq i32 %inc7, 3
-  %arrayidx5.inc = getelementptr i32, i32* %arrayidx5.phi, i32 1
+  %arrayidx5.inc = getelementptr i32, ptr %arrayidx5.phi, i32 1
   br i1 %exitcond, label %for.end8, label %for.body4
 
 for.end8:

diff  --git a/llvm/test/CodeGen/Hexagon/hwloop-ne.ll b/llvm/test/CodeGen/Hexagon/hwloop-ne.ll
index 301a31a7c0b7d..a474f3a1681fd 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop-ne.ll
+++ b/llvm/test/CodeGen/Hexagon/hwloop-ne.ll
@@ -3,7 +3,7 @@
 ; CHECK-LABEL: f0:
 ; CHECK: loop0
 ; a < b
-define void @f0(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f0(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp slt i32 32623, %a2
   br i1 %v0, label %b1, label %b3
@@ -13,12 +13,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ 32623, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 1
   %v8 = icmp ne i32 %v7, %a2
   br i1 %v8, label %b2, label %b3
@@ -30,7 +30,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f1:
 ; CHECK: loop0
 ; a < b
-define void @f1(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f1(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp slt i32 29554, %a2
   br i1 %v0, label %b1, label %b3
@@ -40,12 +40,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ 29554, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 2
   %v8 = icmp ne i32 %v7, %a2
   br i1 %v8, label %b2, label %b3
@@ -57,7 +57,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f2:
 ; CHECK: loop0
 ; a < b
-define void @f2(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f2(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp slt i32 15692, %a2
   br i1 %v0, label %b1, label %b3
@@ -67,12 +67,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ 15692, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 4
   %v8 = icmp ne i32 %v7, %a2
   br i1 %v8, label %b2, label %b3
@@ -84,7 +84,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f3:
 ; CHECK: loop0
 ; a < b
-define void @f3(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f3(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp slt i32 10449, %a2
   br i1 %v0, label %b1, label %b3
@@ -94,12 +94,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ 10449, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 8
   %v8 = icmp ne i32 %v7, %a2
   br i1 %v8, label %b2, label %b3
@@ -111,7 +111,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f4:
 ; CHECK: loop0
 ; a < b
-define void @f4(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f4(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp slt i32 32087, %a2
   br i1 %v0, label %b1, label %b3
@@ -121,12 +121,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ 32087, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 16
   %v8 = icmp ne i32 %v7, %a2
   br i1 %v8, label %b2, label %b3
@@ -138,7 +138,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f5:
 ; CHECK: loop0
 ; a < b
-define void @f5(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f5(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp slt i32 %a1, 3472
   br i1 %v0, label %b1, label %b3
@@ -148,12 +148,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ %a1, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 1
   %v8 = icmp ne i32 %v7, 3472
   br i1 %v8, label %b2, label %b3
@@ -165,7 +165,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f6:
 ; CHECK: loop0
 ; a < b
-define void @f6(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f6(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp slt i32 %a1, 8730
   br i1 %v0, label %b1, label %b3
@@ -175,12 +175,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ %a1, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 2
   %v8 = icmp ne i32 %v7, 8730
   br i1 %v8, label %b2, label %b3
@@ -192,7 +192,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f7:
 ; CHECK: loop0
 ; a < b
-define void @f7(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f7(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp slt i32 %a1, 1493
   br i1 %v0, label %b1, label %b3
@@ -202,12 +202,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ %a1, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 4
   %v8 = icmp ne i32 %v7, 1493
   br i1 %v8, label %b2, label %b3
@@ -219,7 +219,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f8:
 ; CHECK: loop0
 ; a < b
-define void @f8(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f8(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp slt i32 %a1, 1706
   br i1 %v0, label %b1, label %b3
@@ -229,12 +229,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ %a1, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 8
   %v8 = icmp ne i32 %v7, 1706
   br i1 %v8, label %b2, label %b3
@@ -246,7 +246,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f9:
 ; CHECK: loop0
 ; a < b
-define void @f9(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f9(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp slt i32 %a1, 1886
   br i1 %v0, label %b1, label %b3
@@ -256,12 +256,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ %a1, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 16
   %v8 = icmp ne i32 %v7, 1886
   br i1 %v8, label %b2, label %b3
@@ -273,7 +273,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f10:
 ; CHECK: loop0
 ; a < b
-define void @f10(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f10(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp slt i32 %a1, %a2
   br i1 %v0, label %b1, label %b3
@@ -283,12 +283,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ %a1, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 1
   %v8 = icmp ne i32 %v7, %a2
   br i1 %v8, label %b2, label %b3
@@ -300,7 +300,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f11:
 ; CHECK: loop0
 ; a < b
-define void @f11(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f11(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp slt i32 %a1, %a2
   br i1 %v0, label %b1, label %b3
@@ -310,12 +310,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ %a1, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 2
   %v8 = icmp ne i32 %v7, %a2
   br i1 %v8, label %b2, label %b3
@@ -327,7 +327,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f12:
 ; CHECK: loop0
 ; a < b
-define void @f12(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f12(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp slt i32 %a1, %a2
   br i1 %v0, label %b1, label %b3
@@ -337,12 +337,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ %a1, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 4
   %v8 = icmp ne i32 %v7, %a2
   br i1 %v8, label %b2, label %b3
@@ -354,7 +354,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f13
 ; CHECK: loop0
 ; a < b
-define void @f13(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f13(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp slt i32 %a1, %a2
   br i1 %v0, label %b1, label %b3
@@ -364,12 +364,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ %a1, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 8
   %v8 = icmp ne i32 %v7, %a2
   br i1 %v8, label %b2, label %b3
@@ -381,7 +381,7 @@ b3:                                               ; preds = %b2, %b0
 ; CHECK-LABEL: f14
 ; CHECK: loop0
 ; a < b
-define void @f14(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f14(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp slt i32 %a1, %a2
   br i1 %v0, label %b1, label %b3
@@ -391,12 +391,12 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ %a1, %b1 ], [ %v7, %b2 ]
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %v1
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %v1
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = add nsw i32 %v4, 1
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* %v2, align 1
+  store i8 %v6, ptr %v2, align 1
   %v7 = add nsw i32 %v1, 16
   %v8 = icmp ne i32 %v7, %a2
   br i1 %v8, label %b2, label %b3

diff  --git a/llvm/test/CodeGen/Hexagon/hwloop-noreturn-call.ll b/llvm/test/CodeGen/Hexagon/hwloop-noreturn-call.ll
index accf6fd83c6e6..d76c13d73bdfe 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop-noreturn-call.ll
+++ b/llvm/test/CodeGen/Hexagon/hwloop-noreturn-call.ll
@@ -5,15 +5,15 @@ target triple = "hexagon"
 ; CHECK-LABEL: danny:
 ; CHECK-DAG: loop0
 ; CHECK-DAG: call trap
-define void @danny(i32* %p, i32 %n, i32 %k) #0 {
+define void @danny(ptr %p, i32 %n, i32 %k) #0 {
 entry:
   br label %for.body
 
 for.body:                                         ; preds = %entry
   %t0 = phi i32 [ 0, %entry ], [ %t1, %for.cont ]
   %t1 = add i32 %t0, 1
-  %t2 = getelementptr i32, i32* %p, i32 %t0
-  store i32 %t1, i32* %t2, align 4
+  %t2 = getelementptr i32, ptr %p, i32 %t0
+  store i32 %t1, ptr %t2, align 4
   %c = icmp sgt i32 %t1, %k
   br i1 %c, label %noret, label %for.cont
 
@@ -32,15 +32,15 @@ noret:
 ; CHECK-LABEL: sammy:
 ; CHECK-DAG: loop0
 ; CHECK-DAG: callr
-define void @sammy(i32* %p, i32 %n, i32 %k, void (...)* %f) #0 {
+define void @sammy(ptr %p, i32 %n, i32 %k, ptr %f) #0 {
 entry:
   br label %for.body
 
 for.body:                                         ; preds = %entry
   %t0 = phi i32 [ 0, %entry ], [ %t1, %for.cont ]
   %t1 = add i32 %t0, 1
-  %t2 = getelementptr i32, i32* %p, i32 %t0
-  store i32 %t1, i32* %t2, align 4
+  %t2 = getelementptr i32, ptr %p, i32 %t0
+  store i32 %t1, ptr %t2, align 4
   %c = icmp sgt i32 %t1, %k
   br i1 %c, label %noret, label %for.cont
 

diff  --git a/llvm/test/CodeGen/Hexagon/hwloop-ph-deadcode.ll b/llvm/test/CodeGen/Hexagon/hwloop-ph-deadcode.ll
index 06e6db420f8fb..6ca50171ff05c 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop-ph-deadcode.ll
+++ b/llvm/test/CodeGen/Hexagon/hwloop-ph-deadcode.ll
@@ -13,7 +13,7 @@ entry:
 
 for.body:
   %loopIdx.051 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
-  store i32 1, i32* @g, align 4
+  store i32 1, ptr @g, align 4
   %inc = add i32 %loopIdx.051, 1
   %cmp9 = icmp ult i32 %inc, 5
   br i1 %cmp9, label %for.body, label %if.end38

diff  --git a/llvm/test/CodeGen/Hexagon/hwloop-phi-subreg.ll b/llvm/test/CodeGen/Hexagon/hwloop-phi-subreg.ll
index da81e61a2ede4..b42a0d44f7ba0 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop-phi-subreg.ll
+++ b/llvm/test/CodeGen/Hexagon/hwloop-phi-subreg.ll
@@ -17,7 +17,7 @@ b1:                                               ; preds = %b2, %b1
   %v4 = add nsw i64 %v3, 0
   %v5 = add nsw i64 %v4, 0
   %v6 = add nsw i64 %v5, 0
-  %v7 = load i32, i32* undef, align 4
+  %v7 = load i32, ptr undef, align 4
   %v8 = ashr i32 %v7, 5
   %v9 = sext i32 %v8 to i64
   %v10 = mul nsw i64 %v9, %v9

diff  --git a/llvm/test/CodeGen/Hexagon/hwloop-preh.ll b/llvm/test/CodeGen/Hexagon/hwloop-preh.ll
index fb7e76848660b..5b45d59fe67cc 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop-preh.ll
+++ b/llvm/test/CodeGen/Hexagon/hwloop-preh.ll
@@ -3,7 +3,7 @@
 
 target triple = "hexagon"
 
-define i32 @foo(i32 %x, i32 %n, i32* nocapture %A, i32* nocapture %B) #0 {
+define i32 @foo(i32 %x, i32 %n, ptr nocapture %A, ptr nocapture %B) #0 {
 entry:
   %cmp = icmp sgt i32 %x, 0
   br i1 %cmp, label %for.cond.preheader, label %return
@@ -16,17 +16,17 @@ for.body.preheader:                               ; preds = %for.cond.preheader
   br label %for.body
 
 for.body:                                         ; preds = %for.body.preheader, %for.body
-  %arrayidx.phi = phi i32* [ %arrayidx.inc, %for.body ], [ %B, %for.body.preheader ]
-  %arrayidx2.phi = phi i32* [ %arrayidx2.inc, %for.body ], [ %A, %for.body.preheader ]
+  %arrayidx.phi = phi ptr [ %arrayidx.inc, %for.body ], [ %B, %for.body.preheader ]
+  %arrayidx2.phi = phi ptr [ %arrayidx2.inc, %for.body ], [ %A, %for.body.preheader ]
   %i.07 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
-  %0 = load i32, i32* %arrayidx.phi, align 4, !tbaa !0
-  %1 = load i32, i32* %arrayidx2.phi, align 4, !tbaa !0
+  %0 = load i32, ptr %arrayidx.phi, align 4, !tbaa !0
+  %1 = load i32, ptr %arrayidx2.phi, align 4, !tbaa !0
   %add = add nsw i32 %1, %0
-  store i32 %add, i32* %arrayidx2.phi, align 4, !tbaa !0
+  store i32 %add, ptr %arrayidx2.phi, align 4, !tbaa !0
   %inc = add nsw i32 %i.07, 1
   %exitcond = icmp eq i32 %inc, %n
-  %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1
-  %arrayidx2.inc = getelementptr i32, i32* %arrayidx2.phi, i32 1
+  %arrayidx.inc = getelementptr i32, ptr %arrayidx.phi, i32 1
+  %arrayidx2.inc = getelementptr i32, ptr %arrayidx2.phi, i32 1
   br i1 %exitcond, label %return.loopexit, label %for.body
 
 return.loopexit:                                  ; preds = %for.body

diff  --git a/llvm/test/CodeGen/Hexagon/hwloop-range.ll b/llvm/test/CodeGen/Hexagon/hwloop-range.ll
index 5e6fe78d0e0b5..59f94b20cfc00 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop-range.ll
+++ b/llvm/test/CodeGen/Hexagon/hwloop-range.ll
@@ -8,7 +8,7 @@
 
 @g = external global i32, align 4
 
-define void @test(i32* nocapture %a, i32* nocapture readonly %b, i32 %n) #0 {
+define void @test(ptr nocapture %a, ptr nocapture readonly %b, i32 %n) #0 {
 entry:
   %cmp6 = icmp slt i32 %n, 1
   br i1 %cmp6, label %for.end, label %for.body.preheader
@@ -18,12 +18,12 @@ for.body.preheader:
 
 for.body:
   %i.07 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
-  %arrayidx = getelementptr inbounds i32, i32* %b, i32 %i.07
-  %0 = load i32, i32* %arrayidx, align 4
-  %1 = load i32, i32* @g, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %b, i32 %i.07
+  %0 = load i32, ptr %arrayidx, align 4
+  %1 = load i32, ptr @g, align 4
   %mul = mul nsw i32 %1, %0
-  %arrayidx1 = getelementptr inbounds i32, i32* %a, i32 %i.07
-  store i32 %mul, i32* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %a, i32 %i.07
+  store i32 %mul, ptr %arrayidx1, align 4
   %inc = add nuw nsw i32 %i.07, 1
   %exitcond = icmp eq i32 %inc, %n
   br i1 %exitcond, label %for.end.loopexit, label %for.body

diff  --git a/llvm/test/CodeGen/Hexagon/hwloop-recursion.ll b/llvm/test/CodeGen/Hexagon/hwloop-recursion.ll
index 8ab2dc37d0212..d9f2a94d0fffd 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop-recursion.ll
+++ b/llvm/test/CodeGen/Hexagon/hwloop-recursion.ll
@@ -4,23 +4,23 @@
 
 @c = common global i32 0, align 4
 @e = common global i32 0, align 4
-@g = common global i32* null, align 4
+@g = common global ptr null, align 4
 @a = common global i32 0, align 4
 @b = common global i32 0, align 4
-@h = common global i32* null, align 4
+@h = common global ptr null, align 4
 @d = common global i32 0, align 4
 @f = common global i32 0, align 4
 
-define i32 @fn1([0 x i32]* nocapture readnone %p1) #0 {
+define i32 @fn1(ptr nocapture readnone %p1) #0 {
 entry:
-  %0 = load i32*, i32** @h, align 4
-  %1 = load i32*, i32** @g, align 4
-  %.pre = load i32, i32* @c, align 4
+  %0 = load ptr, ptr @h, align 4
+  %1 = load ptr, ptr @g, align 4
+  %.pre = load i32, ptr @c, align 4
   br label %for.cond
 
 for.cond:
   %2 = phi i32 [ %10, %if.end ], [ %.pre, %entry ]
-  store i32 %2, i32* @e, align 4
+  store i32 %2, ptr @e, align 4
   %tobool5 = icmp eq i32 %2, 0
   br i1 %tobool5, label %for.end, label %for.body.lr.ph
 
@@ -32,8 +32,8 @@ for.body.lr.ph:
 
 for.body:
   %add6 = phi i32 [ %2, %for.body.lr.ph ], [ %add, %for.body ]
-  %6 = load i32, i32* %1, align 4
-  store i32 %6, i32* @a, align 4
+  %6 = load i32, ptr %1, align 4
+  store i32 %6, ptr @a, align 4
   %add = add nsw i32 %add6, 5
   %tobool = icmp eq i32 %add, 0
   br i1 %tobool, label %for.cond1.for.end_crit_edge, label %for.body
@@ -41,24 +41,24 @@ for.body:
 for.cond1.for.end_crit_edge:
   %7 = add i32 %2, 5
   %8 = add i32 %7, %5
-  store i32 %8, i32* @e, align 4
+  store i32 %8, ptr @e, align 4
   br label %for.end
 
 for.end:
-  %9 = load i32, i32* @b, align 4
+  %9 = load i32, ptr @b, align 4
   %tobool2 = icmp eq i32 %9, 0
   br i1 %tobool2, label %if.end, label %if.then
 
 if.then:
-  store i32 0, i32* %0, align 4
-  %.pre7 = load i32, i32* @c, align 4
+  store i32 0, ptr %0, align 4
+  %.pre7 = load i32, ptr @c, align 4
   br label %if.end
 
 if.end:
   %10 = phi i32 [ %2, %for.end ], [ %.pre7, %if.then ]
-  store i32 %10, i32* @d, align 4
-  %11 = load i32, i32* @f, align 4
+  store i32 %10, ptr @d, align 4
+  %11 = load i32, ptr @f, align 4
   %inc = add nsw i32 %11, 1
-  store i32 %inc, i32* @f, align 4
+  store i32 %inc, ptr @f, align 4
   br label %for.cond
 }

diff  --git a/llvm/test/CodeGen/Hexagon/hwloop-subreg.ll b/llvm/test/CodeGen/Hexagon/hwloop-subreg.ll
index 602e1bd7f2686..ef0ef5651cb33 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop-subreg.ll
+++ b/llvm/test/CodeGen/Hexagon/hwloop-subreg.ll
@@ -6,7 +6,7 @@ target triple = "hexagon"
 ; Function Attrs: nounwind optsize readonly
 define void @f0() #0 align 2 {
 b0:
-  %v0 = load i32, i32* undef, align 8
+  %v0 = load i32, ptr undef, align 8
   %v1 = zext i32 %v0 to i64
   %v2 = add nuw nsw i64 %v1, 63
   %v3 = lshr i64 %v2, 6

diff  --git a/llvm/test/CodeGen/Hexagon/hwloop-wrap2.ll b/llvm/test/CodeGen/Hexagon/hwloop-wrap2.ll
index 50675d6b681b8..ae041eed688c0 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop-wrap2.ll
+++ b/llvm/test/CodeGen/Hexagon/hwloop-wrap2.ll
@@ -4,24 +4,24 @@
 
 ; CHECK-NOT: loop0
 
-%struct.3 = type { i8*, i8, i8, i32, i32, i16, i16, i16, i16, i16, i16, i16, %struct.2* }
-%struct.2 = type { i16, i16, i16, i16, %struct.1* }
-%struct.1 = type { %struct.1*, %struct.0*, i32, i32, i16, [2 x i16], [2 x i16], i16 }
-%struct.0 = type { %struct.0*, i32, i32, i32, i32, i32, i32, i16, i16, i16, i8, i8, i8, i8 }
+%struct.3 = type { ptr, i8, i8, i32, i32, i16, i16, i16, i16, i16, i16, i16, ptr }
+%struct.2 = type { i16, i16, i16, i16, ptr }
+%struct.1 = type { ptr, ptr, i32, i32, i16, [2 x i16], [2 x i16], i16 }
+%struct.0 = type { ptr, i32, i32, i32, i32, i32, i32, i16, i16, i16, i8, i8, i8, i8 }
 
-@pairArray = external global i32**
-@carray = external global %struct.3**
+@pairArray = external global ptr
+@carray = external global ptr
 
 define void @test() #0 {
 entry:
-  %0 = load i32**, i32*** @pairArray, align 4
-  %1 = load %struct.3**, %struct.3*** @carray, align 4
+  %0 = load ptr, ptr @pairArray, align 4
+  %1 = load ptr, ptr @carray, align 4
   br i1 undef, label %for.end110, label %for.body
 
 for.body:
   %row.0199 = phi i32 [ %inc109, %for.inc108 ], [ 1, %entry ]
-  %arrayidx = getelementptr inbounds i32*, i32** %0, i32 %row.0199
-  %2 = load i32*, i32** %arrayidx, align 4
+  %arrayidx = getelementptr inbounds ptr, ptr %0, i32 %row.0199
+  %2 = load ptr, ptr %arrayidx, align 4
   br i1 undef, label %for.body48, label %for.inc108
 
 for.cond45:
@@ -30,8 +30,8 @@ for.cond45:
 
 for.body48:
   %i.1190 = phi i32 [ %dec58, %for.cond45 ], [ 0, %for.body ]
-  %arrayidx50 = getelementptr inbounds i32, i32* %2, i32 %i.1190
-  %3 = load i32, i32* %arrayidx50, align 4
+  %arrayidx50 = getelementptr inbounds i32, ptr %2, i32 %i.1190
+  %3 = load i32, ptr %arrayidx50, align 4
   %cmp53 = icmp slt i32 %3, 0
   %dec58 = add nsw i32 %i.1190, -1
   br i1 %cmp53, label %for.end59, label %for.cond45
@@ -46,15 +46,15 @@ if.then65:
 for.body80:
   %j.1196.in = phi i32 [ %j.1196, %for.body80 ], [ %i.1190, %if.then65 ]
   %j.1196 = add nsw i32 %j.1196.in, 1
-  %arrayidx81 = getelementptr inbounds i32, i32* %2, i32 %j.1196
-  %4 = load i32, i32* %arrayidx81, align 4
-  %arrayidx82 = getelementptr inbounds %struct.3*, %struct.3** %1, i32 %4
-  %5 = load %struct.3*, %struct.3** %arrayidx82, align 4
-  %cxcenter83 = getelementptr inbounds %struct.3, %struct.3* %5, i32 0, i32 3
-  store i32 0, i32* %cxcenter83, align 4
-  %6 = load i32, i32* %arrayidx81, align 4
-  %arrayidx87 = getelementptr inbounds i32, i32* %2, i32 %j.1196.in
-  store i32 %6, i32* %arrayidx87, align 4
+  %arrayidx81 = getelementptr inbounds i32, ptr %2, i32 %j.1196
+  %4 = load i32, ptr %arrayidx81, align 4
+  %arrayidx82 = getelementptr inbounds ptr, ptr %1, i32 %4
+  %5 = load ptr, ptr %arrayidx82, align 4
+  %cxcenter83 = getelementptr inbounds %struct.3, ptr %5, i32 0, i32 3
+  store i32 0, ptr %cxcenter83, align 4
+  %6 = load i32, ptr %arrayidx81, align 4
+  %arrayidx87 = getelementptr inbounds i32, ptr %2, i32 %j.1196.in
+  store i32 %6, ptr %arrayidx87, align 4
   %exitcond = icmp eq i32 %j.1196, 0
   br i1 %exitcond, label %for.inc108, label %for.body80
 

diff  --git a/llvm/test/CodeGen/Hexagon/hwloop1.ll b/llvm/test/CodeGen/Hexagon/hwloop1.ll
index 7a805d951b959..cdc9835e21a6b 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop1.ll
+++ b/llvm/test/CodeGen/Hexagon/hwloop1.ll
@@ -12,8 +12,8 @@ entry:
   br label %for.body
 for.body:
   %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
-  %arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* @a, i32 0, i32 %i.01
-  store i32 %i.01, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds [10 x i32], ptr @a, i32 0, i32 %i.01
+  store i32 %i.01, ptr %arrayidx, align 4
   %inc = add nsw i32 %i.01, 1
   %exitcond = icmp eq i32 %inc, 10
   br i1 %exitcond, label %for.end, label %for.body
@@ -26,7 +26,7 @@ for.end:
 ; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}})
 ; CHECK: endloop0
 
-define i32 @hwloop2(i32 %n, i32* nocapture %b) nounwind {
+define i32 @hwloop2(i32 %n, ptr nocapture %b) nounwind {
 entry:
   %cmp1 = icmp sgt i32 %n, 0
   br i1 %cmp1, label %for.body.preheader, label %for.end
@@ -37,8 +37,8 @@ for.body.preheader:
 for.body:
   %a.03 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
   %i.02 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
-  %arrayidx = getelementptr inbounds i32, i32* %b, i32 %i.02
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %b, i32 %i.02
+  %0 = load i32, ptr %arrayidx, align 4
   %add = add nsw i32 %0, %a.03
   %inc = add nsw i32 %i.02, 1
   %exitcond = icmp eq i32 %inc, %n
@@ -58,7 +58,7 @@ for.end:
 ; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}})
 ; CHECK: endloop0
 
-define i32 @hwloop3(i32 %n, i32* nocapture %b) nounwind {
+define i32 @hwloop3(i32 %n, ptr nocapture %b) nounwind {
 entry:
   %cmp1 = icmp sgt i32 %n, 0
   br i1 %cmp1, label %for.body.preheader, label %for.end
@@ -69,8 +69,8 @@ for.body.preheader:
 for.body:
   %a.03 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
   %i.02 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
-  %arrayidx = getelementptr inbounds i32, i32* %b, i32 %i.02
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %b, i32 %i.02
+  %0 = load i32, ptr %arrayidx, align 4
   %add = add nsw i32 %0, %a.03
   %inc = add nsw i32 %i.02, 4
   %exitcond = icmp eq i32 %inc, %n
@@ -89,7 +89,7 @@ for.end:
 ; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}})
 ; CHECK: endloop0
 
-define i32 @hwloop4(i32 %n, i32* nocapture %b) nounwind {
+define i32 @hwloop4(i32 %n, ptr nocapture %b) nounwind {
 entry:
   %cmp1 = icmp sgt i32 %n, 0
   br i1 %cmp1, label %for.body.preheader, label %for.end
@@ -99,8 +99,8 @@ for.body.preheader:
 
 for.body:
   %i.02 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
-  %arrayidx = getelementptr inbounds i32, i32* %b, i32 %i.02
-  store i32 %i.02, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %b, i32 %i.02
+  store i32 %i.02, ptr %arrayidx, align 4
   %inc = add nsw i32 %i.02, 1
   %exitcond = icmp eq i32 %inc, %n
   br i1 %exitcond, label %for.end.loopexit, label %for.body
@@ -117,17 +117,17 @@ for.end:
 ; CHECK: loop0(.LBB{{.}}_{{.}},#100)
 ; CHECK: endloop0
 
-define void @hwloop5(i32* nocapture %a, i32* nocapture %res) nounwind {
+define void @hwloop5(ptr nocapture %a, ptr nocapture %res) nounwind {
 entry:
   br label %for.body
 
 for.body:
   %i.03 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
-  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %i.03
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %a, i32 %i.03
+  %0 = load i32, ptr %arrayidx, align 4
   %mul = mul nsw i32 %0, %0
-  %arrayidx2 = getelementptr inbounds i32, i32* %res, i32 %i.03
-  store i32 %mul, i32* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds i32, ptr %res, i32 %i.03
+  store i32 %mul, ptr %arrayidx2, align 4
   %inc = add nsw i32 %i.03, 1
   %exitcond = icmp eq i32 %inc, 100
   br i1 %exitcond, label %for.end, label %for.body
@@ -142,16 +142,16 @@ for.end:
 ; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}})
 ; CHECK: endloop0
 
-define void @hwloop6(i32* nocapture %a, i32* nocapture %res) nounwind {
+define void @hwloop6(ptr nocapture %a, ptr nocapture %res) nounwind {
 entry:
   br label %for.body
 
 for.body:
   %i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
-  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %i.02
-  %0 = load i32, i32* %arrayidx, align 4
-  %arrayidx1 = getelementptr inbounds i32, i32* %res, i32 %i.02
-  store i32 %0, i32* %arrayidx1, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %a, i32 %i.02
+  %0 = load i32, ptr %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %res, i32 %i.02
+  store i32 %0, ptr %arrayidx1, align 4
   %inc = add nsw i32 %i.02, 1
   %exitcond = icmp eq i32 %inc, 1024
   br i1 %exitcond, label %for.end, label %for.body

diff  --git a/llvm/test/CodeGen/Hexagon/hwloop2.ll b/llvm/test/CodeGen/Hexagon/hwloop2.ll
index ba3de1f1a2af0..98d418abcd66f 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop2.ll
+++ b/llvm/test/CodeGen/Hexagon/hwloop2.ll
@@ -5,26 +5,26 @@
 ; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}})
 ; CHECK: endloop0
 
-define i32 @hwloop4(i32* nocapture %s, i32* nocapture %a, i32 %n) {
+define i32 @hwloop4(ptr nocapture %s, ptr nocapture %a, i32 %n) {
 entry:
   %cmp3 = icmp eq i32 %n, 0
   br i1 %cmp3, label %for.end, label %for.body.lr.ph
 
 for.body.lr.ph:
-  %.pre = load i32, i32* %s, align 4
+  %.pre = load i32, ptr %s, align 4
   br label %for.body
 
 for.body:
   %0 = phi i32 [ %.pre, %for.body.lr.ph ], [ %add1, %for.body ]
   %j.05 = phi i32 [ 0, %for.body.lr.ph ], [ %add2, %for.body ]
   %lsr.iv = phi i32 [ %lsr.iv.next, %for.body ], [ %n, %for.body.lr.ph ]
-  %lsr.iv1 = phi i32* [ %scevgep, %for.body ], [ %a, %for.body.lr.ph ]
-  %1 = load i32, i32* %lsr.iv1, align 4
+  %lsr.iv1 = phi ptr [ %scevgep, %for.body ], [ %a, %for.body.lr.ph ]
+  %1 = load i32, ptr %lsr.iv1, align 4
   %add1 = add nsw i32 %0, %1
-  store i32 %add1, i32* %s, align 4
+  store i32 %add1, ptr %s, align 4
   %add2 = add nsw i32 %j.05, 1
   %lsr.iv.next = add i32 %lsr.iv, -1
-  %scevgep = getelementptr i32, i32* %lsr.iv1, i32 1
+  %scevgep = getelementptr i32, ptr %lsr.iv1, i32 1
   %cmp = icmp eq i32 %lsr.iv.next, 0
   br i1 %cmp, label %for.end.loopexit, label %for.body
 

diff  --git a/llvm/test/CodeGen/Hexagon/hwloop3.ll b/llvm/test/CodeGen/Hexagon/hwloop3.ll
index 21778f93c6636..f70623e78794a 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop3.ll
+++ b/llvm/test/CodeGen/Hexagon/hwloop3.ll
@@ -5,19 +5,19 @@
 ; CHECK: endloop0
 ; CHECK-NOT: jump [[L1:.]]{{.*[[:space:]]+}}[[L1]]
 
-define void @test(i32* nocapture %a, i32 %n) nounwind {
+define void @test(ptr nocapture %a, i32 %n) nounwind {
 entry:
   br label %for.body
 
 for.body:
-  %arrayidx.phi = phi i32* [ %a, %entry ], [ %arrayidx.inc, %for.body ]
+  %arrayidx.phi = phi ptr [ %a, %entry ], [ %arrayidx.inc, %for.body ]
   %i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
-  %0 = load i32, i32* %arrayidx.phi, align 4
+  %0 = load i32, ptr %arrayidx.phi, align 4
   %add = add nsw i32 %0, 1
-  store i32 %add, i32* %arrayidx.phi, align 4
+  store i32 %add, ptr %arrayidx.phi, align 4
   %inc = add nsw i32 %i.02, 1
   %exitcond = icmp eq i32 %inc, 100
-  %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1
+  %arrayidx.inc = getelementptr i32, ptr %arrayidx.phi, i32 1
   br i1 %exitcond, label %for.end, label %for.body
 
 for.end:

diff  --git a/llvm/test/CodeGen/Hexagon/hwloop4.ll b/llvm/test/CodeGen/Hexagon/hwloop4.ll
index b8cea4c777208..5c860628697ff 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop4.ll
+++ b/llvm/test/CodeGen/Hexagon/hwloop4.ll
@@ -7,7 +7,7 @@
 ; CHECK: lsr([[OP1]],#{{[0-9]+}})
 ; CHECK: loop0
 
-define void @matrix_mul_matrix(i32 %N, i32* nocapture %C, i16* nocapture readnone %A, i16* nocapture readnone %B) #0 {
+define void @matrix_mul_matrix(i32 %N, ptr nocapture %C, ptr nocapture readnone %A, ptr nocapture readnone %B) #0 {
 entry:
   %cmp4 = icmp eq i32 %N, 0
   br i1 %cmp4, label %for.end, label %for.body.preheader
@@ -21,33 +21,33 @@ for.body.preheader9:
   br label %for.body
 
 for.body:
-  %arrayidx.phi = phi i32* [ %arrayidx.inc.7, %for.body ], [ %C, %for.body.preheader9 ]
+  %arrayidx.phi = phi ptr [ %arrayidx.inc.7, %for.body ], [ %C, %for.body.preheader9 ]
   %i.05 = phi i32 [ %inc.7, %for.body ], [ 0, %for.body.preheader9 ]
-  store i32 %i.05, i32* %arrayidx.phi, align 4
+  store i32 %i.05, ptr %arrayidx.phi, align 4
   %inc = add i32 %i.05, 1
-  %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1
-  store i32 %inc, i32* %arrayidx.inc, align 4
+  %arrayidx.inc = getelementptr i32, ptr %arrayidx.phi, i32 1
+  store i32 %inc, ptr %arrayidx.inc, align 4
   %inc.1 = add i32 %i.05, 2
-  %arrayidx.inc.1 = getelementptr i32, i32* %arrayidx.phi, i32 2
-  store i32 %inc.1, i32* %arrayidx.inc.1, align 4
+  %arrayidx.inc.1 = getelementptr i32, ptr %arrayidx.phi, i32 2
+  store i32 %inc.1, ptr %arrayidx.inc.1, align 4
   %inc.2 = add i32 %i.05, 3
-  %arrayidx.inc.2 = getelementptr i32, i32* %arrayidx.phi, i32 3
-  store i32 %inc.2, i32* %arrayidx.inc.2, align 4
+  %arrayidx.inc.2 = getelementptr i32, ptr %arrayidx.phi, i32 3
+  store i32 %inc.2, ptr %arrayidx.inc.2, align 4
   %inc.3 = add i32 %i.05, 4
-  %arrayidx.inc.3 = getelementptr i32, i32* %arrayidx.phi, i32 4
-  store i32 %inc.3, i32* %arrayidx.inc.3, align 4
+  %arrayidx.inc.3 = getelementptr i32, ptr %arrayidx.phi, i32 4
+  store i32 %inc.3, ptr %arrayidx.inc.3, align 4
   %inc.4 = add i32 %i.05, 5
-  %arrayidx.inc.4 = getelementptr i32, i32* %arrayidx.phi, i32 5
-  store i32 %inc.4, i32* %arrayidx.inc.4, align 4
+  %arrayidx.inc.4 = getelementptr i32, ptr %arrayidx.phi, i32 5
+  store i32 %inc.4, ptr %arrayidx.inc.4, align 4
   %inc.5 = add i32 %i.05, 6
-  %arrayidx.inc.5 = getelementptr i32, i32* %arrayidx.phi, i32 6
-  store i32 %inc.5, i32* %arrayidx.inc.5, align 4
+  %arrayidx.inc.5 = getelementptr i32, ptr %arrayidx.phi, i32 6
+  store i32 %inc.5, ptr %arrayidx.inc.5, align 4
   %inc.6 = add i32 %i.05, 7
-  %arrayidx.inc.6 = getelementptr i32, i32* %arrayidx.phi, i32 7
-  store i32 %inc.6, i32* %arrayidx.inc.6, align 4
+  %arrayidx.inc.6 = getelementptr i32, ptr %arrayidx.phi, i32 7
+  store i32 %inc.6, ptr %arrayidx.inc.6, align 4
   %inc.7 = add i32 %i.05, 8
   %exitcond.7 = icmp slt i32 %inc.7, %maxval
-  %arrayidx.inc.7 = getelementptr i32, i32* %arrayidx.phi, i32 8
+  %arrayidx.inc.7 = getelementptr i32, ptr %arrayidx.phi, i32 8
   br i1 %exitcond.7, label %for.body, label %for.end.loopexit.ur-lcssa
 
 for.end.loopexit.ur-lcssa:
@@ -55,17 +55,17 @@ for.end.loopexit.ur-lcssa:
   br i1 %1, label %for.end, label %for.body.ur.preheader
 
 for.body.ur.preheader:
-  %arrayidx.phi.ur.ph = phi i32* [ %C, %for.body.preheader ], [ %arrayidx.inc.7, %for.end.loopexit.ur-lcssa ]
+  %arrayidx.phi.ur.ph = phi ptr [ %C, %for.body.preheader ], [ %arrayidx.inc.7, %for.end.loopexit.ur-lcssa ]
   %i.05.ur.ph = phi i32 [ 0, %for.body.preheader ], [ %inc.7, %for.end.loopexit.ur-lcssa ]
   br label %for.body.ur
 
 for.body.ur:
-  %arrayidx.phi.ur = phi i32* [ %arrayidx.inc.ur, %for.body.ur ], [ %arrayidx.phi.ur.ph, %for.body.ur.preheader ]
+  %arrayidx.phi.ur = phi ptr [ %arrayidx.inc.ur, %for.body.ur ], [ %arrayidx.phi.ur.ph, %for.body.ur.preheader ]
   %i.05.ur = phi i32 [ %inc.ur, %for.body.ur ], [ %i.05.ur.ph, %for.body.ur.preheader ]
-  store i32 %i.05.ur, i32* %arrayidx.phi.ur, align 4
+  store i32 %i.05.ur, ptr %arrayidx.phi.ur, align 4
   %inc.ur = add i32 %i.05.ur, 1
   %exitcond.ur = icmp eq i32 %inc.ur, %N
-  %arrayidx.inc.ur = getelementptr i32, i32* %arrayidx.phi.ur, i32 1
+  %arrayidx.inc.ur = getelementptr i32, ptr %arrayidx.phi.ur, i32 1
   br i1 %exitcond.ur, label %for.end.loopexit, label %for.body.ur
 
 for.end.loopexit:

diff  --git a/llvm/test/CodeGen/Hexagon/hwloop5.ll b/llvm/test/CodeGen/Hexagon/hwloop5.ll
index f4990dabebb9d..b8b7745e5e36f 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop5.ll
+++ b/llvm/test/CodeGen/Hexagon/hwloop5.ll
@@ -44,19 +44,17 @@ polly.loop_if:
 
 polly.stmt.for.body:
   %addp_vec28 = phi <2 x i32> [ zeroinitializer, %polly.loop_preheader ], [ %addp_vec, %polly.stmt.for.body ]
-  %scevgep.phi = phi i32* [ getelementptr inbounds ([1000 x i32], [1000 x i32]* @A, i32 0, i32 0), %polly.loop_preheader ], [ %scevgep.inc, %polly.stmt.for.body ]
-  %scevgep9.phi = phi i32* [ getelementptr inbounds ([1000 x i32], [1000 x i32]* @B, i32 0, i32 0), %polly.loop_preheader ], [ %scevgep9.inc, %polly.stmt.for.body ]
+  %scevgep.phi = phi ptr [ @A, %polly.loop_preheader ], [ %scevgep.inc, %polly.stmt.for.body ]
+  %scevgep9.phi = phi ptr [ @B, %polly.loop_preheader ], [ %scevgep9.inc, %polly.stmt.for.body ]
   %polly.indvar = phi i32 [ 0, %polly.loop_preheader ], [ %polly.indvar_next, %polly.stmt.for.body ]
-  %vector_ptr = bitcast i32* %scevgep.phi to <2 x i32>*
-  %_p_vec_full = load <2 x i32>, <2 x i32>* %vector_ptr, align 8
-  %vector_ptr10 = bitcast i32* %scevgep9.phi to <2 x i32>*
-  %_p_vec_full11 = load <2 x i32>, <2 x i32>* %vector_ptr10, align 8
+  %_p_vec_full = load <2 x i32>, ptr %scevgep.phi, align 8
+  %_p_vec_full11 = load <2 x i32>, ptr %scevgep9.phi, align 8
   %mulp_vec = mul <2 x i32> %_p_vec_full11, %_p_vec_full
   %addp_vec = add <2 x i32> %mulp_vec, %addp_vec28
   %polly.indvar_next = add nsw i32 %polly.indvar, 2
   %polly.loop_cond = icmp eq i32 %polly.indvar, %polly.adjust_ub
-  %scevgep.inc = getelementptr i32, i32* %scevgep.phi, i32 2
-  %scevgep9.inc = getelementptr i32, i32* %scevgep9.phi, i32 2
+  %scevgep.inc = getelementptr i32, ptr %scevgep.phi, i32 2
+  %scevgep9.inc = getelementptr i32, ptr %scevgep9.phi, i32 2
   br i1 %polly.loop_cond, label %polly.loop_exit.loopexit, label %polly.stmt.for.body
 
 polly.loop_preheader:
@@ -72,11 +70,11 @@ polly.loop_if13:
 polly.stmt.for.body22:
   %p_add30 = phi i32 [ %p_add34, %polly.loop_preheader15 ], [ %p_add, %polly.stmt.for.body22 ]
   %polly.indvar18 = phi i32 [ %merge.lb, %polly.loop_preheader15 ], [ %polly.indvar_next19, %polly.stmt.for.body22 ]
-  %5 = tail call i32 @llvm.annotation.i32(i32 %polly.indvar18, i8* null, i8* null, i32 0), !polly.loop.smallTripCount !0
-  %scevgep23 = getelementptr [1000 x i32], [1000 x i32]* @A, i32 0, i32 %polly.indvar18
-  %_p_scalar_ = load i32, i32* %scevgep23, align 4
-  %scevgep24 = getelementptr [1000 x i32], [1000 x i32]* @B, i32 0, i32 %polly.indvar18
-  %_p_scalar_25 = load i32, i32* %scevgep24, align 4
+  %5 = tail call i32 @llvm.annotation.i32(i32 %polly.indvar18, ptr null, ptr null, i32 0), !polly.loop.smallTripCount !0
+  %scevgep23 = getelementptr [1000 x i32], ptr @A, i32 0, i32 %polly.indvar18
+  %_p_scalar_ = load i32, ptr %scevgep23, align 4
+  %scevgep24 = getelementptr [1000 x i32], ptr @B, i32 0, i32 %polly.indvar18
+  %_p_scalar_25 = load i32, ptr %scevgep24, align 4
   %p_mul = mul nsw i32 %_p_scalar_25, %_p_scalar_
   %p_add = add nsw i32 %p_mul, %p_add30
   %polly.indvar_next19 = add nsw i32 %polly.indvar18, 1
@@ -88,6 +86,6 @@ polly.loop_preheader15:
   br label %polly.stmt.for.body22
 }
 
-declare i32 @llvm.annotation.i32(i32, i8*, i8*, i32) #1
+declare i32 @llvm.annotation.i32(i32, ptr, ptr, i32) #1
 
 !0 = !{}

diff  --git a/llvm/test/CodeGen/Hexagon/hx_V6_lo_hi.ll b/llvm/test/CodeGen/Hexagon/hx_V6_lo_hi.ll
index 5c076d7eed1a7..909acaa7b66d9 100644
--- a/llvm/test/CodeGen/Hexagon/hx_V6_lo_hi.ll
+++ b/llvm/test/CodeGen/Hexagon/hx_V6_lo_hi.ll
@@ -5,19 +5,16 @@
 ; CHECK-NOT: v{{[0-9]+}} = vand(v{{[0-9]+}},v{{[0-9]+}})
 
 ; Function Attrs: nounwind
-define void @f0(i8* nocapture readonly %a0, i8* nocapture readonly %a1, i32 %a2, i8* nocapture %a3, i32 %a4, i32 %a5) #0 {
+define void @f0(ptr nocapture readonly %a0, ptr nocapture readonly %a1, i32 %a2, ptr nocapture %a3, i32 %a4, i32 %a5) #0 {
 b0:
-  %v0 = bitcast i8* %a1 to i64*
-  %v1 = load i64, i64* %v0, align 8, !tbaa !0
+  %v1 = load i64, ptr %a1, align 8, !tbaa !0
   %v2 = shl i64 %v1, 8
   %v3 = trunc i64 %v2 to i32
   %v4 = trunc i64 %v1 to i32
   %v5 = and i32 %v4, 16777215
-  %v6 = bitcast i8* %a0 to <16 x i32>*
-  %v7 = load <16 x i32>, <16 x i32>* %v6, align 64, !tbaa !4
-  %v8 = getelementptr inbounds i8, i8* %a0, i32 32
-  %v9 = bitcast i8* %v8 to <16 x i32>*
-  %v10 = load <16 x i32>, <16 x i32>* %v9, align 64, !tbaa !4
+  %v7 = load <16 x i32>, ptr %a0, align 64, !tbaa !4
+  %v8 = getelementptr inbounds i8, ptr %a0, i32 32
+  %v10 = load <16 x i32>, ptr %v8, align 64, !tbaa !4
   %v11 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v10, <16 x i32> %v7)
   %v12 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %v11, i32 %v5, i32 0)
   %v13 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %v11, i32 %v3, i32 0)
@@ -25,11 +22,9 @@ b0:
   %v15 = tail call <16 x i32> @llvm.hexagon.V6.vasrwuhsat(<16 x i32> %v14, <16 x i32> %v14, i32 %a2)
   %v16 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v13)
   %v17 = tail call <16 x i32> @llvm.hexagon.V6.vasrwuhsat(<16 x i32> %v16, <16 x i32> %v16, i32 %a2)
-  %v18 = getelementptr inbounds i8, i8* %a3, i32 32
-  %v19 = bitcast i8* %v18 to <16 x i32>*
-  store <16 x i32> %v15, <16 x i32>* %v19, align 64, !tbaa !4
-  %v20 = bitcast i8* %a3 to <16 x i32>*
-  store <16 x i32> %v17, <16 x i32>* %v20, align 64, !tbaa !4
+  %v18 = getelementptr inbounds i8, ptr %a3, i32 32
+  store <16 x i32> %v15, ptr %v18, align 64, !tbaa !4
+  store <16 x i32> %v17, ptr %a3, align 64, !tbaa !4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/i128-bitop.ll b/llvm/test/CodeGen/Hexagon/i128-bitop.ll
index 05186d8cb5a12..fc7cc100fece7 100644
--- a/llvm/test/CodeGen/Hexagon/i128-bitop.ll
+++ b/llvm/test/CodeGen/Hexagon/i128-bitop.ll
@@ -7,12 +7,11 @@ target triple = "hexagon"
 %s.1 = type { [4 x i32] }
 %s.2 = type { i128 }
 
- at g0 = external global %s.0*
+ at g0 = external global ptr
 
 ; Function Attrs: nounwind ssp
-define void @f0(%s.2* nocapture %a0, i32 %a1) #0 {
+define void @f0(ptr nocapture %a0, i32 %a1) #0 {
 b0:
-  %v0 = getelementptr inbounds %s.2, %s.2* %a0, i32 0, i32 0
   br label %b1
 
 b1:                                               ; preds = %b4, %b3, %b0
@@ -29,15 +28,15 @@ b3:                                               ; preds = %b2, %b1
   %v2 = phi i32 [ 1, %b2 ], [ 0, %b1 ]
   %v3 = phi i128 [ 64, %b2 ], [ 32, %b1 ]
   %v4 = phi i128 [ -79228162495817593519834398721, %b2 ], [ -18446744069414584321, %b1 ]
-  %v5 = load %s.0*, %s.0** @g0, align 4
-  %v6 = getelementptr inbounds %s.0, %s.0* %v5, i32 0, i32 2, i32 %a1, i32 0, i32 %v2
-  %v7 = load i32, i32* %v6, align 4
+  %v5 = load ptr, ptr @g0, align 4
+  %v6 = getelementptr inbounds %s.0, ptr %v5, i32 0, i32 2, i32 %a1, i32 0, i32 %v2
+  %v7 = load i32, ptr %v6, align 4
   %v8 = zext i32 %v7 to i128
-  %v9 = load i128, i128* %v0, align 4
+  %v9 = load i128, ptr %a0, align 4
   %v10 = shl nuw nsw i128 %v8, %v3
   %v11 = and i128 %v9, %v4
   %v12 = or i128 %v11, %v10
-  store i128 %v12, i128* %v0, align 4
+  store i128 %v12, ptr %a0, align 4
   %v13 = add i32 %v1, 1
   br label %b1
 
@@ -51,9 +50,8 @@ b5:                                               ; preds = %b4
 }
 
 ; Function Attrs: nounwind ssp
-define void @f1(%s.2* nocapture %a0, i32 %a1) #0 {
+define void @f1(ptr nocapture %a0, i32 %a1) #0 {
 b0:
-  %v0 = getelementptr inbounds %s.2, %s.2* %a0, i32 0, i32 0
   br label %b1
 
 b1:                                               ; preds = %b5, %b4, %b0
@@ -64,22 +62,22 @@ b1:                                               ; preds = %b5, %b4, %b0
   ]
 
 b2:                                               ; preds = %b1
-  %v2 = load %s.0*, %s.0** @g0, align 4
-  %v3 = getelementptr inbounds %s.0, %s.0* %v2, i32 0, i32 2, i32 %a1, i32 0, i32 0
-  %v4 = load i32, i32* %v3, align 4
+  %v2 = load ptr, ptr @g0, align 4
+  %v3 = getelementptr inbounds %s.0, ptr %v2, i32 0, i32 2, i32 %a1, i32 0, i32 0
+  %v4 = load i32, ptr %v3, align 4
   %v5 = zext i32 %v4 to i128
-  %v6 = load i128, i128* %v0, align 4
+  %v6 = load i128, ptr %a0, align 4
   %v7 = shl nuw nsw i128 %v5, 32
   %v8 = and i128 %v6, -18446744069414584321
   %v9 = or i128 %v8, %v7
   br label %b4
 
 b3:                                               ; preds = %b1
-  %v10 = load %s.0*, %s.0** @g0, align 4
-  %v11 = getelementptr inbounds %s.0, %s.0* %v10, i32 0, i32 2, i32 %a1, i32 0, i32 1
-  %v12 = load i32, i32* %v11, align 4
+  %v10 = load ptr, ptr @g0, align 4
+  %v11 = getelementptr inbounds %s.0, ptr %v10, i32 0, i32 2, i32 %a1, i32 0, i32 1
+  %v12 = load i32, ptr %v11, align 4
   %v13 = zext i32 %v12 to i128
-  %v14 = load i128, i128* %v0, align 4
+  %v14 = load i128, ptr %a0, align 4
   %v15 = shl nuw nsw i128 %v13, 64
   %v16 = and i128 %v14, -79228162495817593519834398721
   %v17 = or i128 %v16, %v15
@@ -87,7 +85,7 @@ b3:                                               ; preds = %b1
 
 b4:                                               ; preds = %b3, %b2
   %v18 = phi i128 [ %v17, %b3 ], [ %v9, %b2 ]
-  store i128 %v18, i128* %v0, align 4
+  store i128 %v18, ptr %a0, align 4
   %v19 = add i32 %v1, 1
   br label %b1
 

diff  --git a/llvm/test/CodeGen/Hexagon/i16_VarArg.ll b/llvm/test/CodeGen/Hexagon/i16_VarArg.ll
index af2682edc4b38..6f0e0b602af06 100644
--- a/llvm/test/CodeGen/Hexagon/i16_VarArg.ll
+++ b/llvm/test/CodeGen/Hexagon/i16_VarArg.ll
@@ -10,12 +10,12 @@
 @g6 = global double 2.000000e+00
 @g7 = global double 5.000000e+00
 
-declare i32 @f0(i8*, ...) #0
+declare i32 @f0(ptr, ...) #0
 
 define i32 @f1() #0 {
 b0:
-  %v0 = load double, double* @g6
-  %v1 = load double, double* @g7
+  %v0 = load double, ptr @g6
+  %v1 = load double, ptr @g7
   %v2 = fcmp olt double %v0, %v1
   %v3 = fcmp ole double %v0, %v1
   %v4 = fcmp ogt double %v0, %v1
@@ -23,13 +23,7 @@ b0:
   %v6 = fcmp oeq double %v0, %v1
   %v7 = fcmp une double %v0, %v1
   %v8 = zext i1 %v2 to i16
-  %v9 = getelementptr [12 x i8], [12 x i8]* @g0, i64 0, i64 0
-  %v10 = getelementptr [13 x i8], [13 x i8]* @g1, i64 0, i64 0
-  %v11 = getelementptr [12 x i8], [12 x i8]* @g2, i64 0, i64 0
-  %v12 = getelementptr [13 x i8], [13 x i8]* @g3, i64 0, i64 0
-  %v13 = getelementptr [13 x i8], [13 x i8]* @g4, i64 0, i64 0
-  %v14 = getelementptr [13 x i8], [13 x i8]* @g5, i64 0, i64 0
-  %v15 = call i32 (i8*, ...) @f0(i8* %v9, i16 %v8)
+  %v15 = call i32 (ptr, ...) @f0(ptr @g0, i16 %v8)
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/i1_VarArg.ll b/llvm/test/CodeGen/Hexagon/i1_VarArg.ll
index 01619bc542460..a1b1cebc3e2f5 100644
--- a/llvm/test/CodeGen/Hexagon/i1_VarArg.ll
+++ b/llvm/test/CodeGen/Hexagon/i1_VarArg.ll
@@ -10,30 +10,24 @@
 @g6 = global double 2.000000e+00
 @g7 = global double 5.000000e+00
 
-declare i32 @f0(i8*, ...) #0
+declare i32 @f0(ptr, ...) #0
 
 define i32 @f1() #0 {
 b0:
-  %v0 = load double, double* @g6
-  %v1 = load double, double* @g7
+  %v0 = load double, ptr @g6
+  %v1 = load double, ptr @g7
   %v2 = fcmp olt double %v0, %v1
   %v3 = fcmp ole double %v0, %v1
   %v4 = fcmp ogt double %v0, %v1
   %v5 = fcmp oge double %v0, %v1
   %v6 = fcmp oeq double %v0, %v1
   %v7 = fcmp une double %v0, %v1
-  %v8 = getelementptr [12 x i8], [12 x i8]* @g0, i64 0, i64 0
-  %v9 = getelementptr [13 x i8], [13 x i8]* @g1, i64 0, i64 0
-  %v10 = getelementptr [12 x i8], [12 x i8]* @g2, i64 0, i64 0
-  %v11 = getelementptr [13 x i8], [13 x i8]* @g3, i64 0, i64 0
-  %v12 = getelementptr [13 x i8], [13 x i8]* @g4, i64 0, i64 0
-  %v13 = getelementptr [13 x i8], [13 x i8]* @g5, i64 0, i64 0
-  %v14 = call i32 (i8*, ...) @f0(i8* %v8, i1 %v2)
-  %v15 = call i32 (i8*, ...) @f0(i8* %v9, i1 %v3)
-  %v16 = call i32 (i8*, ...) @f0(i8* %v10, i1 %v4)
-  %v17 = call i32 (i8*, ...) @f0(i8* %v11, i1 %v5)
-  %v18 = call i32 (i8*, ...) @f0(i8* %v12, i1 %v6)
-  %v19 = call i32 (i8*, ...) @f0(i8* %v13, i1 %v7)
+  %v14 = call i32 (ptr, ...) @f0(ptr @g0, i1 %v2)
+  %v15 = call i32 (ptr, ...) @f0(ptr @g1, i1 %v3)
+  %v16 = call i32 (ptr, ...) @f0(ptr @g2, i1 %v4)
+  %v17 = call i32 (ptr, ...) @f0(ptr @g3, i1 %v5)
+  %v18 = call i32 (ptr, ...) @f0(ptr @g4, i1 %v6)
+  %v19 = call i32 (ptr, ...) @f0(ptr @g5, i1 %v7)
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/i8_VarArg.ll b/llvm/test/CodeGen/Hexagon/i8_VarArg.ll
index 247952d0c5cab..05c70dbc2df66 100644
--- a/llvm/test/CodeGen/Hexagon/i8_VarArg.ll
+++ b/llvm/test/CodeGen/Hexagon/i8_VarArg.ll
@@ -10,12 +10,12 @@
 @g6 = global double 2.000000e+00
 @g7 = global double 5.000000e+00
 
-declare i32 @f0(i8*, ...) #0
+declare i32 @f0(ptr, ...) #0
 
 define i32 @f1() #0 {
 b0:
-  %v0 = load double, double* @g6
-  %v1 = load double, double* @g7
+  %v0 = load double, ptr @g6
+  %v1 = load double, ptr @g7
   %v2 = fcmp olt double %v0, %v1
   %v3 = fcmp ole double %v0, %v1
   %v4 = fcmp ogt double %v0, %v1
@@ -23,13 +23,7 @@ b0:
   %v6 = fcmp oeq double %v0, %v1
   %v7 = fcmp une double %v0, %v1
   %v8 = zext i1 %v2 to i8
-  %v9 = getelementptr [12 x i8], [12 x i8]* @g0, i64 0, i64 0
-  %v10 = getelementptr [13 x i8], [13 x i8]* @g1, i64 0, i64 0
-  %v11 = getelementptr [12 x i8], [12 x i8]* @g2, i64 0, i64 0
-  %v12 = getelementptr [13 x i8], [13 x i8]* @g3, i64 0, i64 0
-  %v13 = getelementptr [13 x i8], [13 x i8]* @g4, i64 0, i64 0
-  %v14 = getelementptr [13 x i8], [13 x i8]* @g5, i64 0, i64 0
-  %v15 = call i32 (i8*, ...) @f0(i8* %v9, i8 %v8)
+  %v15 = call i32 (ptr, ...) @f0(ptr @g0, i8 %v8)
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/idxload-with-zero-offset.ll b/llvm/test/CodeGen/Hexagon/idxload-with-zero-offset.ll
index fc9fe8ac80fae..11f0363f8c7f4 100644
--- a/llvm/test/CodeGen/Hexagon/idxload-with-zero-offset.ll
+++ b/llvm/test/CodeGen/Hexagon/idxload-with-zero-offset.ll
@@ -3,68 +3,68 @@
 
 ; load word
 
-define i32 @load_w(i32* nocapture %a, i32 %n, i32 %m) nounwind {
+define i32 @load_w(ptr nocapture %a, i32 %n, i32 %m) nounwind {
 ; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}+r{{[0-9]+}}<<#2)
 entry:
   %tmp = add i32 %n, %m
-  %scevgep9 = getelementptr i32, i32* %a, i32 %tmp
-  %val = load i32, i32* %scevgep9, align 4
+  %scevgep9 = getelementptr i32, ptr %a, i32 %tmp
+  %val = load i32, ptr %scevgep9, align 4
   ret i32 %val
 }
 
 ; load unsigned half word
 
-define i16 @load_uh(i16* nocapture %a, i32 %n, i32 %m) nounwind {
+define i16 @load_uh(ptr nocapture %a, i32 %n, i32 %m) nounwind {
 ; CHECK: r{{[0-9]+}} = memuh(r{{[0-9]+}}+r{{[0-9]+}}<<#1)
 entry:
   %tmp = add i32 %n, %m
-  %scevgep9 = getelementptr i16, i16* %a, i32 %tmp
-  %val = load i16, i16* %scevgep9, align 2
+  %scevgep9 = getelementptr i16, ptr %a, i32 %tmp
+  %val = load i16, ptr %scevgep9, align 2
   ret i16 %val
 }
 
 ; load signed half word
 
-define i32 @load_h(i16* nocapture %a, i32 %n, i32 %m) nounwind {
+define i32 @load_h(ptr nocapture %a, i32 %n, i32 %m) nounwind {
 ; CHECK: r{{[0-9]+}} = memh(r{{[0-9]+}}+r{{[0-9]+}}<<#1)
 entry:
   %tmp = add i32 %n, %m
-  %scevgep9 = getelementptr i16, i16* %a, i32 %tmp
-  %val = load i16, i16* %scevgep9, align 2
+  %scevgep9 = getelementptr i16, ptr %a, i32 %tmp
+  %val = load i16, ptr %scevgep9, align 2
   %conv = sext i16 %val to i32
   ret i32 %conv
 }
 
 ; load unsigned byte
 
-define i8 @load_ub(i8* nocapture %a, i32 %n, i32 %m) nounwind {
+define i8 @load_ub(ptr nocapture %a, i32 %n, i32 %m) nounwind {
 ; CHECK: r{{[0-9]+}} = memub(r{{[0-9]+}}+r{{[0-9]+}}<<#0)
 entry:
   %tmp = add i32 %n, %m
-  %scevgep9 = getelementptr i8, i8* %a, i32 %tmp
-  %val = load i8, i8* %scevgep9, align 1
+  %scevgep9 = getelementptr i8, ptr %a, i32 %tmp
+  %val = load i8, ptr %scevgep9, align 1
   ret i8 %val
 }
 
 ; load signed byte
 
-define i32 @foo_2(i8* nocapture %a, i32 %n, i32 %m) nounwind {
+define i32 @foo_2(ptr nocapture %a, i32 %n, i32 %m) nounwind {
 ; CHECK: r{{[0-9]+}} = memb(r{{[0-9]+}}+r{{[0-9]+}}<<#0)
 entry:
   %tmp = add i32 %n, %m
-  %scevgep9 = getelementptr i8, i8* %a, i32 %tmp
-  %val = load i8, i8* %scevgep9, align 1
+  %scevgep9 = getelementptr i8, ptr %a, i32 %tmp
+  %val = load i8, ptr %scevgep9, align 1
   %conv = sext i8 %val to i32
   ret i32 %conv
 }
 
 ; load doubleword
 
-define i64 @load_d(i64* nocapture %a, i32 %n, i32 %m) nounwind {
+define i64 @load_d(ptr nocapture %a, i32 %n, i32 %m) nounwind {
 ; CHECK: r{{[0-9]+}}:{{[0-9]+}} = memd(r{{[0-9]+}}+r{{[0-9]+}}<<#3)
 entry:
   %tmp = add i32 %n, %m
-  %scevgep9 = getelementptr i64, i64* %a, i32 %tmp
-  %val = load i64, i64* %scevgep9, align 8
+  %scevgep9 = getelementptr i64, ptr %a, i32 %tmp
+  %val = load i64, ptr %scevgep9, align 8
   ret i64 %val
 }

diff  --git a/llvm/test/CodeGen/Hexagon/ifcvt-diamond-bad.ll b/llvm/test/CodeGen/Hexagon/ifcvt-diamond-bad.ll
index e4bee8354a7c5..507e3b25bacc0 100644
--- a/llvm/test/CodeGen/Hexagon/ifcvt-diamond-bad.ll
+++ b/llvm/test/CodeGen/Hexagon/ifcvt-diamond-bad.ll
@@ -15,8 +15,8 @@ declare void @bar(i32, i32) #2
 define void @fred(i8 signext %a, i8 signext %b) #1 {
 entry:
   %i = sext i8 %a to i32
-  %t = getelementptr inbounds [3 x %struct.t1], [3 x %struct.t1]* @var, i32 0, i32 %i, i32 3, i32 0
-  %0 = load i8, i8* %t, align 8
+  %t = getelementptr inbounds [3 x %struct.t1], ptr @var, i32 0, i32 %i, i32 3, i32 0
+  %0 = load i8, ptr %t, align 8
   switch i8 %0, label %if.end14 [
     i8 1, label %if.then
     i8 0, label %do.body
@@ -24,8 +24,8 @@ entry:
 
 if.then:                                          ; preds = %entry
   %j = sext i8 %b to i32
-  %u = getelementptr inbounds [3 x %struct.t1], [3 x %struct.t1]* @var, i32 0, i32 %i, i32 3, i32 1, i32 %j
-  store i8 1, i8* %u, align 1
+  %u = getelementptr inbounds [3 x %struct.t1], ptr @var, i32 0, i32 %i, i32 3, i32 1, i32 %j
+  store i8 1, ptr %u, align 1
   tail call void @foo() #0
   br label %if.end14
 

diff  --git a/llvm/test/CodeGen/Hexagon/ifcvt-diamond-bug-2016-08-26.ll b/llvm/test/CodeGen/Hexagon/ifcvt-diamond-bug-2016-08-26.ll
index 2a6767c644a3d..be2b7b4d60107 100644
--- a/llvm/test/CodeGen/Hexagon/ifcvt-diamond-bug-2016-08-26.ll
+++ b/llvm/test/CodeGen/Hexagon/ifcvt-diamond-bug-2016-08-26.ll
@@ -23,13 +23,13 @@ entry:
 ; CHECK-DAG: if (!p0) memh(##t) = [[R3]]
 ; CHECK-DAG: if (p0) memh(##t) = [[R4]]
 if.then200:                                       ; preds = %entry
-  store i16 %x1, i16* getelementptr inbounds (%struct.0, %struct.0* @t, i32 0, i32 0), align 2
-  store i16 %z1, i16* getelementptr inbounds (%struct.0, %struct.0* @t, i32 0, i32 1), align 2
+  store i16 %x1, ptr @t, align 2
+  store i16 %z1, ptr getelementptr inbounds (%struct.0, ptr @t, i32 0, i32 1), align 2
   br label %if.end202
 
 if.else201:                                       ; preds = %entry
   %y1 = add i16 %y, 3
-  store i16 %y1, i16* getelementptr inbounds (%struct.0, %struct.0* @t, i32 0, i32 0), align 2
+  store i16 %y1, ptr @t, align 2
   br label %if.end202
 
 if.end202:                                        ; preds = %if.else201, %if.then200

diff  --git a/llvm/test/CodeGen/Hexagon/ifcvt-edge-weight.ll b/llvm/test/CodeGen/Hexagon/ifcvt-edge-weight.ll
index 3fa095e857a7c..b32f4b89484e0 100644
--- a/llvm/test/CodeGen/Hexagon/ifcvt-edge-weight.ll
+++ b/llvm/test/CodeGen/Hexagon/ifcvt-edge-weight.ll
@@ -32,8 +32,8 @@ if.end2:
 
 if.end:
   %storemerge = phi i32 [ %and, %if.else ], [ %shl, %if.then ]
-  store i32 %storemerge, i32* @a, align 4
-  %0 = load i32, i32* @d, align 4
+  store i32 %storemerge, ptr @a, align 4
+  %0 = load i32, ptr @d, align 4
   %cmp2 = call i1 @pred()
   br i1 %cmp2, label %if.end2, label %if.else2, !prof !2
 

diff  --git a/llvm/test/CodeGen/Hexagon/ignore-terminal-mbb.ll b/llvm/test/CodeGen/Hexagon/ignore-terminal-mbb.ll
index 175318d996715..690ccb9ba7a53 100644
--- a/llvm/test/CodeGen/Hexagon/ignore-terminal-mbb.ll
+++ b/llvm/test/CodeGen/Hexagon/ignore-terminal-mbb.ll
@@ -11,7 +11,7 @@ b0:
   br i1 undef, label %b2, label %b1
 
 b1:                                               ; preds = %b0
-  store i32 0, i32* undef, align 4, !tbaa !0
+  store i32 0, ptr undef, align 4, !tbaa !0
   unreachable
 
 b2:                                               ; preds = %b0

diff  --git a/llvm/test/CodeGen/Hexagon/indirect-br.ll b/llvm/test/CodeGen/Hexagon/indirect-br.ll
index e8bab5f247343..781481954f4e0 100644
--- a/llvm/test/CodeGen/Hexagon/indirect-br.ll
+++ b/llvm/test/CodeGen/Hexagon/indirect-br.ll
@@ -2,9 +2,9 @@
 
 ; CHECK: jumpr r{{[0-9]+}}
 
-define i32 @check_indirect_br(i8* %target) nounwind {
+define i32 @check_indirect_br(ptr %target) nounwind {
 entry:
-  indirectbr i8* %target, [label %test_label]
+  indirectbr ptr %target, [label %test_label]
 
 test_label:
   br label %ret

diff  --git a/llvm/test/CodeGen/Hexagon/initial-exec.ll b/llvm/test/CodeGen/Hexagon/initial-exec.ll
index f46ce1e925546..1d9527d053248 100644
--- a/llvm/test/CodeGen/Hexagon/initial-exec.ll
+++ b/llvm/test/CodeGen/Hexagon/initial-exec.ll
@@ -9,7 +9,7 @@ target triple = "hexagon-unknown--elf"
 ; CHECK-DAG: r{{[0-9]+}} = memw(##g1 at IE)
 define i32 @f0() {
 b0:
-  %v0 = load i32, i32* @g1, align 4
-  store i32 %v0, i32* @g0, align 4
+  %v0 = load i32, ptr @g1, align 4
+  store i32 %v0, ptr @g0, align 4
   ret i32 0
 }

diff  --git a/llvm/test/CodeGen/Hexagon/inline-asm-a.ll b/llvm/test/CodeGen/Hexagon/inline-asm-a.ll
index 08862d9233ad1..23e48fa0f444d 100644
--- a/llvm/test/CodeGen/Hexagon/inline-asm-a.ll
+++ b/llvm/test/CodeGen/Hexagon/inline-asm-a.ll
@@ -7,9 +7,9 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @foo(i32* %a, i32 %m, i32 %v) #0 {
+define void @foo(ptr %a, i32 %m, i32 %v) #0 {
 entry:
-  tail call void asm sideeffect "memw($0++$1) = $2", "r,a,r,~{memory}"(i32* %a, i32 %m, i32 %v)
+  tail call void asm sideeffect "memw($0++$1) = $2", "r,a,r,~{memory}"(ptr %a, i32 %m, i32 %v)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/inline-asm-bad-constraint.ll b/llvm/test/CodeGen/Hexagon/inline-asm-bad-constraint.ll
index 2c4e3f4ae4a0a..da638047ddb9a 100644
--- a/llvm/test/CodeGen/Hexagon/inline-asm-bad-constraint.ll
+++ b/llvm/test/CodeGen/Hexagon/inline-asm-bad-constraint.ll
@@ -9,7 +9,7 @@ define void @fred() #0 {
 entry:
   %a0 = alloca <16 x i32>, align 64
   %0 = call <16 x i32> asm sideeffect "$0 = vmem(r0)", "=r"()
-  store <16 x i32> %0, <16 x i32>* %a0, align 64
+  store <16 x i32> %0, ptr %a0, align 64
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/inline-asm-clobber-lr.ll b/llvm/test/CodeGen/Hexagon/inline-asm-clobber-lr.ll
index 99d740023bc33..06de6dad23d64 100644
--- a/llvm/test/CodeGen/Hexagon/inline-asm-clobber-lr.ll
+++ b/llvm/test/CodeGen/Hexagon/inline-asm-clobber-lr.ll
@@ -5,15 +5,14 @@ target triple = "hexagon"
 
 define internal fastcc void @f0() {
 b0:
-  %v0 = tail call i32* asm sideeffect "call 1f; r31.h = #hi(TH); r31.l = #lo(TH); jumpr r31; 1: $0 = r31", "=r,~{r28},~{r31}"()
-  %v1 = bitcast i32* %v0 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 bitcast (void (...)* @f1 to i8*), i8* align 4 %v1, i32 12, i1 false)
+  %v0 = tail call ptr asm sideeffect "call 1f; r31.h = #hi(TH); r31.l = #lo(TH); jumpr r31; 1: $0 = r31", "=r,~{r28},~{r31}"()
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 @f1, ptr align 4 %v0, i32 12, i1 false)
   ret void
 }
 
 declare void @f1(...)
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #0
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1) #0
 
 attributes #0 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/inline-asm-error.ll b/llvm/test/CodeGen/Hexagon/inline-asm-error.ll
index 0254836127b70..cf6bf51faea19 100644
--- a/llvm/test/CodeGen/Hexagon/inline-asm-error.ll
+++ b/llvm/test/CodeGen/Hexagon/inline-asm-error.ll
@@ -2,13 +2,13 @@
 
 ; CHECK: error: Don't know how to handle indirect register inputs yet for constraint 'r'
 
-%s.0 = type { i8*, i32, %s.1 }
+%s.0 = type { ptr, i32, %s.1 }
 %s.1 = type { %s.2 }
-%s.2 = type { i32, i8* }
+%s.2 = type { i32, ptr }
 
-define void @f0(%s.0* byval(%s.0) align 8 %a0) {
+define void @f0(ptr byval(%s.0) align 8 %a0) {
 b0:
-  call void asm sideeffect ".weak OFFSET_0;jump ##(OFFSET_0 + 0x14c15f0)", "*r"(%s.0* elementtype(%s.0) nonnull %a0), !srcloc !0
+  call void asm sideeffect ".weak OFFSET_0;jump ##(OFFSET_0 + 0x14c15f0)", "*r"(ptr elementtype(%s.0) nonnull %a0), !srcloc !0
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/inline-asm-hexagon.ll b/llvm/test/CodeGen/Hexagon/inline-asm-hexagon.ll
index c7c44a3856128..48313f5038f1e 100644
--- a/llvm/test/CodeGen/Hexagon/inline-asm-hexagon.ll
+++ b/llvm/test/CodeGen/Hexagon/inline-asm-hexagon.ll
@@ -5,12 +5,12 @@ target triple = "hexagon"
 ;CHECK: [[REGH:r[0-9]]]:[[REGL:[0-9]]] = memd_locked
 ;CHECK: HIGH([[REGH]])
 ;CHECK: LOW(r[[REGL]])
-define i32 @fred(i64* %free_list_ptr, i32** %item_ptr, i8** %free_item_ptr) nounwind {
+define i32 @fred(ptr %free_list_ptr, ptr %item_ptr, ptr %free_item_ptr) nounwind {
 entry:
-  %free_list_ptr.addr = alloca i64*, align 4
-  store i64* %free_list_ptr, i64** %free_list_ptr.addr, align 4
-  %0 = load i32*, i32** %item_ptr, align 4
-  %1 = call { i64, i32 } asm sideeffect "1:     $0 = memd_locked($5)\0A\09       $1 = HIGH(${0:H}) \0A\09       $1 = add($1,#1) \0A\09       memw($6) = LOW(${0:L}) \0A\09       $0 = combine($7,$1) \0A\09       memd_locked($5,p0) = $0 \0A\09       if !p0 jump 1b\0A\09", "=&r,=&r,=*m,=*m,r,r,r,r,*m,*m,~{p0}"(i64** elementtype(i64*) %free_list_ptr.addr, i8** elementtype(i8*) %free_item_ptr, i64 0, i64* %free_list_ptr, i8** %free_item_ptr, i32* %0, i64** elementtype(i64*) %free_list_ptr.addr, i8** elementtype(i8*) %free_item_ptr) nounwind
+  %free_list_ptr.addr = alloca ptr, align 4
+  store ptr %free_list_ptr, ptr %free_list_ptr.addr, align 4
+  %0 = load ptr, ptr %item_ptr, align 4
+  %1 = call { i64, i32 } asm sideeffect "1:     $0 = memd_locked($5)\0A\09       $1 = HIGH(${0:H}) \0A\09       $1 = add($1,#1) \0A\09       memw($6) = LOW(${0:L}) \0A\09       $0 = combine($7,$1) \0A\09       memd_locked($5,p0) = $0 \0A\09       if !p0 jump 1b\0A\09", "=&r,=&r,=*m,=*m,r,r,r,r,*m,*m,~{p0}"(ptr elementtype(ptr) %free_list_ptr.addr, ptr elementtype(ptr) %free_item_ptr, i64 0, ptr %free_list_ptr, ptr %free_item_ptr, ptr %0, ptr elementtype(ptr) %free_list_ptr.addr, ptr elementtype(ptr) %free_item_ptr) nounwind
   %asmresult1 = extractvalue { i64, i32 } %1, 1
   ret i32 %asmresult1
 }

diff  --git a/llvm/test/CodeGen/Hexagon/inline-asm-qv.ll b/llvm/test/CodeGen/Hexagon/inline-asm-qv.ll
index 624ab99496d4c..0577c6be7d819 100644
--- a/llvm/test/CodeGen/Hexagon/inline-asm-qv.ll
+++ b/llvm/test/CodeGen/Hexagon/inline-asm-qv.ll
@@ -8,11 +8,11 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @foo(<16 x i32> %v0, <16 x i32> %v1, <16 x i32>* nocapture %p) #0 {
+define void @foo(<16 x i32> %v0, <16 x i32> %v1, ptr nocapture %p) #0 {
 entry:
   %0 = tail call <64 x i1> asm "$0 = vgtw($1.w,$2.w)", "=q,v,v"(<16 x i32> %v0, <16 x i32> %v1) #1
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %0, i32 -1) #1
-  store <16 x i32> %1, <16 x i32>* %p, align 64
+  store <16 x i32> %1, ptr %p, align 64
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/inline-asm-vecpred128.ll b/llvm/test/CodeGen/Hexagon/inline-asm-vecpred128.ll
index 89ab13ada40b5..0a6dabb95e8b9 100644
--- a/llvm/test/CodeGen/Hexagon/inline-asm-vecpred128.ll
+++ b/llvm/test/CodeGen/Hexagon/inline-asm-vecpred128.ll
@@ -8,7 +8,7 @@ target triple = "hexagon"
 ; CHECK-LABEL: fred
 ; CHECK: if (q{{[0-3]}}) vmem
 define void @fred() #0 {
-  tail call void asm sideeffect "if ($0) vmem($1) = $2;", "q,r,v,~{memory}"(<128 x i1> undef, <32 x i32>* undef, <32 x i32> undef) #0
+  tail call void asm sideeffect "if ($0) vmem($1) = $2;", "q,r,v,~{memory}"(<128 x i1> undef, ptr undef, <32 x i32> undef) #0
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/inlineasm-output-template.ll b/llvm/test/CodeGen/Hexagon/inlineasm-output-template.ll
index 23281a484a2bb..2e0d95f7b7a45 100644
--- a/llvm/test/CodeGen/Hexagon/inlineasm-output-template.ll
+++ b/llvm/test/CodeGen/Hexagon/inlineasm-output-template.ll
@@ -13,7 +13,7 @@ define dso_local i32 @test_inlineasm_c_output_template0() {
 ; CHECK: TEST {{_?}}baz
 @baz = internal global i32 0, align 4
 define dso_local i32 @test_inlineasm_c_output_template1() {
-  tail call void asm sideeffect "//TEST ${0:c}", "i"(i32* nonnull @baz)
+  tail call void asm sideeffect "//TEST ${0:c}", "i"(ptr nonnull @baz)
   ret i32 43
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/insert-basic.ll b/llvm/test/CodeGen/Hexagon/insert-basic.ll
index 14ee735abd79c..3781c423fefc9 100644
--- a/llvm/test/CodeGen/Hexagon/insert-basic.ll
+++ b/llvm/test/CodeGen/Hexagon/insert-basic.ll
@@ -26,41 +26,36 @@ target triple = "hexagon"
 
 %struct.structx_t = type { [3 x i8], i8, [3 x i8], i8, [3 x i8], i8, [3 x i8], i8, [2 x i8], [2 x i8] }
 
-define void @foo(%struct.structx_t* nocapture %px, i32 %y1, i32 %y2, i32 %y3, i32 %y4, i32 %y5) nounwind {
+define void @foo(ptr nocapture %px, i32 %y1, i32 %y2, i32 %y3, i32 %y4, i32 %y5) nounwind {
 entry:
   %bf.value = and i32 %y1, 8388607
-  %0 = bitcast %struct.structx_t* %px to i32*
-  %1 = load i32, i32* %0, align 4
-  %2 = and i32 %1, -8388608
-  %3 = or i32 %2, %bf.value
-  store i32 %3, i32* %0, align 4
+  %0 = load i32, ptr %px, align 4
+  %1 = and i32 %0, -8388608
+  %2 = or i32 %1, %bf.value
+  store i32 %2, ptr %px, align 4
   %bf.value1 = and i32 %y2, 131071
-  %bf.field.offs = getelementptr %struct.structx_t, %struct.structx_t* %px, i32 0, i32 0, i32 4
-  %4 = bitcast i8* %bf.field.offs to i32*
-  %5 = load i32, i32* %4, align 4
-  %6 = and i32 %5, -131072
-  %7 = or i32 %6, %bf.value1
-  store i32 %7, i32* %4, align 4
+  %bf.field.offs = getelementptr %struct.structx_t, ptr %px, i32 0, i32 0, i32 4
+  %3 = load i32, ptr %bf.field.offs, align 4
+  %4 = and i32 %3, -131072
+  %5 = or i32 %4, %bf.value1
+  store i32 %5, ptr %bf.field.offs, align 4
   %bf.value2 = and i32 %y3, 262143
-  %bf.field.offs3 = getelementptr %struct.structx_t, %struct.structx_t* %px, i32 0, i32 0, i32 8
-  %8 = bitcast i8* %bf.field.offs3 to i32*
-  %9 = load i32, i32* %8, align 4
-  %10 = and i32 %9, -262144
-  %11 = or i32 %10, %bf.value2
-  store i32 %11, i32* %8, align 4
+  %bf.field.offs3 = getelementptr %struct.structx_t, ptr %px, i32 0, i32 0, i32 8
+  %6 = load i32, ptr %bf.field.offs3, align 4
+  %7 = and i32 %6, -262144
+  %8 = or i32 %7, %bf.value2
+  store i32 %8, ptr %bf.field.offs3, align 4
   %bf.value4 = and i32 %y4, 4194303
-  %bf.field.offs5 = getelementptr %struct.structx_t, %struct.structx_t* %px, i32 0, i32 0, i32 12
-  %12 = bitcast i8* %bf.field.offs5 to i32*
-  %13 = load i32, i32* %12, align 4
-  %14 = and i32 %13, -4194304
-  %15 = or i32 %14, %bf.value4
-  store i32 %15, i32* %12, align 4
+  %bf.field.offs5 = getelementptr %struct.structx_t, ptr %px, i32 0, i32 0, i32 12
+  %9 = load i32, ptr %bf.field.offs5, align 4
+  %10 = and i32 %9, -4194304
+  %11 = or i32 %10, %bf.value4
+  store i32 %11, ptr %bf.field.offs5, align 4
   %bf.value6 = and i32 %y5, 4095
-  %bf.field.offs7 = getelementptr %struct.structx_t, %struct.structx_t* %px, i32 0, i32 0, i32 16
-  %16 = bitcast i8* %bf.field.offs7 to i32*
-  %17 = load i32, i32* %16, align 4
-  %18 = and i32 %17, -4096
-  %19 = or i32 %18, %bf.value6
-  store i32 %19, i32* %16, align 4
+  %bf.field.offs7 = getelementptr %struct.structx_t, ptr %px, i32 0, i32 0, i32 16
+  %12 = load i32, ptr %bf.field.offs7, align 4
+  %13 = and i32 %12, -4096
+  %14 = or i32 %13, %bf.value6
+  store i32 %14, ptr %bf.field.offs7, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/insert.ll b/llvm/test/CodeGen/Hexagon/insert.ll
index 5d060515db756..cb6e8bbfb0aaf 100644
--- a/llvm/test/CodeGen/Hexagon/insert.ll
+++ b/llvm/test/CodeGen/Hexagon/insert.ll
@@ -6,7 +6,7 @@
 @g2 = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 8
 
 ; Function Attrs: nounwind
-declare i32 @f0(i8* nocapture, ...) #0
+declare i32 @f0(ptr nocapture, ...) #0
 
 ; Function Attrs: nounwind
 define i32 @f1() #0 {
@@ -15,10 +15,10 @@ b0:
 
 b1:                                               ; preds = %b3, %b1
   %v0 = phi i32 [ 0, %b3 ], [ %v5, %b1 ]
-  %v1 = getelementptr [512 x i8], [512 x i8]* @g1, i32 0, i32 %v0
-  %v2 = load i8, i8* %v1, align 1, !tbaa !0
+  %v1 = getelementptr [512 x i8], ptr @g1, i32 0, i32 %v0
+  %v2 = load i8, ptr %v1, align 1, !tbaa !0
   %v3 = zext i8 %v2 to i32
-  %v4 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g2, i32 0, i32 0), i32 %v3) #0
+  %v4 = tail call i32 (ptr, ...) @f0(ptr @g2, i32 %v3) #0
   %v5 = add nsw i32 %v0, 1
   %v6 = icmp eq i32 %v5, 512
   br i1 %v6, label %b2, label %b1
@@ -27,7 +27,7 @@ b2:                                               ; preds = %b1
   ret i32 0
 
 b3:                                               ; preds = %b4
-  tail call void @f2(i16* getelementptr inbounds ([512 x i16], [512 x i16]* @g0, i32 0, i32 0), i8* getelementptr inbounds ([512 x i8], [512 x i8]* @g1, i32 0, i32 0)) #0
+  tail call void @f2(ptr @g0, ptr @g1) #0
   br label %b1
 
 b4:                                               ; preds = %b4, %b0
@@ -45,14 +45,13 @@ b4:                                               ; preds = %b4, %b0
   %v18 = trunc <2 x i32> %v16 to <2 x i16>
   %v19 = trunc <2 x i32> %v17 to <2 x i16>
   %v20 = shufflevector <2 x i16> %v18, <2 x i16> %v19, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  %v21 = getelementptr [512 x i16], [512 x i16]* @g0, i32 0, i32 %v11
-  %v22 = bitcast i16* %v21 to <4 x i16>*
-  store <4 x i16> %v20, <4 x i16>* %v22, align 8
+  %v21 = getelementptr [512 x i16], ptr @g0, i32 0, i32 %v11
+  store <4 x i16> %v20, ptr %v21, align 8
   %v23 = icmp slt i64 %v10, 512
   br i1 %v23, label %b4, label %b3
 }
 
-declare void @f2(i16*, i8*)
+declare void @f2(ptr, ptr)
 
 attributes #0 = { nounwind }
 

diff  --git a/llvm/test/CodeGen/Hexagon/insert4.ll b/llvm/test/CodeGen/Hexagon/insert4.ll
index 3a0f9b999daeb..f88ae8dc5f7b8 100644
--- a/llvm/test/CodeGen/Hexagon/insert4.ll
+++ b/llvm/test/CodeGen/Hexagon/insert4.ll
@@ -10,10 +10,9 @@ target triple = "hexagon"
 
 %struct.a = type { i16 }
 
-define i32 @fun(%struct.a* nocapture %pData, i64 %c, i64* nocapture %d, i64* nocapture %e, i64* nocapture %f) #0 {
+define i32 @fun(ptr nocapture %pData, i64 %c, ptr nocapture %d, ptr nocapture %e, ptr nocapture %f) #0 {
 entry:
-  %g = getelementptr inbounds %struct.a, %struct.a* %pData, i32 0, i32 0
-  %0 = load i16, i16* %g, align 2, !tbaa !0
+  %0 = load i16, ptr %pData, align 2, !tbaa !0
   %conv185 = sext i16 %0 to i32
   %shr86 = ashr i32 %conv185, 2
   %cmp87 = icmp sgt i32 %shr86, 0
@@ -35,11 +34,11 @@ for.body.lr.ph:                                   ; preds = %entry
   br label %for.body
 
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
-  %arrayidx.phi = phi i64* [ %d, %for.body.lr.ph ], [ %arrayidx.inc, %for.body ]
-  %arrayidx30.phi = phi i64* [ %f, %for.body.lr.ph ], [ %arrayidx30.inc, %for.body ]
-  %arrayidx60.phi = phi i64* [ %e, %for.body.lr.ph ], [ %arrayidx60.inc, %for.body ]
+  %arrayidx.phi = phi ptr [ %d, %for.body.lr.ph ], [ %arrayidx.inc, %for.body ]
+  %arrayidx30.phi = phi ptr [ %f, %for.body.lr.ph ], [ %arrayidx30.inc, %for.body ]
+  %arrayidx60.phi = phi ptr [ %e, %for.body.lr.ph ], [ %arrayidx60.inc, %for.body ]
   %j.088.pmt = phi i32 [ 0, %for.body.lr.ph ], [ %inc.pmt, %for.body ]
-  %1 = load i64, i64* %arrayidx.phi, align 8, !tbaa !1
+  %1 = load i64, ptr %arrayidx.phi, align 8, !tbaa !1
   %n_union3.sroa.0.0.extract.trunc = trunc i64 %1 to i32
   %n_union3.sroa.1.4.extract.shift = lshr i64 %1, 32
   %2 = tail call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 %n_union3.sroa.0.0.extract.trunc, i32 %conv8)
@@ -51,7 +50,7 @@ for.body:                                         ; preds = %for.body.lr.ph, %fo
   %6 = tail call i64 @llvm.hexagon.S2.asl.r.p(i64 %5, i32 -25)
   %conv24 = trunc i64 %6 to i32
   %7 = tail call i32 @llvm.hexagon.A2.sath(i32 %conv24)
-  %8 = load i64, i64* %arrayidx30.phi, align 8, !tbaa !1
+  %8 = load i64, ptr %arrayidx30.phi, align 8, !tbaa !1
   %n_union28.sroa.0.0.extract.trunc = trunc i64 %8 to i32
   %n_union28.sroa.1.4.extract.shift = lshr i64 %8, 32
   %9 = tail call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 %n_union28.sroa.0.0.extract.trunc, i32 %conv38)
@@ -76,17 +75,17 @@ for.body:                                         ; preds = %for.body.lr.ph, %fo
   %n_union.sroa.2.4.insert.insert = or i64 %n_union.sroa.1.2.insert.shift, %n_union.sroa.0.0.insert.ext
   %n_union.sroa.1.2.insert.insert = or i64 %n_union.sroa.2.4.insert.insert, %n_union.sroa.2.4.insert.shift
   %n_union.sroa.0.0.insert.insert = or i64 %n_union.sroa.1.2.insert.insert, %n_union.sroa.3.6.insert.shift
-  %15 = load i64, i64* %arrayidx60.phi, align 8, !tbaa !1
+  %15 = load i64, ptr %arrayidx60.phi, align 8, !tbaa !1
   %16 = tail call i64 @llvm.hexagon.A2.vaddhs(i64 %15, i64 %n_union.sroa.0.0.insert.insert)
-  store i64 %16, i64* %arrayidx60.phi, align 8, !tbaa !1
+  store i64 %16, ptr %arrayidx60.phi, align 8, !tbaa !1
   %inc.pmt = add i32 %j.088.pmt, 1
-  %17 = load i16, i16* %g, align 2, !tbaa !0
+  %17 = load i16, ptr %pData, align 2, !tbaa !0
   %conv1 = sext i16 %17 to i32
   %shr = ashr i32 %conv1, 2
   %cmp = icmp slt i32 %inc.pmt, %shr
-  %arrayidx.inc = getelementptr i64, i64* %arrayidx.phi, i32 1
-  %arrayidx30.inc = getelementptr i64, i64* %arrayidx30.phi, i32 1
-  %arrayidx60.inc = getelementptr i64, i64* %arrayidx60.phi, i32 1
+  %arrayidx.inc = getelementptr i64, ptr %arrayidx.phi, i32 1
+  %arrayidx30.inc = getelementptr i64, ptr %arrayidx30.phi, i32 1
+  %arrayidx60.inc = getelementptr i64, ptr %arrayidx60.phi, i32 1
   br i1 %cmp, label %for.body, label %for.end.loopexit
 
 for.end.loopexit:                                 ; preds = %for.body

diff  --git a/llvm/test/CodeGen/Hexagon/instrprof-custom.ll b/llvm/test/CodeGen/Hexagon/instrprof-custom.ll
index e3c729842d544..c2d1e3b54b5c9 100644
--- a/llvm/test/CodeGen/Hexagon/instrprof-custom.ll
+++ b/llvm/test/CodeGen/Hexagon/instrprof-custom.ll
@@ -9,12 +9,12 @@
 
 define dllexport void @test1() local_unnamed_addr #0 {
 entry:
-  tail call void @llvm.hexagon.instrprof.custom(i8* getelementptr inbounds ([21 x i8], [21 x i8]* @handler_name, i32 0, i32 0), i32 999)
+  tail call void @llvm.hexagon.instrprof.custom(ptr @handler_name, i32 999)
   ret void
 }
 
 ; Function Attrs: inaccessiblememonly nofree nosync nounwind willreturn
-declare void @llvm.hexagon.instrprof.custom(i8*, i32) #1
+declare void @llvm.hexagon.instrprof.custom(ptr, i32) #1
 
 attributes #0 = { "target-features"="+hvxv68,+hvx-length128b,+hvx-qfloat,-hvx-ieee-fp,+hmxv68" }
 attributes #1 = { inaccessiblememonly nofree nosync nounwind willreturn }

diff  --git a/llvm/test/CodeGen/Hexagon/intrinsics-v60-misc.ll b/llvm/test/CodeGen/Hexagon/intrinsics-v60-misc.ll
index 62d2ec177303d..8f12dca504344 100644
--- a/llvm/test/CodeGen/Hexagon/intrinsics-v60-misc.ll
+++ b/llvm/test/CodeGen/Hexagon/intrinsics-v60-misc.ll
@@ -11,7 +11,7 @@
 define void @test1(<32 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %a, i32 %b, i32 1)
-  store <32 x i32> %0, <32 x i32>* @l, align 128
+  store <32 x i32> %0, ptr @l, align 128
   ret void
 }
 
@@ -20,7 +20,7 @@ entry:
 define void @test2(<32 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vrsadubi(<32 x i32> %a, i32 %b, i32 1)
-  store <32 x i32> %0, <32 x i32>* @l, align 128
+  store <32 x i32> %0, ptr @l, align 128
   ret void
 }
 
@@ -29,7 +29,7 @@ entry:
 define void @test3(<32 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vrmpyubi(<32 x i32> %a, i32 %b, i32 1)
-  store <32 x i32> %0, <32 x i32>* @l, align 128
+  store <32 x i32> %0, ptr @l, align 128
   ret void
 }
 
@@ -38,7 +38,7 @@ entry:
 define void @test4(<32 x i32> %a, <32 x i32> %b, i32 %c) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %a, <32 x i32> %b, i32 %c, i32 1)
-  store <32 x i32> %0, <32 x i32>* @l, align 128
+  store <32 x i32> %0, ptr @l, align 128
   ret void
 }
 
@@ -47,7 +47,7 @@ entry:
 define void @test5(<32 x i32> %a, <32 x i32> %b, i32 %c) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vrsadubi.acc(<32 x i32> %a, <32 x i32> %b, i32 %c, i32 1)
-  store <32 x i32> %0, <32 x i32>* @l, align 128
+  store <32 x i32> %0, ptr @l, align 128
   ret void
 }
 
@@ -56,7 +56,7 @@ entry:
 define void @test6(<32 x i32> %a, <32 x i32> %b, i32 %c) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vrmpyubi.acc(<32 x i32> %a, <32 x i32> %b, i32 %c, i32 0)
-  store <32 x i32> %0, <32 x i32>* @l, align 128
+  store <32 x i32> %0, ptr @l, align 128
   ret void
 }
 
@@ -65,7 +65,7 @@ entry:
 define void @test7(<16 x i32> %a, <16 x i32> %b, i32 %c) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %a, <16 x i32> %b, i32 %c)
-  store <16 x i32> %0, <16 x i32>* @k, align 64
+  store <16 x i32> %0, ptr @k, align 64
   ret void
 }
 
@@ -74,7 +74,7 @@ entry:
 define void @test8(<16 x i32> %a, <16 x i32> %b, i32 %c) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vlalignb(<16 x i32> %a, <16 x i32> %b, i32 %c)
-  store <16 x i32> %0, <16 x i32>* @k, align 64
+  store <16 x i32> %0, ptr @k, align 64
   ret void
 }
 
@@ -83,7 +83,7 @@ entry:
 define void @test9(<16 x i32> %a, <16 x i32> %b, i32 %c) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %a, <16 x i32> %b, i32 %c)
-  store <16 x i32> %0, <16 x i32>* @k, align 64
+  store <16 x i32> %0, ptr @k, align 64
   ret void
 }
 
@@ -92,7 +92,7 @@ entry:
 define void @test10(<16 x i32> %a, <16 x i32> %b, i32 %c) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vasrwhsat(<16 x i32> %a, <16 x i32> %b, i32 %c)
-  store <16 x i32> %0, <16 x i32>* @k, align 64
+  store <16 x i32> %0, ptr @k, align 64
   ret void
 }
 
@@ -101,7 +101,7 @@ entry:
 define void @test11(<16 x i32> %a, <16 x i32> %b, i32 %c) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vasrwhrndsat(<16 x i32> %a, <16 x i32> %b, i32 %c)
-  store <16 x i32> %0, <16 x i32>* @k, align 64
+  store <16 x i32> %0, ptr @k, align 64
   ret void
 }
 
@@ -110,7 +110,7 @@ entry:
 define void @test12(<16 x i32> %a, <16 x i32> %b, i32 %c) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vasrwuhsat(<16 x i32> %a, <16 x i32> %b, i32 %c)
-  store <16 x i32> %0, <16 x i32>* @k, align 64
+  store <16 x i32> %0, ptr @k, align 64
   ret void
 }
 
@@ -119,7 +119,7 @@ entry:
 define void @test13(<16 x i32> %a, <16 x i32> %b, i32 %c) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %a, <16 x i32> %b, i32 %c)
-  store <16 x i32> %0, <16 x i32>* @k, align 64
+  store <16 x i32> %0, ptr @k, align 64
   ret void
 }
 
@@ -128,7 +128,7 @@ entry:
 define void @test14(<16 x i32> %a, <16 x i32> %b, i32 %c) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubrndsat(<16 x i32> %a, <16 x i32> %b, i32 %c)
-  store <16 x i32> %0, <16 x i32>* @k, align 64
+  store <16 x i32> %0, ptr @k, align 64
   ret void
 }
 
@@ -137,7 +137,7 @@ entry:
 define void @test15(<16 x i32> %a, <16 x i32> %b, i32 %c) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vasrhbrndsat(<16 x i32> %a, <16 x i32> %b, i32 %c)
-  store <16 x i32> %0, <16 x i32>* @k, align 64
+  store <16 x i32> %0, ptr @k, align 64
   ret void
 }
 
@@ -146,7 +146,7 @@ entry:
 define void @test16(<32 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vunpackob(<32 x i32> %a, <16 x i32> %b)
-  store <32 x i32> %0, <32 x i32>* @l, align 128
+  store <32 x i32> %0, ptr @l, align 128
   ret void
 }
 
@@ -155,7 +155,7 @@ entry:
 define void @test17(<32 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vunpackoh(<32 x i32> %a, <16 x i32> %b)
-  store <32 x i32> %0, <32 x i32>* @l, align 128
+  store <32 x i32> %0, ptr @l, align 128
   ret void
 }
 
@@ -164,7 +164,7 @@ entry:
 define void @test18(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %a, <16 x i32> %b, i32 3)
-  store <16 x i32> %0, <16 x i32>* @k, align 64
+  store <16 x i32> %0, ptr @k, align 64
   ret void
 }
 
@@ -173,7 +173,7 @@ entry:
 define void @test19(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %a, <16 x i32> %b, i32 3)
-  store <16 x i32> %0, <16 x i32>* @k, align 64
+  store <16 x i32> %0, ptr @k, align 64
   ret void
 }
 
@@ -183,7 +183,7 @@ define void @test20(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c) #0 {
 entry:
   %0 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a, i32 -1)
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<64 x i1> %0, <16 x i32> %b, <16 x i32> %c)
-  store <16 x i32> %1, <16 x i32>* @k, align 64
+  store <16 x i32> %1, ptr @k, align 64
   ret void
 }
 
@@ -195,7 +195,7 @@ entry:
   %1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %b, i32 -1)
   %2 = tail call <64 x i1> @llvm.hexagon.V6.pred.and(<64 x i1> %0, <64 x i1> %1)
   %3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %2, i32 -1)
-  store <16 x i32> %3, <16 x i32>* @h, align 64
+  store <16 x i32> %3, ptr @h, align 64
   ret void
 }
 
@@ -207,7 +207,7 @@ entry:
   %1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %b, i32 -1)
   %2 = tail call <64 x i1> @llvm.hexagon.V6.pred.or(<64 x i1> %0, <64 x i1> %1)
   %3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %2, i32 -1)
-  store <16 x i32> %3, <16 x i32>* @h, align 64
+  store <16 x i32> %3, ptr @h, align 64
   ret void
 }
 
@@ -218,7 +218,7 @@ entry:
   %0 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a, i32 -1)
   %1 = tail call <64 x i1> @llvm.hexagon.V6.pred.not(<64 x i1> %0)
   %2 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %1, i32 -1)
-  store <16 x i32> %2, <16 x i32>* @h, align 64
+  store <16 x i32> %2, ptr @h, align 64
   ret void
 }
 
@@ -230,7 +230,7 @@ entry:
   %1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %b, i32 -1)
   %2 = tail call <64 x i1> @llvm.hexagon.V6.pred.xor(<64 x i1> %0, <64 x i1> %1)
   %3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %2, i32 -1)
-  store <16 x i32> %3, <16 x i32>* @h, align 64
+  store <16 x i32> %3, ptr @h, align 64
   ret void
 }
 
@@ -242,7 +242,7 @@ entry:
   %1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %b, i32 -1)
   %2 = tail call <64 x i1> @llvm.hexagon.V6.pred.or.n(<64 x i1> %0, <64 x i1> %1)
   %3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %2, i32 -1)
-  store <16 x i32> %3, <16 x i32>* @h, align 64
+  store <16 x i32> %3, ptr @h, align 64
   ret void
 }
 
@@ -254,7 +254,7 @@ entry:
   %1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %b, i32 -1)
   %2 = tail call <64 x i1> @llvm.hexagon.V6.pred.and.n(<64 x i1> %0, <64 x i1> %1)
   %3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %2, i32 -1)
-  store <16 x i32> %3, <16 x i32>* @h, align 64
+  store <16 x i32> %3, ptr @h, align 64
   ret void
 }
 
@@ -264,7 +264,7 @@ define void @test27(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <64 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %a, <16 x i32> %b)
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %0, i32 -1)
-  store <16 x i32> %1, <16 x i32>* @k, align 64
+  store <16 x i32> %1, ptr @k, align 64
   ret void
 }
 
@@ -274,7 +274,7 @@ define void @test28(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <64 x i1> @llvm.hexagon.V6.vgth(<16 x i32> %a, <16 x i32> %b)
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %0, i32 -1)
-  store <16 x i32> %1, <16 x i32>* @k, align 64
+  store <16 x i32> %1, ptr @k, align 64
   ret void
 }
 
@@ -284,7 +284,7 @@ define void @test29(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <64 x i1> @llvm.hexagon.V6.veqh(<16 x i32> %a, <16 x i32> %b)
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %0, i32 -1)
-  store <16 x i32> %1, <16 x i32>* @k, align 64
+  store <16 x i32> %1, ptr @k, align 64
   ret void
 }
 
@@ -294,7 +294,7 @@ define void @test30(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <64 x i1> @llvm.hexagon.V6.vgtw(<16 x i32> %a, <16 x i32> %b)
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %0, i32 -1)
-  store <16 x i32> %1, <16 x i32>* @k, align 64
+  store <16 x i32> %1, ptr @k, align 64
   ret void
 }
 
@@ -304,7 +304,7 @@ define void @test31(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <64 x i1> @llvm.hexagon.V6.veqw(<16 x i32> %a, <16 x i32> %b)
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %0, i32 -1)
-  store <16 x i32> %1, <16 x i32>* @k, align 64
+  store <16 x i32> %1, ptr @k, align 64
   ret void
 }
 
@@ -314,7 +314,7 @@ define void @test32(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <64 x i1> @llvm.hexagon.V6.vgtuh(<16 x i32> %a, <16 x i32> %b)
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %0, i32 -1)
-  store <16 x i32> %1, <16 x i32>* @k, align 64
+  store <16 x i32> %1, ptr @k, align 64
   ret void
 }
 
@@ -324,7 +324,7 @@ define void @test33(<16 x i32> %a, <16 x i32> %b, i32 %c) #0 {
 entry:
   %0 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %b, i32 -1)
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt.acc(<16 x i32> %a, <64 x i1> %0, i32 %c)
-  store <16 x i32> %1, <16 x i32>* @h, align 64
+  store <16 x i32> %1, ptr @h, align 64
   ret void
 }
 
@@ -335,7 +335,7 @@ entry:
   %0 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a, i32 -1)
   %1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt.acc(<64 x i1> %0, <16 x i32> %b, i32 %c)
   %2 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %1, i32 -1)
-  store <16 x i32> %2, <16 x i32>* @k, align 64
+  store <16 x i32> %2, ptr @k, align 64
   ret void
 }
 
@@ -345,7 +345,7 @@ define void @test35(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a, i32 -1)
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %0, i32 %b)
-  store <16 x i32> %1, <16 x i32>* @h, align 64
+  store <16 x i32> %1, ptr @h, align 64
   ret void
 }
 
@@ -355,7 +355,7 @@ define void @test36(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a, i32 %b)
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %0, i32 -1)
-  store <16 x i32> %1, <16 x i32>* @k, align 64
+  store <16 x i32> %1, ptr @k, align 64
   ret void
 }
 
@@ -364,7 +364,7 @@ entry:
 define void @test37(i64 %a) #0 {
 entry:
   %0 = tail call i64 @llvm.hexagon.S6.rol.i.p(i64 %a, i32 38)
-  store i64 %0, i64* @n, align 8
+  store i64 %0, ptr @n, align 8
   ret void
 }
 
@@ -373,7 +373,7 @@ entry:
 define void @test38(i64 %a, i64 %b) #0 {
 entry:
   %0 = tail call i64 @llvm.hexagon.S6.rol.i.p.acc(i64 %a, i64 %b, i32 36)
-  store i64 %0, i64* @n, align 8
+  store i64 %0, ptr @n, align 8
   ret void
 }
 
@@ -382,7 +382,7 @@ entry:
 define void @test39(i64 %a, i64 %b) #0 {
 entry:
   %0 = tail call i64 @llvm.hexagon.S6.rol.i.p.and(i64 %a, i64 %b, i32 25)
-  store i64 %0, i64* @n, align 8
+  store i64 %0, ptr @n, align 8
   ret void
 }
 
@@ -391,7 +391,7 @@ entry:
 define void @test40(i64 %a, i64 %b) #0 {
 entry:
   %0 = tail call i64 @llvm.hexagon.S6.rol.i.p.nac(i64 %a, i64 %b, i32 20)
-  store i64 %0, i64* @n, align 8
+  store i64 %0, ptr @n, align 8
   ret void
 }
 
@@ -400,7 +400,7 @@ entry:
 define void @test41(i64 %a, i64 %b) #0 {
 entry:
   %0 = tail call i64 @llvm.hexagon.S6.rol.i.p.or(i64 %a, i64 %b, i32 22)
-  store i64 %0, i64* @n, align 8
+  store i64 %0, ptr @n, align 8
   ret void
 }
 
@@ -409,7 +409,7 @@ entry:
 define void @test42(i64 %a, i64 %b) #0 {
 entry:
   %0 = tail call i64 @llvm.hexagon.S6.rol.i.p.xacc(i64 %a, i64 %b, i32 25)
-  store i64 %0, i64* @n, align 8
+  store i64 %0, ptr @n, align 8
   ret void
 }
 
@@ -419,7 +419,7 @@ define void @test43(i32 %a) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.S6.rol.i.r(i32 %a, i32 14)
   %conv = sext i32 %0 to i64
-  store i64 %conv, i64* @n, align 8
+  store i64 %conv, ptr @n, align 8
   ret void
 }
 
@@ -428,7 +428,7 @@ entry:
 define void @test44(i32 %a, i32 %b) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.S6.rol.i.r.acc(i32 %a, i32 %b, i32 12)
-  store i32 %0, i32* @m, align 4
+  store i32 %0, ptr @m, align 4
   ret void
 }
 
@@ -437,7 +437,7 @@ entry:
 define void @test45(i32 %a, i32 %b) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.S6.rol.i.r.and(i32 %a, i32 %b, i32 18)
-  store i32 %0, i32* @m, align 4
+  store i32 %0, ptr @m, align 4
   ret void
 }
 
@@ -446,7 +446,7 @@ entry:
 define void @test46(i32 %a, i32 %b) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.S6.rol.i.r.nac(i32 %a, i32 %b, i32 31)
-  store i32 %0, i32* @m, align 4
+  store i32 %0, ptr @m, align 4
   ret void
 }
 
@@ -455,7 +455,7 @@ entry:
 define void @test47(i32 %a, i32 %b) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.S6.rol.i.r.or(i32 %a, i32 %b, i32 30)
-  store i32 %0, i32* @m, align 4
+  store i32 %0, ptr @m, align 4
   ret void
 }
 
@@ -464,7 +464,7 @@ entry:
 define void @test48(i32 %a, i32 %b) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.S6.rol.i.r.xacc(i32 %a, i32 %b, i32 31)
-  store i32 %0, i32* @m, align 4
+  store i32 %0, ptr @m, align 4
   ret void
 }
 
@@ -473,7 +473,7 @@ entry:
 define void @test49(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.V6.extractw(<16 x i32> %a, i32 %b)
-  store i32 %0, i32* @m, align 4
+  store i32 %0, ptr @m, align 4
   ret void
 }
 
@@ -482,7 +482,7 @@ entry:
 define void @test50(i32 %a) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 %a)
-  store <16 x i32> %0, <16 x i32>* @k, align 64
+  store <16 x i32> %0, ptr @k, align 64
   ret void
 }
 
@@ -492,7 +492,7 @@ define void @test51(i32 %a) #0 {
 entry:
   %0 = tail call <64 x i1> @llvm.hexagon.V6.pred.scalar2(i32 %a)
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %0, i32 -1)
-  store <16 x i32> %1, <16 x i32>* @k, align 64
+  store <16 x i32> %1, ptr @k, align 64
   ret void
 }
 
@@ -501,7 +501,7 @@ entry:
 define void @test52(<16 x i32> %a, <16 x i32> %b, i32 %c) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb(<16 x i32> %a, <16 x i32> %b, i32 %c)
-  store <16 x i32> %0, <16 x i32>* @k, align 64
+  store <16 x i32> %0, ptr @k, align 64
   ret void
 }
 
@@ -510,7 +510,7 @@ entry:
 define void @test53(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, i32 %d) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, i32 %d)
-  store <16 x i32> %0, <16 x i32>* @k, align 64
+  store <16 x i32> %0, ptr @k, align 64
   ret void
 }
 
@@ -519,7 +519,7 @@ entry:
 define void @test54(<32 x i32> %a, <16 x i32> %b, <16 x i32> %c, i32 %d) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vlutvwh.oracc(<32 x i32> %a, <16 x i32> %b, <16 x i32> %c, i32 %d)
-  store <32 x i32> %0, <32 x i32>* @l, align 128
+  store <32 x i32> %0, ptr @l, align 128
   ret void
 }
 
@@ -528,7 +528,7 @@ entry:
 define void @test55(<16 x i32> %a, <16 x i32> %b, i32 %l) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vlutvwh(<16 x i32> %a, <16 x i32> %b, i32 %l)
-  store <32 x i32> %0, <32 x i32>* @l, align 128
+  store <32 x i32> %0, ptr @l, align 128
   ret void
 }
 
@@ -536,9 +536,9 @@ entry:
 ; CHECK: v{{[0-9]+}}.w = vinsert(r{{[0-9]+}})
 define void @test56(i32 %b) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* @k, align 64
+  %0 = load <16 x i32>, ptr @k, align 64
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vinsertwr(<16 x i32> %0, i32 %b)
-  store <16 x i32> %1, <16 x i32>* @k, align 64
+  store <16 x i32> %1, ptr @k, align 64
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/intrinsics-v60-permute.ll b/llvm/test/CodeGen/Hexagon/intrinsics-v60-permute.ll
index 02ff0ed05c7b8..7a16d77d8dd49 100644
--- a/llvm/test/CodeGen/Hexagon/intrinsics-v60-permute.ll
+++ b/llvm/test/CodeGen/Hexagon/intrinsics-v60-permute.ll
@@ -8,7 +8,7 @@
 define void @test1(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vpackeb(<16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -17,7 +17,7 @@ entry:
 define void @test2(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vpackeh(<16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -26,7 +26,7 @@ entry:
 define void @test3(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vpackhub.sat(<16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -35,7 +35,7 @@ entry:
 define void @test4(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vpackhb.sat(<16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -44,7 +44,7 @@ entry:
 define void @test5(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vpackwuh.sat(<16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -53,7 +53,7 @@ entry:
 define void @test6(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vpackwh.sat(<16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -62,7 +62,7 @@ entry:
 define void @test7(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vpackob(<16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -71,7 +71,7 @@ entry:
 define void @test8(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vpackoh(<16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -80,7 +80,7 @@ entry:
 define void @test9(<16 x i32> %a) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vunpackub(<16 x i32> %a)
-  store <32 x i32> %0, <32 x i32>* @c, align 128
+  store <32 x i32> %0, ptr @c, align 128
   ret void
 }
 
@@ -89,7 +89,7 @@ entry:
 define void @test10(<16 x i32> %a) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vunpackuh(<16 x i32> %a)
-  store <32 x i32> %0, <32 x i32>* @c, align 128
+  store <32 x i32> %0, ptr @c, align 128
   ret void
 }
 
@@ -98,7 +98,7 @@ entry:
 define void @test11(<16 x i32> %a) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vunpackb(<16 x i32> %a)
-  store <32 x i32> %0, <32 x i32>* @c, align 128
+  store <32 x i32> %0, ptr @c, align 128
   ret void
 }
 
@@ -107,7 +107,7 @@ entry:
 define void @test12(<16 x i32> %a) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vunpackh(<16 x i32> %a)
-  store <32 x i32> %0, <32 x i32>* @c, align 128
+  store <32 x i32> %0, ptr @c, align 128
   ret void
 }
 
@@ -116,7 +116,7 @@ entry:
 define void @test13(<16 x i32> %a) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vdealh(<16 x i32> %a)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -125,7 +125,7 @@ entry:
 define void @test14(<16 x i32> %a) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vdealb(<16 x i32> %a)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -134,7 +134,7 @@ entry:
 define void @test15(<16 x i32> %a) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vshuffh(<16 x i32> %a)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -143,7 +143,7 @@ entry:
 define void @test16(<16 x i32> %a) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32> %a)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/intrinsics-v60-shift.ll b/llvm/test/CodeGen/Hexagon/intrinsics-v60-shift.ll
index e6f8efaf008da..162ab73918d31 100644
--- a/llvm/test/CodeGen/Hexagon/intrinsics-v60-shift.ll
+++ b/llvm/test/CodeGen/Hexagon/intrinsics-v60-shift.ll
@@ -7,7 +7,7 @@
 define void @test18(<16 x i32> %a) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vcl0w(<16 x i32> %a)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -16,7 +16,7 @@ entry:
 define void @test19(<16 x i32> %a) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vpopcounth(<16 x i32> %a)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -25,7 +25,7 @@ entry:
 define void @test20(<16 x i32> %a) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vcl0h(<16 x i32> %a)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -34,7 +34,7 @@ entry:
 define void @test21(<16 x i32> %a) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vnormamtw(<16 x i32> %a)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -43,7 +43,7 @@ entry:
 define void @test22(<16 x i32> %a) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vnormamth(<16 x i32> %a)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/intrinsics-v60-vcmp.ll b/llvm/test/CodeGen/Hexagon/intrinsics-v60-vcmp.ll
index a3319b92164b5..43953d9dca769 100644
--- a/llvm/test/CodeGen/Hexagon/intrinsics-v60-vcmp.ll
+++ b/llvm/test/CodeGen/Hexagon/intrinsics-v60-vcmp.ll
@@ -6,11 +6,11 @@
 ; CHECK: q{{[0-9]}} &= vcmp.eq(v{{[0-9]+}}.b,v{{[0-9]+}}.b)
 define void @test1(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.veqb.and(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -18,11 +18,11 @@ entry:
 ; CHECK: q{{[0-9]}} &= vcmp.eq(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
 define void @test2(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.veqh.and(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -30,11 +30,11 @@ entry:
 ; CHECK: q{{[0-9]}} &= vcmp.eq(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
 define void @test3(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.veqw.and(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -42,11 +42,11 @@ entry:
 ; CHECK: q{{[0-9]}} &= vcmp.gt(v{{[0-9]+}}.b,v{{[0-9]+}}.b)
 define void @test4(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.vgtb.and(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -54,11 +54,11 @@ entry:
 ; CHECK: q{{[0-9]}} &= vcmp.gt(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
 define void @test5(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.vgth.and(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -66,11 +66,11 @@ entry:
 ; CHECK: q{{[0-9]}} &= vcmp.gt(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
 define void @test6(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.vgtw.and(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -78,11 +78,11 @@ entry:
 ; CHECK: q{{[0-9]}} &= vcmp.gt(v{{[0-9]+}}.ub,v{{[0-9]+}}.ub)
 define void @test7(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.vgtub.and(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -90,11 +90,11 @@ entry:
 ; CHECK: q{{[0-9]}} &= vcmp.gt(v{{[0-9]+}}.uh,v{{[0-9]+}}.uh)
 define void @test8(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.vgtuh.and(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -102,11 +102,11 @@ entry:
 ; CHECK: q{{[0-9]}} &= vcmp.gt(v{{[0-9]+}}.uw,v{{[0-9]+}}.uw)
 define void @test9(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.vgtuw.and(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -114,11 +114,11 @@ entry:
 ; CHECK: q{{[0-9]}} |= vcmp.eq(v{{[0-9]+}}.b,v{{[0-9]+}}.b)
 define void @test10(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.veqb.or(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -126,11 +126,11 @@ entry:
 ; CHECK: q{{[0-9]}} |= vcmp.eq(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
 define void @test11(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.veqh.or(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -138,11 +138,11 @@ entry:
 ; CHECK: q{{[0-9]}} |= vcmp.eq(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
 define void @test12(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.veqw.or(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -150,11 +150,11 @@ entry:
 ; CHECK: q{{[0-9]}} |= vcmp.gt(v{{[0-9]+}}.b,v{{[0-9]+}}.b)
 define void @test13(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.vgtb.or(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -162,11 +162,11 @@ entry:
 ; CHECK: q{{[0-9]}} |= vcmp.gt(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
 define void @test14(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.vgth.or(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -174,11 +174,11 @@ entry:
 ; CHECK: q{{[0-9]}} |= vcmp.gt(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
 define void @test15(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.vgtw.or(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -186,11 +186,11 @@ entry:
 ; CHECK: q{{[0-9]}} |= vcmp.gt(v{{[0-9]+}}.ub,v{{[0-9]+}}.ub)
 define void @test16(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.vgtub.or(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -198,11 +198,11 @@ entry:
 ; CHECK: q{{[0-9]}} |= vcmp.gt(v{{[0-9]+}}.uh,v{{[0-9]+}}.uh)
 define void @test17(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.vgtuh.or(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -210,11 +210,11 @@ entry:
 ; CHECK: q{{[0-9]}} |= vcmp.gt(v{{[0-9]+}}.uw,v{{[0-9]+}}.uw)
 define void @test18(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.vgtuw.or(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -222,11 +222,11 @@ entry:
 ; CHECK: q{{[0-9]}} ^= vcmp.eq(v{{[0-9]+}}.b,v{{[0-9]+}}.b)
 define void @test19(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.veqb.xor(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -234,11 +234,11 @@ entry:
 ; CHECK: q{{[0-9]}} ^= vcmp.eq(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
 define void @test20(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.veqh.xor(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -246,11 +246,11 @@ entry:
 ; CHECK: q{{[0-9]}} ^= vcmp.eq(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
 define void @test21(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.veqw.xor(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -258,11 +258,11 @@ entry:
 ; CHECK: q{{[0-9]}} ^= vcmp.gt(v{{[0-9]+}}.b,v{{[0-9]+}}.b)
 define void @test22(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.vgtb.xor(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -270,11 +270,11 @@ entry:
 ; CHECK: q{{[0-9]}} ^= vcmp.gt(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
 define void @test23(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.vgth.xor(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -282,11 +282,11 @@ entry:
 ; CHECK: q{{[0-9]}} ^= vcmp.gt(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
 define void @test24(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.vgtw.xor(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -294,11 +294,11 @@ entry:
 ; CHECK: q{{[0-9]}} ^= vcmp.gt(v{{[0-9]+}}.ub,v{{[0-9]+}}.ub)
 define void @test25(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.vgtub.xor(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -306,11 +306,11 @@ entry:
 ; CHECK: q{{[0-9]}} ^= vcmp.gt(v{{[0-9]+}}.uh,v{{[0-9]+}}.uh)
 define void @test26(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.vgtuh.xor(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 
@@ -318,11 +318,11 @@ entry:
 ; CHECK: q{{[0-9]}} ^= vcmp.gt(v{{[0-9]+}}.uw,v{{[0-9]+}}.uw)
 define void @test27(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %v0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %v0 = load <16 x i32>, ptr @d, align 64
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -1)
   %v2 = tail call <64 x i1> @llvm.hexagon.V6.vgtuw.xor(<64 x i1> %v1, <16 x i32> %a, <16 x i32> %b)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* @d, align 64
+  store <16 x i32> %v3, ptr @d, align 64
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/intrinsics-v60-vmpy-acc-128B.ll b/llvm/test/CodeGen/Hexagon/intrinsics-v60-vmpy-acc-128B.ll
index bf4f75a2db967..b35561a236057 100644
--- a/llvm/test/CodeGen/Hexagon/intrinsics-v60-vmpy-acc-128B.ll
+++ b/llvm/test/CodeGen/Hexagon/intrinsics-v60-vmpy-acc-128B.ll
@@ -9,13 +9,13 @@ define void @test1(<64 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <64 x i32>, align 256
   %b.addr = alloca i32, align 4
-  store <64 x i32> %a, <64 x i32>* %a.addr, align 256
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <64 x i32>, <64 x i32>* @c, align 256
-  %1 = load <64 x i32>, <64 x i32>* %a.addr, align 256
-  %2 = load i32, i32* %b.addr, align 4
+  store <64 x i32> %a, ptr %a.addr, align 256
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <64 x i32>, ptr @c, align 256
+  %1 = load <64 x i32>, ptr %a.addr, align 256
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <64 x i32> @llvm.hexagon.V6.vtmpyb.acc.128B(<64 x i32> %0, <64 x i32> %1, i32 %2)
-  store <64 x i32> %3, <64 x i32>* @c, align 256
+  store <64 x i32> %3, ptr @c, align 256
   ret void
 }
 
@@ -25,13 +25,13 @@ define void @test2(<64 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <64 x i32>, align 256
   %b.addr = alloca i32, align 4
-  store <64 x i32> %a, <64 x i32>* %a.addr, align 256
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <64 x i32>, <64 x i32>* @c, align 256
-  %1 = load <64 x i32>, <64 x i32>* %a.addr, align 256
-  %2 = load i32, i32* %b.addr, align 4
+  store <64 x i32> %a, ptr %a.addr, align 256
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <64 x i32>, ptr @c, align 256
+  %1 = load <64 x i32>, ptr %a.addr, align 256
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <64 x i32> @llvm.hexagon.V6.vtmpybus.acc.128B(<64 x i32> %0, <64 x i32> %1, i32 %2)
-  store <64 x i32> %3, <64 x i32>* @c, align 256
+  store <64 x i32> %3, ptr @c, align 256
   ret void
 }
 
@@ -41,13 +41,13 @@ define void @test3(<64 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <64 x i32>, align 256
   %b.addr = alloca i32, align 4
-  store <64 x i32> %a, <64 x i32>* %a.addr, align 256
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <64 x i32>, <64 x i32>* @c, align 256
-  %1 = load <64 x i32>, <64 x i32>* %a.addr, align 256
-  %2 = load i32, i32* %b.addr, align 4
+  store <64 x i32> %a, ptr %a.addr, align 256
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <64 x i32>, ptr @c, align 256
+  %1 = load <64 x i32>, ptr %a.addr, align 256
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <64 x i32> @llvm.hexagon.V6.vtmpyhb.acc.128B(<64 x i32> %0, <64 x i32> %1, i32 %2)
-  store <64 x i32> %3, <64 x i32>* @c, align 256
+  store <64 x i32> %3, ptr @c, align 256
   ret void
 }
 
@@ -57,13 +57,13 @@ define void @test4(<32 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <32 x i32>, align 128
   %b.addr = alloca i32, align 4
-  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <32 x i32>, <32 x i32>* @d, align 128
-  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
-  %2 = load i32, i32* %b.addr, align 4
+  store <32 x i32> %a, ptr %a.addr, align 128
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <32 x i32>, ptr @d, align 128
+  %1 = load <32 x i32>, ptr %a.addr, align 128
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <32 x i32> @llvm.hexagon.V6.vdmpyhb.acc.128B(<32 x i32> %0, <32 x i32> %1, i32 %2)
-  store <32 x i32> %3, <32 x i32>* @d, align 128
+  store <32 x i32> %3, ptr @d, align 128
   ret void
 }
 
@@ -73,13 +73,13 @@ define void @test5(<32 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <32 x i32>, align 128
   %b.addr = alloca i32, align 4
-  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <32 x i32>, <32 x i32>* @d, align 128
-  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
-  %2 = load i32, i32* %b.addr, align 4
+  store <32 x i32> %a, ptr %a.addr, align 128
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <32 x i32>, ptr @d, align 128
+  %1 = load <32 x i32>, ptr %a.addr, align 128
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <32 x i32> @llvm.hexagon.V6.vrmpyub.acc.128B(<32 x i32> %0, <32 x i32> %1, i32 %2)
-  store <32 x i32> %3, <32 x i32>* @d, align 128
+  store <32 x i32> %3, ptr @d, align 128
   ret void
 }
 
@@ -89,13 +89,13 @@ define void @test6(<32 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <32 x i32>, align 128
   %b.addr = alloca i32, align 4
-  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <32 x i32>, <32 x i32>* @d, align 128
-  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
-  %2 = load i32, i32* %b.addr, align 4
+  store <32 x i32> %a, ptr %a.addr, align 128
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <32 x i32>, ptr @d, align 128
+  %1 = load <32 x i32>, ptr %a.addr, align 128
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <32 x i32> @llvm.hexagon.V6.vrmpybus.acc.128B(<32 x i32> %0, <32 x i32> %1, i32 %2)
-  store <32 x i32> %3, <32 x i32>* @d, align 128
+  store <32 x i32> %3, ptr @d, align 128
   ret void
 }
 
@@ -105,13 +105,13 @@ define void @test7(<32 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <32 x i32>, align 128
   %b.addr = alloca i32, align 4
-  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <32 x i32>, <32 x i32>* @d, align 128
-  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
-  %2 = load i32, i32* %b.addr, align 4
+  store <32 x i32> %a, ptr %a.addr, align 128
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <32 x i32>, ptr @d, align 128
+  %1 = load <32 x i32>, ptr %a.addr, align 128
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <32 x i32> @llvm.hexagon.V6.vdmpybus.acc.128B(<32 x i32> %0, <32 x i32> %1, i32 %2)
-  store <32 x i32> %3, <32 x i32>* @d, align 128
+  store <32 x i32> %3, ptr @d, align 128
   ret void
 }
 
@@ -121,13 +121,13 @@ define void @test8(<64 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <64 x i32>, align 256
   %b.addr = alloca i32, align 4
-  store <64 x i32> %a, <64 x i32>* %a.addr, align 256
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <64 x i32>, <64 x i32>* @c, align 256
-  %1 = load <64 x i32>, <64 x i32>* %a.addr, align 256
-  %2 = load i32, i32* %b.addr, align 4
+  store <64 x i32> %a, ptr %a.addr, align 256
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <64 x i32>, ptr @c, align 256
+  %1 = load <64 x i32>, ptr %a.addr, align 256
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <64 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc.128B(<64 x i32> %0, <64 x i32> %1, i32 %2)
-  store <64 x i32> %3, <64 x i32>* @c, align 256
+  store <64 x i32> %3, ptr @c, align 256
   ret void
 }
 
@@ -137,13 +137,13 @@ define void @test9(<32 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <32 x i32>, align 128
   %b.addr = alloca i32, align 4
-  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <32 x i32>, <32 x i32>* @d, align 128
-  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
-  %2 = load i32, i32* %b.addr, align 4
+  store <32 x i32> %a, ptr %a.addr, align 128
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <32 x i32>, ptr @d, align 128
+  %1 = load <32 x i32>, ptr %a.addr, align 128
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <32 x i32> @llvm.hexagon.V6.vdmpyhsusat.acc.128B(<32 x i32> %0, <32 x i32> %1, i32 %2)
-  store <32 x i32> %3, <32 x i32>* @d, align 128
+  store <32 x i32> %3, ptr @d, align 128
   ret void
 }
 
@@ -153,13 +153,13 @@ define void @test10(<64 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <64 x i32>, align 256
   %b.addr = alloca i32, align 4
-  store <64 x i32> %a, <64 x i32>* %a.addr, align 256
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <32 x i32>, <32 x i32>* @d, align 128
-  %1 = load <64 x i32>, <64 x i32>* %a.addr, align 256
-  %2 = load i32, i32* %b.addr, align 4
+  store <64 x i32> %a, ptr %a.addr, align 256
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <32 x i32>, ptr @d, align 128
+  %1 = load <64 x i32>, ptr %a.addr, align 256
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <32 x i32> @llvm.hexagon.V6.vdmpyhsuisat.acc.128B(<32 x i32> %0, <64 x i32> %1, i32 %2)
-  store <32 x i32> %3, <32 x i32>* @d, align 128
+  store <32 x i32> %3, ptr @d, align 128
   ret void
 }
 
@@ -169,13 +169,13 @@ define void @test11(<64 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <64 x i32>, align 256
   %b.addr = alloca i32, align 4
-  store <64 x i32> %a, <64 x i32>* %a.addr, align 256
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <32 x i32>, <32 x i32>* @d, align 128
-  %1 = load <64 x i32>, <64 x i32>* %a.addr, align 256
-  %2 = load i32, i32* %b.addr, align 4
+  store <64 x i32> %a, ptr %a.addr, align 256
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <32 x i32>, ptr @d, align 128
+  %1 = load <64 x i32>, ptr %a.addr, align 256
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <32 x i32> @llvm.hexagon.V6.vdmpyhisat.acc.128B(<32 x i32> %0, <64 x i32> %1, i32 %2)
-  store <32 x i32> %3, <32 x i32>* @d, align 128
+  store <32 x i32> %3, ptr @d, align 128
   ret void
 }
 
@@ -185,13 +185,13 @@ define void @test12(<32 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <32 x i32>, align 128
   %b.addr = alloca i32, align 4
-  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <32 x i32>, <32 x i32>* @d, align 128
-  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
-  %2 = load i32, i32* %b.addr, align 4
+  store <32 x i32> %a, ptr %a.addr, align 128
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <32 x i32>, ptr @d, align 128
+  %1 = load <32 x i32>, ptr %a.addr, align 128
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <32 x i32> @llvm.hexagon.V6.vdmpyhsat.acc.128B(<32 x i32> %0, <32 x i32> %1, i32 %2)
-  store <32 x i32> %3, <32 x i32>* @d, align 128
+  store <32 x i32> %3, ptr @d, align 128
   ret void
 }
 
@@ -201,13 +201,13 @@ define void @test13(<64 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <64 x i32>, align 256
   %b.addr = alloca i32, align 4
-  store <64 x i32> %a, <64 x i32>* %a.addr, align 256
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <64 x i32>, <64 x i32>* @c, align 256
-  %1 = load <64 x i32>, <64 x i32>* %a.addr, align 256
-  %2 = load i32, i32* %b.addr, align 4
+  store <64 x i32> %a, ptr %a.addr, align 256
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <64 x i32>, ptr @c, align 256
+  %1 = load <64 x i32>, ptr %a.addr, align 256
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <64 x i32> @llvm.hexagon.V6.vdmpyhb.dv.acc.128B(<64 x i32> %0, <64 x i32> %1, i32 %2)
-  store <64 x i32> %3, <64 x i32>* @c, align 256
+  store <64 x i32> %3, ptr @c, align 256
   ret void
 }
 
@@ -217,13 +217,13 @@ define void @test14(<32 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <32 x i32>, align 128
   %b.addr = alloca i32, align 4
-  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <64 x i32>, <64 x i32>* @c, align 256
-  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
-  %2 = load i32, i32* %b.addr, align 4
+  store <32 x i32> %a, ptr %a.addr, align 128
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <64 x i32>, ptr @c, align 256
+  %1 = load <32 x i32>, ptr %a.addr, align 128
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <64 x i32> @llvm.hexagon.V6.vmpybus.acc.128B(<64 x i32> %0, <32 x i32> %1, i32 %2)
-  store <64 x i32> %3, <64 x i32>* @c, align 256
+  store <64 x i32> %3, ptr @c, align 256
   ret void
 }
 
@@ -233,13 +233,13 @@ define void @test15(<64 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <64 x i32>, align 256
   %b.addr = alloca i32, align 4
-  store <64 x i32> %a, <64 x i32>* %a.addr, align 256
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <64 x i32>, <64 x i32>* @c, align 256
-  %1 = load <64 x i32>, <64 x i32>* %a.addr, align 256
-  %2 = load i32, i32* %b.addr, align 4
+  store <64 x i32> %a, ptr %a.addr, align 256
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <64 x i32>, ptr @c, align 256
+  %1 = load <64 x i32>, ptr %a.addr, align 256
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <64 x i32> @llvm.hexagon.V6.vmpabus.acc.128B(<64 x i32> %0, <64 x i32> %1, i32 %2)
-  store <64 x i32> %3, <64 x i32>* @c, align 256
+  store <64 x i32> %3, ptr @c, align 256
   ret void
 }
 
@@ -249,13 +249,13 @@ define void @test16(<64 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <64 x i32>, align 256
   %b.addr = alloca i32, align 4
-  store <64 x i32> %a, <64 x i32>* %a.addr, align 256
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <64 x i32>, <64 x i32>* @c, align 256
-  %1 = load <64 x i32>, <64 x i32>* %a.addr, align 256
-  %2 = load i32, i32* %b.addr, align 4
+  store <64 x i32> %a, ptr %a.addr, align 256
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <64 x i32>, ptr @c, align 256
+  %1 = load <64 x i32>, ptr %a.addr, align 256
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <64 x i32> @llvm.hexagon.V6.vmpahb.acc.128B(<64 x i32> %0, <64 x i32> %1, i32 %2)
-  store <64 x i32> %3, <64 x i32>* @c, align 256
+  store <64 x i32> %3, ptr @c, align 256
   ret void
 }
 
@@ -265,13 +265,13 @@ define void @test17(<32 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <32 x i32>, align 128
   %b.addr = alloca i32, align 4
-  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <64 x i32>, <64 x i32>* @c, align 256
-  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
-  %2 = load i32, i32* %b.addr, align 4
+  store <32 x i32> %a, ptr %a.addr, align 128
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <64 x i32>, ptr @c, align 256
+  %1 = load <32 x i32>, ptr %a.addr, align 128
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <64 x i32> @llvm.hexagon.V6.vmpyhsat.acc.128B(<64 x i32> %0, <32 x i32> %1, i32 %2)
-  store <64 x i32> %3, <64 x i32>* @c, align 256
+  store <64 x i32> %3, ptr @c, align 256
   ret void
 }
 
@@ -281,13 +281,13 @@ define void @test18(<32 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <32 x i32>, align 128
   %b.addr = alloca i32, align 4
-  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <64 x i32>, <64 x i32>* @c, align 256
-  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
-  %2 = load i32, i32* %b.addr, align 4
+  store <32 x i32> %a, ptr %a.addr, align 128
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <64 x i32>, ptr @c, align 256
+  %1 = load <32 x i32>, ptr %a.addr, align 128
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32> %0, <32 x i32> %1, i32 %2)
-  store <64 x i32> %3, <64 x i32>* @c, align 256
+  store <64 x i32> %3, ptr @c, align 256
   ret void
 }
 
@@ -297,13 +297,13 @@ define void @test19(<32 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <32 x i32>, align 128
   %b.addr = alloca i32, align 4
-  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <32 x i32>, <32 x i32>* @d, align 128
-  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
-  %2 = load i32, i32* %b.addr, align 4
+  store <32 x i32> %a, ptr %a.addr, align 128
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <32 x i32>, ptr @d, align 128
+  %1 = load <32 x i32>, ptr %a.addr, align 128
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <32 x i32> @llvm.hexagon.V6.vmpyiwb.acc.128B(<32 x i32> %0, <32 x i32> %1, i32 %2)
-  store <32 x i32> %3, <32 x i32>* @d, align 128
+  store <32 x i32> %3, ptr @d, align 128
   ret void
 }
 
@@ -313,13 +313,13 @@ define void @test20(<32 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <32 x i32>, align 128
   %b.addr = alloca i32, align 4
-  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <32 x i32>, <32 x i32>* @d, align 128
-  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
-  %2 = load i32, i32* %b.addr, align 4
+  store <32 x i32> %a, ptr %a.addr, align 128
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <32 x i32>, ptr @d, align 128
+  %1 = load <32 x i32>, ptr %a.addr, align 128
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <32 x i32> @llvm.hexagon.V6.vmpyiwh.acc.128B(<32 x i32> %0, <32 x i32> %1, i32 %2)
-  store <32 x i32> %3, <32 x i32>* @d, align 128
+  store <32 x i32> %3, ptr @d, align 128
   ret void
 }
 
@@ -329,13 +329,13 @@ define void @test21(<64 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <64 x i32>, align 256
   %b.addr = alloca i32, align 4
-  store <64 x i32> %a, <64 x i32>* %a.addr, align 256
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <64 x i32>, <64 x i32>* @c, align 256
-  %1 = load <64 x i32>, <64 x i32>* %a.addr, align 256
-  %2 = load i32, i32* %b.addr, align 4
+  store <64 x i32> %a, ptr %a.addr, align 256
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <64 x i32>, ptr @c, align 256
+  %1 = load <64 x i32>, ptr %a.addr, align 256
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <64 x i32> @llvm.hexagon.V6.vdsaduh.acc.128B(<64 x i32> %0, <64 x i32> %1, i32 %2)
-  store <64 x i32> %3, <64 x i32>* @c, align 256
+  store <64 x i32> %3, ptr @c, align 256
   ret void
 }
 
@@ -345,13 +345,13 @@ define void @test22(<32 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <32 x i32>, align 128
   %b.addr = alloca i32, align 4
-  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <32 x i32>, <32 x i32>* @d, align 128
-  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
-  %2 = load i32, i32* %b.addr, align 4
+  store <32 x i32> %a, ptr %a.addr, align 128
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <32 x i32>, ptr @d, align 128
+  %1 = load <32 x i32>, ptr %a.addr, align 128
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <32 x i32> @llvm.hexagon.V6.vmpyihb.acc.128B(<32 x i32> %0, <32 x i32> %1, i32 %2)
-  store <32 x i32> %3, <32 x i32>* @d, align 128
+  store <32 x i32> %3, ptr @d, align 128
   ret void
 }
 
@@ -361,13 +361,13 @@ define void @test23(<32 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <32 x i32>, align 128
   %b.addr = alloca i32, align 4
-  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <32 x i32>, <32 x i32>* @d, align 128
-  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
-  %2 = load i32, i32* %b.addr, align 4
+  store <32 x i32> %a, ptr %a.addr, align 128
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <32 x i32>, ptr @d, align 128
+  %1 = load <32 x i32>, ptr %a.addr, align 128
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <32 x i32> @llvm.hexagon.V6.vaslw.acc.128B(<32 x i32> %0, <32 x i32> %1, i32 %2)
-  store <32 x i32> %3, <32 x i32>* @d, align 128
+  store <32 x i32> %3, ptr @d, align 128
   ret void
 }
 
@@ -377,13 +377,13 @@ define void @test24(<32 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <32 x i32>, align 128
   %b.addr = alloca i32, align 4
-  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <32 x i32>, <32 x i32>* @d, align 128
-  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
-  %2 = load i32, i32* %b.addr, align 4
+  store <32 x i32> %a, ptr %a.addr, align 128
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <32 x i32>, ptr @d, align 128
+  %1 = load <32 x i32>, ptr %a.addr, align 128
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <32 x i32> @llvm.hexagon.V6.vasrw.acc.128B(<32 x i32> %0, <32 x i32> %1, i32 %2)
-  store <32 x i32> %3, <32 x i32>* @d, align 128
+  store <32 x i32> %3, ptr @d, align 128
   ret void
 }
 
@@ -393,13 +393,13 @@ define void @test25(<32 x i32> %a, i32 %b) #0 {
 entry:
   %a.addr = alloca <32 x i32>, align 128
   %b.addr = alloca i32, align 4
-  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load <64 x i32>, <64 x i32>* @c, align 256
-  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
-  %2 = load i32, i32* %b.addr, align 4
+  store <32 x i32> %a, ptr %a.addr, align 128
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load <64 x i32>, ptr @c, align 256
+  %1 = load <32 x i32>, ptr %a.addr, align 128
+  %2 = load i32, ptr %b.addr, align 4
   %3 = call <64 x i32> @llvm.hexagon.V6.vmpyub.acc.128B(<64 x i32> %0, <32 x i32> %1, i32 %2)
-  store <64 x i32> %3, <64 x i32>* @c, align 256
+  store <64 x i32> %3, ptr @c, align 256
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/intrinsics-v60-vmpy-acc.ll b/llvm/test/CodeGen/Hexagon/intrinsics-v60-vmpy-acc.ll
index eb8a85c6b93bf..383314947523f 100644
--- a/llvm/test/CodeGen/Hexagon/intrinsics-v60-vmpy-acc.ll
+++ b/llvm/test/CodeGen/Hexagon/intrinsics-v60-vmpy-acc.ll
@@ -7,9 +7,9 @@
 ; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h += vtmpy(v{{[0-9]+}}:{{[0-9]+}}.b,r{{[0-9]+}}.b)
 define void @test1(<32 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %0 = load <32 x i32>, ptr @c, align 128
   %1 = tail call <32 x i32> @llvm.hexagon.V6.vtmpyb.acc(<32 x i32> %0, <32 x i32> %a, i32 %b)
-  store <32 x i32> %1, <32 x i32>* @c, align 128
+  store <32 x i32> %1, ptr @c, align 128
   ret void
 }
 
@@ -17,9 +17,9 @@ entry:
 ; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h += vtmpy(v{{[0-9]+}}:{{[0-9]+}}.ub,r{{[0-9]+}}.b)
 define void @test2(<32 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %0 = load <32 x i32>, ptr @c, align 128
   %1 = tail call <32 x i32> @llvm.hexagon.V6.vtmpybus.acc(<32 x i32> %0, <32 x i32> %a, i32 %b)
-  store <32 x i32> %1, <32 x i32>* @c, align 128
+  store <32 x i32> %1, ptr @c, align 128
   ret void
 }
 
@@ -27,9 +27,9 @@ entry:
 ; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w += vtmpy(v{{[0-9]+}}:{{[0-9]+}}.h,r{{[0-9]+}}.b)
 define void @test3(<32 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %0 = load <32 x i32>, ptr @c, align 128
   %1 = tail call <32 x i32> @llvm.hexagon.V6.vtmpyhb.acc(<32 x i32> %0, <32 x i32> %a, i32 %b)
-  store <32 x i32> %1, <32 x i32>* @c, align 128
+  store <32 x i32> %1, ptr @c, align 128
   ret void
 }
 
@@ -37,9 +37,9 @@ entry:
 ; CHECK: v{{[0-9]+}}.w += vdmpy(v{{[0-9]+}}.h,r{{[0-9]+}}.b)
 define void @test4(<16 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %0 = load <16 x i32>, ptr @d, align 64
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhb.acc(<16 x i32> %0, <16 x i32> %a, i32 %b)
-  store <16 x i32> %1, <16 x i32>* @d, align 64
+  store <16 x i32> %1, ptr @d, align 64
   ret void
 }
 
@@ -47,9 +47,9 @@ entry:
 ; CHECK: v{{[0-9]+}}.uw += vrmpy(v{{[0-9]+}}.ub,r{{[0-9]+}}.ub)
 define void @test5(<16 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %0 = load <16 x i32>, ptr @d, align 64
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vrmpyub.acc(<16 x i32> %0, <16 x i32> %a, i32 %b)
-  store <16 x i32> %1, <16 x i32>* @d, align 64
+  store <16 x i32> %1, ptr @d, align 64
   ret void
 }
 
@@ -57,9 +57,9 @@ entry:
 ; CHECK: v{{[0-9]+}}.w += vrmpy(v{{[0-9]+}}.ub,r{{[0-9]+}}.b)
 define void @test6(<16 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %0 = load <16 x i32>, ptr @d, align 64
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vrmpybus.acc(<16 x i32> %0, <16 x i32> %a, i32 %b)
-  store <16 x i32> %1, <16 x i32>* @d, align 64
+  store <16 x i32> %1, ptr @d, align 64
   ret void
 }
 
@@ -67,9 +67,9 @@ entry:
 ; CHECK: v{{[0-9]+}}.h += vdmpy(v{{[0-9]+}}.ub,r{{[0-9]+}}.b)
 define void @test7(<16 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %0 = load <16 x i32>, ptr @d, align 64
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vdmpybus.acc(<16 x i32> %0, <16 x i32> %a, i32 %b)
-  store <16 x i32> %1, <16 x i32>* @d, align 64
+  store <16 x i32> %1, ptr @d, align 64
   ret void
 }
 
@@ -77,9 +77,9 @@ entry:
 ; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h += vdmpy(v{{[0-9]+}}:{{[0-9]+}}.ub,r{{[0-9]+}}.b)
 define void @test8(<32 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %0 = load <32 x i32>, ptr @c, align 128
   %1 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %0, <32 x i32> %a, i32 %b)
-  store <32 x i32> %1, <32 x i32>* @c, align 128
+  store <32 x i32> %1, ptr @c, align 128
   ret void
 }
 
@@ -87,9 +87,9 @@ entry:
 ; CHECK: v{{[0-9]+}}.w += vdmpy(v{{[0-9]+}}.h,r{{[0-9]+}}.uh):sat
 define void @test9(<16 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %0 = load <16 x i32>, ptr @d, align 64
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhsusat.acc(<16 x i32> %0, <16 x i32> %a, i32 %b)
-  store <16 x i32> %1, <16 x i32>* @d, align 64
+  store <16 x i32> %1, ptr @d, align 64
   ret void
 }
 
@@ -97,9 +97,9 @@ entry:
 ; CHECK: v{{[0-9]+}}.w += vdmpy(v{{[0-9]+}}:{{[0-9]+}}.h,r{{[0-9]+}}.uh,#1):sat
 define void @test10(<32 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %0 = load <16 x i32>, ptr @d, align 64
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhsuisat.acc(<16 x i32> %0, <32 x i32> %a, i32 %b)
-  store <16 x i32> %1, <16 x i32>* @d, align 64
+  store <16 x i32> %1, ptr @d, align 64
   ret void
 }
 
@@ -107,9 +107,9 @@ entry:
 ; CHECK: v{{[0-9]+}}.w += vdmpy(v{{[0-9]+}}:{{[0-9]+}}.h,r{{[0-9]+}}.h):sat
 define void @test11(<32 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %0 = load <16 x i32>, ptr @d, align 64
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhisat.acc(<16 x i32> %0, <32 x i32> %a, i32 %b)
-  store <16 x i32> %1, <16 x i32>* @d, align 64
+  store <16 x i32> %1, ptr @d, align 64
   ret void
 }
 
@@ -117,9 +117,9 @@ entry:
 ; CHECK: v{{[0-9]+}}.w += vdmpy(v{{[0-9]+}}.h,r{{[0-9]+}}.h):sat
 define void @test12(<16 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %0 = load <16 x i32>, ptr @d, align 64
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhsat.acc(<16 x i32> %0, <16 x i32> %a, i32 %b)
-  store <16 x i32> %1, <16 x i32>* @d, align 64
+  store <16 x i32> %1, ptr @d, align 64
   ret void
 }
 
@@ -127,9 +127,9 @@ entry:
 ; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w += vdmpy(v{{[0-9]+}}:{{[0-9]+}}.h,r{{[0-9]+}}.b)
 define void @test13(<32 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %0 = load <32 x i32>, ptr @c, align 128
   %1 = tail call <32 x i32> @llvm.hexagon.V6.vdmpyhb.dv.acc(<32 x i32> %0, <32 x i32> %a, i32 %b)
-  store <32 x i32> %1, <32 x i32>* @c, align 128
+  store <32 x i32> %1, ptr @c, align 128
   ret void
 }
 
@@ -137,9 +137,9 @@ entry:
 ; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h += vmpy(v{{[0-9]+}}.ub,r{{[0-9]+}}.b)
 define void @test14(<16 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %0 = load <32 x i32>, ptr @c, align 128
   %1 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %0, <16 x i32> %a, i32 %b)
-  store <32 x i32> %1, <32 x i32>* @c, align 128
+  store <32 x i32> %1, ptr @c, align 128
   ret void
 }
 
@@ -147,9 +147,9 @@ entry:
 ; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h += vmpa(v{{[0-9]+}}:{{[0-9]+}}.ub,r{{[0-9]+}}.b)
 define void @test15(<32 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %0 = load <32 x i32>, ptr @c, align 128
   %1 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %0, <32 x i32> %a, i32 %b)
-  store <32 x i32> %1, <32 x i32>* @c, align 128
+  store <32 x i32> %1, ptr @c, align 128
   ret void
 }
 
@@ -157,9 +157,9 @@ entry:
 ; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w += vmpa(v{{[0-9]+}}:{{[0-9]+}}.h,r{{[0-9]+}}.b)
 define void @test16(<32 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %0 = load <32 x i32>, ptr @c, align 128
   %1 = tail call <32 x i32> @llvm.hexagon.V6.vmpahb.acc(<32 x i32> %0, <32 x i32> %a, i32 %b)
-  store <32 x i32> %1, <32 x i32>* @c, align 128
+  store <32 x i32> %1, ptr @c, align 128
   ret void
 }
 
@@ -167,9 +167,9 @@ entry:
 ; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w += vmpy(v{{[0-9]+}}.h,r{{[0-9]+}}.h):sat
 define void @test17(<16 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %0 = load <32 x i32>, ptr @c, align 128
   %1 = tail call <32 x i32> @llvm.hexagon.V6.vmpyhsat.acc(<32 x i32> %0, <16 x i32> %a, i32 %b)
-  store <32 x i32> %1, <32 x i32>* @c, align 128
+  store <32 x i32> %1, ptr @c, align 128
   ret void
 }
 
@@ -177,9 +177,9 @@ entry:
 ; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uw += vmpy(v{{[0-9]+}}.uh,r{{[0-9]+}}.uh)
 define void @test18(<16 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %0 = load <32 x i32>, ptr @c, align 128
   %1 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuh.acc(<32 x i32> %0, <16 x i32> %a, i32 %b)
-  store <32 x i32> %1, <32 x i32>* @c, align 128
+  store <32 x i32> %1, ptr @c, align 128
   ret void
 }
 
@@ -187,9 +187,9 @@ entry:
 ; CHECK: v{{[0-9]+}}.w += vmpyi(v{{[0-9]+}}.w,r{{[0-9]+}}.b)
 define void @test19(<16 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %0 = load <16 x i32>, ptr @d, align 64
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %0, <16 x i32> %a, i32 %b)
-  store <16 x i32> %1, <16 x i32>* @d, align 64
+  store <16 x i32> %1, ptr @d, align 64
   ret void
 }
 
@@ -197,9 +197,9 @@ entry:
 ; CHECK: v{{[0-9]+}}.w += vmpyi(v{{[0-9]+}}.w,r{{[0-9]+}}.h)
 define void @test20(<16 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %0 = load <16 x i32>, ptr @d, align 64
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %0, <16 x i32> %a, i32 %b)
-  store <16 x i32> %1, <16 x i32>* @d, align 64
+  store <16 x i32> %1, ptr @d, align 64
   ret void
 }
 
@@ -207,9 +207,9 @@ entry:
 ; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uw += vdsad(v{{[0-9]+}}:{{[0-9]+}}.uh,r{{[0-9]+}}.uh)
 define void @test21(<32 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %0 = load <32 x i32>, ptr @c, align 128
   %1 = tail call <32 x i32> @llvm.hexagon.V6.vdsaduh.acc(<32 x i32> %0, <32 x i32> %a, i32 %b)
-  store <32 x i32> %1, <32 x i32>* @c, align 128
+  store <32 x i32> %1, ptr @c, align 128
   ret void
 }
 
@@ -217,9 +217,9 @@ entry:
 ; CHECK: v{{[0-9]+}}.h += vmpyi(v{{[0-9]+}}.h,r{{[0-9]+}}.b)
 define void @test22(<16 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %0 = load <16 x i32>, ptr @d, align 64
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vmpyihb.acc(<16 x i32> %0, <16 x i32> %a, i32 %b)
-  store <16 x i32> %1, <16 x i32>* @d, align 64
+  store <16 x i32> %1, ptr @d, align 64
   ret void
 }
 
@@ -227,9 +227,9 @@ entry:
 ; CHECK: v{{[0-9]+}}.w += vasl(v{{[0-9]+}}.w,r{{[0-9]+}})
 define void @test23(<16 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %0 = load <16 x i32>, ptr @d, align 64
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32> %0, <16 x i32> %a, i32 %b)
-  store <16 x i32> %1, <16 x i32>* @d, align 64
+  store <16 x i32> %1, ptr @d, align 64
   ret void
 }
 
@@ -237,9 +237,9 @@ entry:
 ; CHECK: v{{[0-9]+}}.w += vasr(v{{[0-9]+}}.w,r{{[0-9]+}})
 define void @test24(<16 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %0 = load <16 x i32>, ptr @d, align 64
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vasrw.acc(<16 x i32> %0, <16 x i32> %a, i32 %b)
-  store <16 x i32> %1, <16 x i32>* @d, align 64
+  store <16 x i32> %1, ptr @d, align 64
   ret void
 }
 
@@ -247,9 +247,9 @@ entry:
 ; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uh += vmpy(v{{[0-9]+}}.ub,r{{[0-9]+}}.ub)
 define void @test25(<16 x i32> %a, i32 %b) #0 {
 entry:
-  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %0 = load <32 x i32>, ptr @c, align 128
   %1 = tail call <32 x i32> @llvm.hexagon.V6.vmpyub.acc(<32 x i32> %0, <16 x i32> %a, i32 %b)
-  store <32 x i32> %1, <32 x i32>* @c, align 128
+  store <32 x i32> %1, ptr @c, align 128
   ret void
 }
 
@@ -257,9 +257,9 @@ entry:
 ; CHECK: v{{[0-9]+}}.w += vdmpy(v{{[0-9]+}}.h,v{{[0-9]+}}.h):sat
 define void @test26(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %0 = load <16 x i32>, ptr @d, align 64
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhvsat.acc(<16 x i32> %0, <16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %1, <16 x i32>* @d, align 64
+  store <16 x i32> %1, ptr @d, align 64
   ret void
 }
 
@@ -267,9 +267,9 @@ entry:
 ; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h += vmpy(v{{[0-9]+}}.ub,v{{[0-9]+}}.b)
 define void @test27(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %0 = load <32 x i32>, ptr @c, align 128
   %1 = tail call <32 x i32> @llvm.hexagon.V6.vmpybusv.acc(<32 x i32> %0, <16 x i32> %a, <16 x i32> %b)
-  store <32 x i32> %1, <32 x i32>* @c, align 128
+  store <32 x i32> %1, ptr @c, align 128
   ret void
 }
 
@@ -277,9 +277,9 @@ entry:
 ; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h += vmpy(v{{[0-9]+}}.b,v{{[0-9]+}}.b)
 define void @test28(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %0 = load <32 x i32>, ptr @c, align 128
   %1 = tail call <32 x i32> @llvm.hexagon.V6.vmpybv.acc(<32 x i32> %0, <16 x i32> %a, <16 x i32> %b)
-  store <32 x i32> %1, <32 x i32>* @c, align 128
+  store <32 x i32> %1, ptr @c, align 128
   ret void
 }
 
@@ -287,9 +287,9 @@ entry:
 ; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w += vmpy(v{{[0-9]+}}.h,v{{[0-9]+}}.uh)
 define void @test29(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %0 = load <32 x i32>, ptr @c, align 128
   %1 = tail call <32 x i32> @llvm.hexagon.V6.vmpyhus.acc(<32 x i32> %0, <16 x i32> %a, <16 x i32> %b)
-  store <32 x i32> %1, <32 x i32>* @c, align 128
+  store <32 x i32> %1, ptr @c, align 128
   ret void
 }
 
@@ -297,9 +297,9 @@ entry:
 ; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w += vmpy(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
 define void @test30(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %0 = load <32 x i32>, ptr @c, align 128
   %1 = tail call <32 x i32> @llvm.hexagon.V6.vmpyhv.acc(<32 x i32> %0, <16 x i32> %a, <16 x i32> %b)
-  store <32 x i32> %1, <32 x i32>* @c, align 128
+  store <32 x i32> %1, ptr @c, align 128
   ret void
 }
 
@@ -307,9 +307,9 @@ entry:
 ; CHECK: v{{[0-9]+}}.w += vmpyie(v{{[0-9]+}}.w,v{{[0-9]+}}.h)
 define void @test31(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %0 = load <16 x i32>, ptr @d, align 64
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiewh.acc(<16 x i32> %0, <16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %1, <16 x i32>* @d, align 64
+  store <16 x i32> %1, ptr @d, align 64
   ret void
 }
 
@@ -317,9 +317,9 @@ entry:
 ; CHECK: v{{[0-9]+}}.w += vmpyie(v{{[0-9]+}}.w,v{{[0-9]+}}.uh)
 define void @test32(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %0 = load <16 x i32>, ptr @d, align 64
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiewuh.acc(<16 x i32> %0, <16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %1, <16 x i32>* @d, align 64
+  store <16 x i32> %1, ptr @d, align 64
   ret void
 }
 
@@ -327,9 +327,9 @@ entry:
 ; CHECK: v{{[0-9]+}}.h += vmpyi(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
 define void @test33(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %0 = load <16 x i32>, ptr @d, align 64
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vmpyih.acc(<16 x i32> %0, <16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %1, <16 x i32>* @d, align 64
+  store <16 x i32> %1, ptr @d, align 64
   ret void
 }
 
@@ -337,9 +337,9 @@ entry:
 ; CHECK: v{{[0-9]+}}.w += vmpyo(v{{[0-9]+}}.w,v{{[0-9]+}}.h):<<1:rnd:sat:shift
 define void @test34(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %0 = load <16 x i32>, ptr @d, align 64
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vmpyowh.rnd.sacc(<16 x i32> %0, <16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %1, <16 x i32>* @d, align 64
+  store <16 x i32> %1, ptr @d, align 64
   ret void
 }
 
@@ -347,9 +347,9 @@ entry:
 ; CHECK: v{{[0-9]+}}.w += vmpyo(v{{[0-9]+}}.w,v{{[0-9]+}}.h):<<1:sat:shift
 define void @test35(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %0 = load <16 x i32>, ptr @d, align 64
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vmpyowh.sacc(<16 x i32> %0, <16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %1, <16 x i32>* @d, align 64
+  store <16 x i32> %1, ptr @d, align 64
   ret void
 }
 
@@ -357,9 +357,9 @@ entry:
 ; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uh += vmpy(v{{[0-9]+}}.ub,v{{[0-9]+}}.ub)
 define void @test36(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %0 = load <32 x i32>, ptr @c, align 128
   %1 = tail call <32 x i32> @llvm.hexagon.V6.vmpyubv.acc(<32 x i32> %0, <16 x i32> %a, <16 x i32> %b)
-  store <32 x i32> %1, <32 x i32>* @c, align 128
+  store <32 x i32> %1, ptr @c, align 128
   ret void
 }
 
@@ -367,9 +367,9 @@ entry:
 ; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uw += vmpy(v{{[0-9]+}}.uh,v{{[0-9]+}}.uh)
 define void @test37(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %0 = load <32 x i32>, ptr @c, align 128
   %1 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv.acc(<32 x i32> %0, <16 x i32> %a, <16 x i32> %b)
-  store <32 x i32> %1, <32 x i32>* @c, align 128
+  store <32 x i32> %1, ptr @c, align 128
   ret void
 }
 
@@ -377,9 +377,9 @@ entry:
 ; CHECK: v{{[0-9]+}}.w += vrmpy(v{{[0-9]+}}.ub,v{{[0-9]+}}.b)
 define void @test38(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %0 = load <16 x i32>, ptr @d, align 64
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vrmpybusv.acc(<16 x i32> %0, <16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %1, <16 x i32>* @d, align 64
+  store <16 x i32> %1, ptr @d, align 64
   ret void
 }
 
@@ -387,9 +387,9 @@ entry:
 ; CHECK: v{{[0-9]+}}.w += vrmpy(v{{[0-9]+}}.b,v{{[0-9]+}}.b)
 define void @test39(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %0 = load <16 x i32>, ptr @d, align 64
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vrmpybv.acc(<16 x i32> %0, <16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %1, <16 x i32>* @d, align 64
+  store <16 x i32> %1, ptr @d, align 64
   ret void
 }
 
@@ -397,9 +397,9 @@ entry:
 ; CHECK: v{{[0-9]+}}.uw += vrmpy(v{{[0-9]+}}.ub,v{{[0-9]+}}.ub)
 define void @test40(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %0 = load <16 x i32>, ptr @d, align 64
   %1 = tail call <16 x i32> @llvm.hexagon.V6.vrmpyubv.acc(<16 x i32> %0, <16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %1, <16 x i32>* @d, align 64
+  store <16 x i32> %1, ptr @d, align 64
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/intrinsics-v60-vmpy.ll b/llvm/test/CodeGen/Hexagon/intrinsics-v60-vmpy.ll
index 1e4ef9735c63e..1060a809ede69 100644
--- a/llvm/test/CodeGen/Hexagon/intrinsics-v60-vmpy.ll
+++ b/llvm/test/CodeGen/Hexagon/intrinsics-v60-vmpy.ll
@@ -8,7 +8,7 @@
 define void @test1(<32 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vtmpyb(<32 x i32> %a, i32 %b)
-  store <32 x i32> %0, <32 x i32>* @c, align 128
+  store <32 x i32> %0, ptr @c, align 128
   ret void
 }
 
@@ -17,7 +17,7 @@ entry:
 define void @test2(<32 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vtmpybus(<32 x i32> %a, i32 %b)
-  store <32 x i32> %0, <32 x i32>* @c, align 128
+  store <32 x i32> %0, ptr @c, align 128
   ret void
 }
 
@@ -26,7 +26,7 @@ entry:
 define void @test3(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhb(<16 x i32> %a, i32 %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -35,7 +35,7 @@ entry:
 define void @test4(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vrmpyub(<16 x i32> %a, i32 %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -44,7 +44,7 @@ entry:
 define void @test5(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vrmpybus(<16 x i32> %a, i32 %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -53,7 +53,7 @@ entry:
 define void @test6(<32 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vdsaduh(<32 x i32> %a, i32 %b)
-  store <32 x i32> %0, <32 x i32>* @c, align 128
+  store <32 x i32> %0, ptr @c, align 128
   ret void
 }
 
@@ -62,7 +62,7 @@ entry:
 define void @test7(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vdmpybus(<16 x i32> %a, i32 %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -71,7 +71,7 @@ entry:
 define void @test8(<32 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv(<32 x i32> %a, i32 %b)
-  store <32 x i32> %0, <32 x i32>* @c, align 128
+  store <32 x i32> %0, ptr @c, align 128
   ret void
 }
 
@@ -80,7 +80,7 @@ entry:
 define void @test9(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhsusat(<16 x i32> %a, i32 %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -89,7 +89,7 @@ entry:
 define void @test10(<32 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhsuisat(<32 x i32> %a, i32 %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -98,7 +98,7 @@ entry:
 define void @test11(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhsat(<16 x i32> %a, i32 %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -107,7 +107,7 @@ entry:
 define void @test12(<32 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhisat(<32 x i32> %a, i32 %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -116,7 +116,7 @@ entry:
 define void @test13(<32 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vdmpyhb.dv(<32 x i32> %a, i32 %b)
-  store <32 x i32> %0, <32 x i32>* @c, align 128
+  store <32 x i32> %0, ptr @c, align 128
   ret void
 }
 
@@ -125,7 +125,7 @@ entry:
 define void @test14(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus(<16 x i32> %a, i32 %b)
-  store <32 x i32> %0, <32 x i32>* @c, align 128
+  store <32 x i32> %0, ptr @c, align 128
   ret void
 }
 
@@ -134,7 +134,7 @@ entry:
 define void @test15(<32 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus(<32 x i32> %a, i32 %b)
-  store <32 x i32> %0, <32 x i32>* @c, align 128
+  store <32 x i32> %0, ptr @c, align 128
   ret void
 }
 
@@ -143,7 +143,7 @@ entry:
 define void @test16(<32 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpahb(<32 x i32> %a, i32 %b)
-  store <32 x i32> %0, <32 x i32>* @c, align 128
+  store <32 x i32> %0, ptr @c, align 128
   ret void
 }
 
@@ -152,7 +152,7 @@ entry:
 define void @test17(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpyh(<16 x i32> %a, i32 %b)
-  store <32 x i32> %0, <32 x i32>* @c, align 128
+  store <32 x i32> %0, ptr @c, align 128
   ret void
 }
 
@@ -161,7 +161,7 @@ entry:
 define void @test18(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyhss(<16 x i32> %a, i32 %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -170,7 +170,7 @@ entry:
 define void @test19(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyhsrs(<16 x i32> %a, i32 %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -179,7 +179,7 @@ entry:
 define void @test20(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuh(<16 x i32> %a, i32 %b)
-  store <32 x i32> %0, <32 x i32>* @c, align 128
+  store <32 x i32> %0, ptr @c, align 128
   ret void
 }
 
@@ -188,7 +188,7 @@ entry:
 define void @test21(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyihb(<16 x i32> %a, i32 %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -197,7 +197,7 @@ entry:
 define void @test22(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vror(<16 x i32> %a, i32 %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -206,7 +206,7 @@ entry:
 define void @test23(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vasrw(<16 x i32> %a, i32 %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -215,7 +215,7 @@ entry:
 define void @test24(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vasrh(<16 x i32> %a, i32 %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -224,7 +224,7 @@ entry:
 define void @test25(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vaslw(<16 x i32> %a, i32 %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -233,7 +233,7 @@ entry:
 define void @test26(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vaslh(<16 x i32> %a, i32 %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -242,7 +242,7 @@ entry:
 define void @test27(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vlsrw(<16 x i32> %a, i32 %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -251,7 +251,7 @@ entry:
 define void @test28(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vlsrh(<16 x i32> %a, i32 %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -260,7 +260,7 @@ entry:
 define void @test29(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh(<16 x i32> %a, i32 %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -269,7 +269,7 @@ entry:
 define void @test30(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %a, i32 %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -278,7 +278,7 @@ entry:
 define void @test31(<32 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vtmpyhb(<32 x i32> %a, i32 %b)
-  store <32 x i32> %0, <32 x i32>* @c, align 128
+  store <32 x i32> %0, ptr @c, align 128
   ret void
 }
 
@@ -287,7 +287,7 @@ entry:
 define void @test32(<16 x i32> %a, i32 %b) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpyub(<16 x i32> %a, i32 %b)
-  store <32 x i32> %0, <32 x i32>* @c, align 128
+  store <32 x i32> %0, ptr @c, align 128
   ret void
 }
 
@@ -296,7 +296,7 @@ entry:
 define void @test33(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vrmpyubv(<16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -305,7 +305,7 @@ entry:
 define void @test34(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vrmpybv(<16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -314,7 +314,7 @@ entry:
 define void @test35(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vrmpybusv(<16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -323,7 +323,7 @@ entry:
 define void @test36(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhvsat(<16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -332,7 +332,7 @@ entry:
 define void @test37(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpybv(<16 x i32> %a, <16 x i32> %b)
-  store <32 x i32> %0, <32 x i32>* @c, align 128
+  store <32 x i32> %0, ptr @c, align 128
   ret void
 }
 
@@ -341,7 +341,7 @@ entry:
 define void @test38(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32> %a, <16 x i32> %b)
-  store <32 x i32> %0, <32 x i32>* @c, align 128
+  store <32 x i32> %0, ptr @c, align 128
   ret void
 }
 
@@ -350,7 +350,7 @@ entry:
 define void @test39(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpybusv(<16 x i32> %a, <16 x i32> %b)
-  store <32 x i32> %0, <32 x i32>* @c, align 128
+  store <32 x i32> %0, ptr @c, align 128
   ret void
 }
 
@@ -359,7 +359,7 @@ entry:
 define void @test40(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpyhv(<16 x i32> %a, <16 x i32> %b)
-  store <32 x i32> %0, <32 x i32>* @c, align 128
+  store <32 x i32> %0, ptr @c, align 128
   ret void
 }
 
@@ -368,7 +368,7 @@ entry:
 define void @test41(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %a, <16 x i32> %b)
-  store <32 x i32> %0, <32 x i32>* @c, align 128
+  store <32 x i32> %0, ptr @c, align 128
   ret void
 }
 
@@ -377,7 +377,7 @@ entry:
 define void @test42(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyhvsrs(<16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -386,7 +386,7 @@ entry:
 define void @test43(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpyhus(<16 x i32> %a, <16 x i32> %b)
-  store <32 x i32> %0, <32 x i32>* @c, align 128
+  store <32 x i32> %0, ptr @c, align 128
   ret void
 }
 
@@ -395,7 +395,7 @@ entry:
 define void @test44(<32 x i32> %a, <32 x i32> %b) #0 {
 entry:
   %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpabusv(<32 x i32> %a, <32 x i32> %b)
-  store <32 x i32> %0, <32 x i32>* @c, align 128
+  store <32 x i32> %0, ptr @c, align 128
   ret void
 }
 
@@ -404,7 +404,7 @@ entry:
 define void @test45(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyih(<16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -413,7 +413,7 @@ entry:
 define void @test46(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyewuh(<16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -422,7 +422,7 @@ entry:
 define void @test47(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyowh(<16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -431,7 +431,7 @@ entry:
 define void @test48(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiewuh(<16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -440,7 +440,7 @@ entry:
 define void @test49(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiowh(<16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -449,7 +449,7 @@ entry:
 define void @test50(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyowh.rnd(<16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 
@@ -458,7 +458,7 @@ entry:
 define void @test51(<16 x i32> %a, <16 x i32> %b) #0 {
 entry:
   %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyieoh(<16 x i32> %a, <16 x i32> %b)
-  store <16 x i32> %0, <16 x i32>* @d, align 64
+  store <16 x i32> %0, ptr @d, align 64
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/intrinsics/atomic_load.ll b/llvm/test/CodeGen/Hexagon/intrinsics/atomic_load.ll
index 9b245dfbb5a39..a682159ad24fc 100644
--- a/llvm/test/CodeGen/Hexagon/intrinsics/atomic_load.ll
+++ b/llvm/test/CodeGen/Hexagon/intrinsics/atomic_load.ll
@@ -13,13 +13,13 @@
 @i32Dest = global i32 0, align 4
 @i64Src  = global i64 0, align 8
 @i64Dest = global i64 0, align 8
- at ptrSrc  = global %struct.Obj* null, align 4
- at ptrDest = global %struct.Obj* null, align 4
+ at ptrSrc  = global ptr null, align 4
+ at ptrDest = global ptr null, align 4
 
 define void @load_i8() #0 {
 entry:
-  %i8Tmp = load atomic i8, i8* @i8Src ORDER, align 1
-  store i8 %i8Tmp, i8* @i8Dest, align 1
+  %i8Tmp = load atomic i8, ptr @i8Src ORDER, align 1
+  store i8 %i8Tmp, ptr @i8Dest, align 1
   ret void
 }
 ; CHECK-LABEL: load_i8:
@@ -28,8 +28,8 @@ entry:
 
 define void @load_i16() #0 {
 entry:
-  %i16Tmp = load atomic i16, i16* @i16Src ORDER, align 2
-  store i16 %i16Tmp, i16* @i16Dest, align 2
+  %i16Tmp = load atomic i16, ptr @i16Src ORDER, align 2
+  store i16 %i16Tmp, ptr @i16Dest, align 2
   ret void
 }
 ; CHECK-LABEL: load_i16:
@@ -38,8 +38,8 @@ entry:
 
 define void @load_i32() #0 {
 entry:
-  %i32Tmp = load atomic i32, i32* @i32Src ORDER, align 4
-  store i32 %i32Tmp, i32* @i32Dest, align 4
+  %i32Tmp = load atomic i32, ptr @i32Src ORDER, align 4
+  store i32 %i32Tmp, ptr @i32Dest, align 4
   ret void
 }
 ; CHECK-LABEL: load_i32:
@@ -48,8 +48,8 @@ entry:
 
 define void @load_i64() #0 {
 entry:
-  %i64Tmp = load atomic i64, i64* @i64Src ORDER, align 8
-  store i64 %i64Tmp, i64* @i64Dest, align 8
+  %i64Tmp = load atomic i64, ptr @i64Src ORDER, align 8
+  store i64 %i64Tmp, ptr @i64Dest, align 8
   ret void
 }
 ; CHECK-LABEL: load_i64:
@@ -58,8 +58,8 @@ entry:
 
 define void @load_ptr() #0 {
 entry:
-  %ptrTmp = load atomic i32, i32* bitcast (%struct.Obj** @ptrSrc to i32*) ORDER, align 4
-  store i32 %ptrTmp, i32* bitcast (%struct.Obj** @ptrDest to i32*), align 4
+  %ptrTmp = load atomic i32, ptr @ptrSrc ORDER, align 4
+  store i32 %ptrTmp, ptr @ptrDest, align 4
   ret void
 }
 ; CHECK-LABEL: load_ptr:

diff  --git a/llvm/test/CodeGen/Hexagon/intrinsics/atomic_store.ll b/llvm/test/CodeGen/Hexagon/intrinsics/atomic_store.ll
index 23865994db27e..78eb11a1fee1f 100644
--- a/llvm/test/CodeGen/Hexagon/intrinsics/atomic_store.ll
+++ b/llvm/test/CodeGen/Hexagon/intrinsics/atomic_store.ll
@@ -13,13 +13,13 @@
 @i32Dest = global i32 0, align 4
 @i64Src  = global i64 0, align 8
 @i64Dest = global i64 0, align 8
- at ptrSrc  = global %struct.Obj* null, align 4
- at ptrDest = global %struct.Obj* null, align 4
+ at ptrSrc  = global ptr null, align 4
+ at ptrDest = global ptr null, align 4
 
 define void @store_i8() #0 {
 entry:
-  %i8Tmp = load i8, i8* @i8Src, align 1
-  store atomic i8 %i8Tmp, i8* @i8Dest ORDER, align 1
+  %i8Tmp = load i8, ptr @i8Src, align 1
+  store atomic i8 %i8Tmp, ptr @i8Dest ORDER, align 1
   ret void
 }
 ; CHECK-LABEL: store_i8:
@@ -28,8 +28,8 @@ entry:
 
 define void @store_i16() #0 {
 entry:
-  %i16Tmp = load i16, i16* @i16Src, align 2
-  store atomic i16 %i16Tmp, i16* @i16Dest ORDER, align 2
+  %i16Tmp = load i16, ptr @i16Src, align 2
+  store atomic i16 %i16Tmp, ptr @i16Dest ORDER, align 2
   ret void
 }
 ; CHECK-LABEL: store_i16:
@@ -38,8 +38,8 @@ entry:
 
 define void @store_i32() #0 {
 entry:
-  %i32Tmp = load i32, i32* @i32Src, align 4
-  store atomic i32 %i32Tmp, i32* @i32Dest ORDER, align 4
+  %i32Tmp = load i32, ptr @i32Src, align 4
+  store atomic i32 %i32Tmp, ptr @i32Dest ORDER, align 4
   ret void
 }
 ; CHECK-LABEL: store_i32:
@@ -48,8 +48,8 @@ entry:
 
 define void @store_i64() #0 {
 entry:
-  %i64Tmp = load i64, i64* @i64Src, align 8
-  store atomic i64 %i64Tmp, i64* @i64Dest ORDER, align 8
+  %i64Tmp = load i64, ptr @i64Src, align 8
+  store atomic i64 %i64Tmp, ptr @i64Dest ORDER, align 8
   ret void
 }
 ; CHECK-LABEL: store_i64:
@@ -58,8 +58,8 @@ entry:
 
 define void @store_ptr() #0 {
 entry:
-  %ptrTmp = load i32, i32* bitcast (%struct.Obj** @ptrSrc to i32*), align 4
-  store atomic i32 %ptrTmp, i32* bitcast (%struct.Obj** @ptrDest to i32*) ORDER, align 4
+  %ptrTmp = load i32, ptr @ptrSrc, align 4
+  store atomic i32 %ptrTmp, ptr @ptrDest ORDER, align 4
   ret void
 }
 ; CHECK-LABEL: store_ptr:

diff  --git a/llvm/test/CodeGen/Hexagon/intrinsics/atomicrmw_addsub_native.ll b/llvm/test/CodeGen/Hexagon/intrinsics/atomicrmw_addsub_native.ll
index 87f832eb717b4..d47bc8baa2b7e 100644
--- a/llvm/test/CodeGen/Hexagon/intrinsics/atomicrmw_addsub_native.ll
+++ b/llvm/test/CodeGen/Hexagon/intrinsics/atomicrmw_addsub_native.ll
@@ -17,15 +17,15 @@
 @i64First   = global i64 0, align 8
 @i64Second  = global i64 0, align 8
 @i64Result  = global i64 0, align 8
- at ptrFirst   = global %struct.Obj* null, align 4
- at ptrSecond  = global %struct.Obj* null, align 4
- at ptrResult  = global %struct.Obj* null, align 4
+ at ptrFirst   = global ptr null, align 4
+ at ptrSecond  = global ptr null, align 4
+ at ptrResult  = global ptr null, align 4
 
 define void @atomicrmw_op_i32() #0 {
 BINARY_OP_entry:
-  %i32First = load i32, i32* @i32First, align 4
-  %i32Result = atomicrmw BINARY_OP i32* @i32Second, i32 %i32First ORDER
-  store i32 %i32Result, i32* @i32Result, align 4
+  %i32First = load i32, ptr @i32First, align 4
+  %i32Result = atomicrmw BINARY_OP ptr @i32Second, i32 %i32First ORDER
+  store i32 %i32Result, ptr @i32Result, align 4
   ret void
 }
 ; CHECK-LABEL: atomicrmw_op_i32:
@@ -45,9 +45,9 @@ BINARY_OP_entry:
 
 define void @atomicrmw_op_i64() #0 {
 entry:
-  %i64First = load i64, i64* @i64First, align 8
-  %i64Result = atomicrmw BINARY_OP i64* @i64Second, i64 %i64First ORDER
-  store i64 %i64Result, i64* @i64Result, align 8
+  %i64First = load i64, ptr @i64First, align 8
+  %i64Result = atomicrmw BINARY_OP ptr @i64Second, i64 %i64First ORDER
+  store i64 %i64Result, ptr @i64Result, align 8
   ret void
 }
 ; CHECK-LABEL: atomicrmw_op_i64:
@@ -66,9 +66,9 @@ entry:
 
 define void @atomicrmw_op_ptr() #0 {
 entry:
-  %ptrFirst = load i32, i32* bitcast (%struct.Obj** @ptrFirst to i32*), align 4
-  %ptrResult = atomicrmw BINARY_OP i32* bitcast (%struct.Obj** @ptrSecond to i32*), i32 %ptrFirst ORDER
-  store i32 %ptrResult, i32* bitcast (%struct.Obj** @ptrResult to i32*), align 4
+  %ptrFirst = load i32, ptr @ptrFirst, align 4
+  %ptrResult = atomicrmw BINARY_OP ptr @ptrSecond, i32 %ptrFirst ORDER
+  store i32 %ptrResult, ptr @ptrResult, align 4
   ret void
 }
 ; CHECK-LABEL: atomicrmw_op_ptr:

diff  --git a/llvm/test/CodeGen/Hexagon/intrinsics/atomicrmw_bitwise_native.ll b/llvm/test/CodeGen/Hexagon/intrinsics/atomicrmw_bitwise_native.ll
index df5198b39f1a4..c60a30eff605b 100644
--- a/llvm/test/CodeGen/Hexagon/intrinsics/atomicrmw_bitwise_native.ll
+++ b/llvm/test/CodeGen/Hexagon/intrinsics/atomicrmw_bitwise_native.ll
@@ -37,9 +37,9 @@
 ; CHECK-DAG: jumpr r31
 define void @f0() {
 BINARY_OP_entry:
-  %v0 = load i32, i32* @g0, align 4
-  %v1 = atomicrmw BINARY_OP i32* @g1, i32 %v0 ORDER
-  store i32 %v1, i32* @g2, align 4
+  %v0 = load i32, ptr @g0, align 4
+  %v1 = atomicrmw BINARY_OP ptr @g1, i32 %v0 ORDER
+  store i32 %v1, ptr @g2, align 4
   ret void
 }
 
@@ -58,8 +58,8 @@ BINARY_OP_entry:
 ; CHECK-DAG: jumpr r31
 define void @f1() {
 b0:
-  %v0 = load i64, i64* @g3, align 8
-  %v1 = atomicrmw BINARY_OP i64* @g4, i64 %v0 ORDER
-  store i64 %v1, i64* @g5, align 8
+  %v0 = load i64, ptr @g3, align 8
+  %v1 = atomicrmw BINARY_OP ptr @g4, i64 %v0 ORDER
+  store i64 %v1, ptr @g5, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/intrinsics/atomicrmw_nand.ll b/llvm/test/CodeGen/Hexagon/intrinsics/atomicrmw_nand.ll
index 15c94d65af8e5..5d292c39bc01e 100644
--- a/llvm/test/CodeGen/Hexagon/intrinsics/atomicrmw_nand.ll
+++ b/llvm/test/CodeGen/Hexagon/intrinsics/atomicrmw_nand.ll
@@ -27,9 +27,9 @@
 ; CHECK-DAG: jumpr r31
 define void @f0() {
 b0:
-  %v0 = load i32, i32* @g0, align 4
-  %v1 = atomicrmw nand i32* @g1, i32 %v0 ORDER
-  store i32 %v1, i32* @g2, align 4
+  %v0 = load i32, ptr @g0, align 4
+  %v1 = atomicrmw nand ptr @g1, i32 %v0 ORDER
+  store i32 %v1, ptr @g2, align 4
   ret void
 }
 
@@ -49,8 +49,8 @@ b0:
 ; CHECK-DAG: jumpr r31
 define void @f1() {
 b0:
-  %v0 = load i64, i64* @g3, align 8
-  %v1 = atomicrmw nand i64* @g4, i64 %v0 ORDER
-  store i64 %v1, i64* @g5, align 8
+  %v0 = load i64, ptr @g3, align 8
+  %v1 = atomicrmw nand ptr @g4, i64 %v0 ORDER
+  store i64 %v1, ptr @g5, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/intrinsics/byte-store-double.ll b/llvm/test/CodeGen/Hexagon/intrinsics/byte-store-double.ll
index a9defbf11e265..7c50977d1529f 100644
--- a/llvm/test/CodeGen/Hexagon/intrinsics/byte-store-double.ll
+++ b/llvm/test/CodeGen/Hexagon/intrinsics/byte-store-double.ll
@@ -14,30 +14,30 @@
 
 declare <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32>, i32)
 
-declare void @llvm.hexagon.V6.vmaskedstoreq.128B(<128 x i1>, i8*, <32 x i32>)
-define void @V6_vmaskedstoreq_128B( <32 x i32> %a, i8* %b, <32 x i32> %c) {
+declare void @llvm.hexagon.V6.vmaskedstoreq.128B(<128 x i1>, ptr, <32 x i32>)
+define void @V6_vmaskedstoreq_128B( <32 x i32> %a, ptr %b, <32 x i32> %c) {
   %1 = tail call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a, i32 -1)
-  call void @llvm.hexagon.V6.vmaskedstoreq.128B(<128 x i1> %1, i8* %b, <32 x i32> %c)
+  call void @llvm.hexagon.V6.vmaskedstoreq.128B(<128 x i1> %1, ptr %b, <32 x i32> %c)
   ret void
 }
 
-declare void @llvm.hexagon.V6.vmaskedstorenq.128B(<128 x i1>, i8*, <32 x i32>)
-define void @V6_vmaskedstorenq_128B( <32 x i32> %a, i8* %b, <32 x i32> %c) {
+declare void @llvm.hexagon.V6.vmaskedstorenq.128B(<128 x i1>, ptr, <32 x i32>)
+define void @V6_vmaskedstorenq_128B( <32 x i32> %a, ptr %b, <32 x i32> %c) {
   %1 = tail call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a, i32 -1)
-  call void @llvm.hexagon.V6.vmaskedstorenq.128B(<128 x i1> %1, i8* %b, <32 x i32> %c)
+  call void @llvm.hexagon.V6.vmaskedstorenq.128B(<128 x i1> %1, ptr %b, <32 x i32> %c)
   ret void
 }
 
-declare void @llvm.hexagon.V6.vmaskedstorentq.128B(<128 x i1>, i8*, <32 x i32>)
-define void @V6_vmaskedstorentq_128B( <32 x i32> %a, i8* %b, <32 x i32> %c) {
+declare void @llvm.hexagon.V6.vmaskedstorentq.128B(<128 x i1>, ptr, <32 x i32>)
+define void @V6_vmaskedstorentq_128B( <32 x i32> %a, ptr %b, <32 x i32> %c) {
   %1 = tail call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a, i32 -1)
-  call void @llvm.hexagon.V6.vmaskedstorentq.128B(<128 x i1> %1, i8* %b, <32 x i32> %c)
+  call void @llvm.hexagon.V6.vmaskedstorentq.128B(<128 x i1> %1, ptr %b, <32 x i32> %c)
   ret void
 }
 
-declare void @llvm.hexagon.V6.vmaskedstorentnq.128B(<128 x i1>, i8*, <32 x i32>)
-define void @V6_vmaskedstorentnq_128B( <32 x i32> %a, i8* %b, <32 x i32> %c) {
+declare void @llvm.hexagon.V6.vmaskedstorentnq.128B(<128 x i1>, ptr, <32 x i32>)
+define void @V6_vmaskedstorentnq_128B( <32 x i32> %a, ptr %b, <32 x i32> %c) {
   %1 = tail call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a, i32 -1)
-  call void @llvm.hexagon.V6.vmaskedstorentnq.128B(<128 x i1> %1, i8* %b, <32 x i32> %c)
+  call void @llvm.hexagon.V6.vmaskedstorentnq.128B(<128 x i1> %1, ptr %b, <32 x i32> %c)
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/intrinsics/byte-store.ll b/llvm/test/CodeGen/Hexagon/intrinsics/byte-store.ll
index 2aacaeae44b3e..805fbf071f972 100644
--- a/llvm/test/CodeGen/Hexagon/intrinsics/byte-store.ll
+++ b/llvm/test/CodeGen/Hexagon/intrinsics/byte-store.ll
@@ -14,30 +14,30 @@
 
 declare <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32>, i32)
 
-declare void @llvm.hexagon.V6.vmaskedstoreq(<64 x i1>, i8*, <16 x i32>)
-define void @V6_vmaskedstoreq( <16 x i32> %a, i8* %b, <16 x i32> %c) {
+declare void @llvm.hexagon.V6.vmaskedstoreq(<64 x i1>, ptr, <16 x i32>)
+define void @V6_vmaskedstoreq( <16 x i32> %a, ptr %b, <16 x i32> %c) {
   %1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a, i32 -1)
-  call void @llvm.hexagon.V6.vmaskedstoreq(<64 x i1> %1, i8* %b, <16 x i32> %c)
+  call void @llvm.hexagon.V6.vmaskedstoreq(<64 x i1> %1, ptr %b, <16 x i32> %c)
   ret void
 }
 
-declare void @llvm.hexagon.V6.vmaskedstorenq(<64 x i1>, i8*, <16 x i32>)
-define void @V6_vmaskedstorenq( <16 x i32> %a, i8* %b, <16 x i32> %c) {
+declare void @llvm.hexagon.V6.vmaskedstorenq(<64 x i1>, ptr, <16 x i32>)
+define void @V6_vmaskedstorenq( <16 x i32> %a, ptr %b, <16 x i32> %c) {
   %1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a, i32 -1)
-  call void @llvm.hexagon.V6.vmaskedstorenq(<64 x i1> %1, i8* %b, <16 x i32> %c)
+  call void @llvm.hexagon.V6.vmaskedstorenq(<64 x i1> %1, ptr %b, <16 x i32> %c)
   ret void
 }
 
-declare void @llvm.hexagon.V6.vmaskedstorentq(<64 x i1>, i8*, <16 x i32>)
-define void @V6_vmaskedstorentq( <16 x i32> %a, i8* %b, <16 x i32> %c) {
+declare void @llvm.hexagon.V6.vmaskedstorentq(<64 x i1>, ptr, <16 x i32>)
+define void @V6_vmaskedstorentq( <16 x i32> %a, ptr %b, <16 x i32> %c) {
   %1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a, i32 -1)
-  call void @llvm.hexagon.V6.vmaskedstorentq(<64 x i1> %1, i8* %b, <16 x i32> %c)
+  call void @llvm.hexagon.V6.vmaskedstorentq(<64 x i1> %1, ptr %b, <16 x i32> %c)
   ret void
 }
 
-declare void @llvm.hexagon.V6.vmaskedstorentnq(<64 x i1>, i8*, <16 x i32>)
-define void @V6_vmaskedstorentnq( <16 x i32> %a, i8* %b, <16 x i32> %c) {
+declare void @llvm.hexagon.V6.vmaskedstorentnq(<64 x i1>, ptr, <16 x i32>)
+define void @V6_vmaskedstorentnq( <16 x i32> %a, ptr %b, <16 x i32> %c) {
   %1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a, i32 -1)
-  call void @llvm.hexagon.V6.vmaskedstorentnq(<64 x i1> %1, i8* %b, <16 x i32> %c)
+  call void @llvm.hexagon.V6.vmaskedstorentnq(<64 x i1> %1, ptr %b, <16 x i32> %c)
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/intrinsics/llsc_bundling.ll b/llvm/test/CodeGen/Hexagon/intrinsics/llsc_bundling.ll
index 966945b66f470..fbeaede5599d6 100644
--- a/llvm/test/CodeGen/Hexagon/intrinsics/llsc_bundling.ll
+++ b/llvm/test/CodeGen/Hexagon/intrinsics/llsc_bundling.ll
@@ -5,7 +5,7 @@ target triple = "hexagon-unknown--elf"
 define void @_Z4lockv() #0 {
 entry:
   %__shared_owners = alloca i32, align 4
-  %0 = cmpxchg weak i32* %__shared_owners, i32 0, i32 1 seq_cst seq_cst
+  %0 = cmpxchg weak ptr %__shared_owners, i32 0, i32 1 seq_cst seq_cst
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/intrinsics/system_user.ll b/llvm/test/CodeGen/Hexagon/intrinsics/system_user.ll
index 1a5fd138e0ff9..8c7228a2c27e3 100644
--- a/llvm/test/CodeGen/Hexagon/intrinsics/system_user.ll
+++ b/llvm/test/CodeGen/Hexagon/intrinsics/system_user.ll
@@ -4,66 +4,66 @@ target triple = "hexagon"
 
 ; CHECK-LABEL: dc00:
 ; CHECK: dcfetch
-define void @dc00(i8* nocapture readonly %p) local_unnamed_addr #0 {
-  tail call void @llvm.hexagon.prefetch(i8* %p)
+define void @dc00(ptr nocapture readonly %p) local_unnamed_addr #0 {
+  tail call void @llvm.hexagon.prefetch(ptr %p)
   ret void
 }
 
 ; CHECK-LABEL: dc01:
 ; CHECK: dccleana
-define void @dc01(i8* nocapture readonly %p) local_unnamed_addr #0 {
+define void @dc01(ptr nocapture readonly %p) local_unnamed_addr #0 {
 entry:
-  tail call void @llvm.hexagon.Y2.dccleana(i8* %p)
+  tail call void @llvm.hexagon.Y2.dccleana(ptr %p)
   ret void
 }
 
 ; CHECK-LABEL: dc02:
 ; CHECK: dccleaninva
-define void @dc02(i8* nocapture readonly %p) local_unnamed_addr #0 {
+define void @dc02(ptr nocapture readonly %p) local_unnamed_addr #0 {
 entry:
-  tail call void @llvm.hexagon.Y2.dccleaninva(i8* %p)
+  tail call void @llvm.hexagon.Y2.dccleaninva(ptr %p)
   ret void
 }
 
 ; CHECK-LABEL: dc03:
 ; CHECK: dcinva
-define void @dc03(i8* nocapture readonly %p) local_unnamed_addr #0 {
+define void @dc03(ptr nocapture readonly %p) local_unnamed_addr #0 {
 entry:
-  tail call void @llvm.hexagon.Y2.dcinva(i8* %p)
+  tail call void @llvm.hexagon.Y2.dcinva(ptr %p)
   ret void
 }
 
 ; CHECK-LABEL: dc04:
 ; CHECK: dczeroa
-define void @dc04(i8* nocapture %p) local_unnamed_addr #0 {
+define void @dc04(ptr nocapture %p) local_unnamed_addr #0 {
 entry:
-  tail call void @llvm.hexagon.Y2.dczeroa(i8* %p)
+  tail call void @llvm.hexagon.Y2.dczeroa(ptr %p)
   ret void
 }
 
 ; CHECK-LABEL: dc05:
 ; CHECK: l2fetch(r{{[0-9]+}},r{{[0-9]+}})
-define void @dc05(i8* nocapture readonly %p, i32 %q) local_unnamed_addr #0 {
+define void @dc05(ptr nocapture readonly %p, i32 %q) local_unnamed_addr #0 {
 entry:
-  tail call void @llvm.hexagon.Y4.l2fetch(i8* %p, i32 %q)
+  tail call void @llvm.hexagon.Y4.l2fetch(ptr %p, i32 %q)
   ret void
 }
 
 ; CHECK-LABEL: dc06:
 ; CHECK: l2fetch(r{{[0-9]+}},r{{[0-9]+}}:{{[0-9]+}})
-define void @dc06(i8* nocapture readonly %p, i64 %q) local_unnamed_addr #0 {
+define void @dc06(ptr nocapture readonly %p, i64 %q) local_unnamed_addr #0 {
 entry:
-  tail call void @llvm.hexagon.Y5.l2fetch(i8* %p, i64 %q)
+  tail call void @llvm.hexagon.Y5.l2fetch(ptr %p, i64 %q)
   ret void
 }
 
-declare void @llvm.hexagon.prefetch(i8* nocapture) #1
-declare void @llvm.hexagon.Y2.dccleana(i8* nocapture readonly) #2
-declare void @llvm.hexagon.Y2.dccleaninva(i8* nocapture readonly) #2
-declare void @llvm.hexagon.Y2.dcinva(i8* nocapture readonly) #2
-declare void @llvm.hexagon.Y2.dczeroa(i8* nocapture) #3
-declare void @llvm.hexagon.Y4.l2fetch(i8* nocapture readonly, i32) #2
-declare void @llvm.hexagon.Y5.l2fetch(i8* nocapture readonly, i64) #2
+declare void @llvm.hexagon.prefetch(ptr nocapture) #1
+declare void @llvm.hexagon.Y2.dccleana(ptr nocapture readonly) #2
+declare void @llvm.hexagon.Y2.dccleaninva(ptr nocapture readonly) #2
+declare void @llvm.hexagon.Y2.dcinva(ptr nocapture readonly) #2
+declare void @llvm.hexagon.Y2.dczeroa(ptr nocapture) #3
+declare void @llvm.hexagon.Y4.l2fetch(ptr nocapture readonly, i32) #2
+declare void @llvm.hexagon.Y5.l2fetch(ptr nocapture readonly, i64) #2
 
 attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="-hvx,-long-calls" }
 attributes #1 = { inaccessiblemem_or_argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/intrinsics/v65-gather-double.ll b/llvm/test/CodeGen/Hexagon/intrinsics/v65-gather-double.ll
index c54cd95daf78d..6d9800ee882f0 100644
--- a/llvm/test/CodeGen/Hexagon/intrinsics/v65-gather-double.ll
+++ b/llvm/test/CodeGen/Hexagon/intrinsics/v65-gather-double.ll
@@ -21,42 +21,42 @@
 
 declare <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32>, i32)
 
-declare void @llvm.hexagon.V6.vgathermw.128B(i8*, i32, i32, <32 x i32>)
-define void @V6_vgathermw_128B(i8* %a, i32 %b, i32 %c, <32 x i32> %d) {
-  call void @llvm.hexagon.V6.vgathermw.128B(i8* %a, i32 %b, i32 %c, <32 x i32> %d)
+declare void @llvm.hexagon.V6.vgathermw.128B(ptr, i32, i32, <32 x i32>)
+define void @V6_vgathermw_128B(ptr %a, i32 %b, i32 %c, <32 x i32> %d) {
+  call void @llvm.hexagon.V6.vgathermw.128B(ptr %a, i32 %b, i32 %c, <32 x i32> %d)
   ret void
 }
 
-declare void @llvm.hexagon.V6.vgathermh.128B(i8*, i32, i32, <32 x i32>)
-define void @V6_vgathermh_128B(i8* %a, i32 %b, i32 %c, <32 x i32> %d) {
-  call void @llvm.hexagon.V6.vgathermh.128B(i8* %a, i32 %b, i32 %c, <32 x i32> %d)
+declare void @llvm.hexagon.V6.vgathermh.128B(ptr, i32, i32, <32 x i32>)
+define void @V6_vgathermh_128B(ptr %a, i32 %b, i32 %c, <32 x i32> %d) {
+  call void @llvm.hexagon.V6.vgathermh.128B(ptr %a, i32 %b, i32 %c, <32 x i32> %d)
   ret void
 }
 
-declare void @llvm.hexagon.V6.vgathermhw.128B(i8*, i32, i32, <64 x i32>)
-define void @V6_vgathermhw_128B(i8* %a, i32 %b, i32 %c, <64 x i32> %d) {
-  call void @llvm.hexagon.V6.vgathermhw.128B(i8* %a, i32 %b, i32 %c, <64 x i32> %d)
+declare void @llvm.hexagon.V6.vgathermhw.128B(ptr, i32, i32, <64 x i32>)
+define void @V6_vgathermhw_128B(ptr %a, i32 %b, i32 %c, <64 x i32> %d) {
+  call void @llvm.hexagon.V6.vgathermhw.128B(ptr %a, i32 %b, i32 %c, <64 x i32> %d)
   ret void
 }
 
-declare void @llvm.hexagon.V6.vgathermwq.128B(i8*, <128 x i1>, i32, i32, <32 x i32>)
-define void @V6_vgathermwq_128B(i8* %a, <32 x i32> %b, i32 %c, i32 %d, <32 x i32> %e) {
+declare void @llvm.hexagon.V6.vgathermwq.128B(ptr, <128 x i1>, i32, i32, <32 x i32>)
+define void @V6_vgathermwq_128B(ptr %a, <32 x i32> %b, i32 %c, i32 %d, <32 x i32> %e) {
   %1 = tail call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %b, i32 -1)
-  call void @llvm.hexagon.V6.vgathermwq.128B(i8* %a, <128 x i1> %1, i32 %c, i32 %d, <32 x i32> %e)
+  call void @llvm.hexagon.V6.vgathermwq.128B(ptr %a, <128 x i1> %1, i32 %c, i32 %d, <32 x i32> %e)
   ret void
 }
 
-declare void @llvm.hexagon.V6.vgathermhq.128B(i8*, <128 x i1>, i32, i32, <32 x i32>)
-define void @V6_vgathermhq_128B(i8* %a, <32 x i32> %b, i32 %c, i32 %d, <32 x i32> %e) {
+declare void @llvm.hexagon.V6.vgathermhq.128B(ptr, <128 x i1>, i32, i32, <32 x i32>)
+define void @V6_vgathermhq_128B(ptr %a, <32 x i32> %b, i32 %c, i32 %d, <32 x i32> %e) {
   %1 = tail call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %b, i32 -1)
-  call void @llvm.hexagon.V6.vgathermhq.128B(i8* %a, <128 x i1> %1, i32 %c, i32 %d, <32 x i32> %e)
+  call void @llvm.hexagon.V6.vgathermhq.128B(ptr %a, <128 x i1> %1, i32 %c, i32 %d, <32 x i32> %e)
   ret void
 }
 
-declare void @llvm.hexagon.V6.vgathermhwq.128B(i8*, <128 x i1>, i32, i32, <64 x i32>)
-define void @V6_vgathermhwq_128B(i8* %a, <32 x i32> %b, i32 %c, i32 %d, <64 x i32> %e) {
+declare void @llvm.hexagon.V6.vgathermhwq.128B(ptr, <128 x i1>, i32, i32, <64 x i32>)
+define void @V6_vgathermhwq_128B(ptr %a, <32 x i32> %b, i32 %c, i32 %d, <64 x i32> %e) {
   %1 = tail call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %b, i32 -1)
-  call void @llvm.hexagon.V6.vgathermhwq.128B(i8* %a, <128 x i1> %1, i32 %c, i32 %d, <64 x i32> %e)
+  call void @llvm.hexagon.V6.vgathermhwq.128B(ptr %a, <128 x i1> %1, i32 %c, i32 %d, <64 x i32> %e)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/intrinsics/v65-gather.ll b/llvm/test/CodeGen/Hexagon/intrinsics/v65-gather.ll
index c3a3b15ea1be3..475cb6c8a3f47 100644
--- a/llvm/test/CodeGen/Hexagon/intrinsics/v65-gather.ll
+++ b/llvm/test/CodeGen/Hexagon/intrinsics/v65-gather.ll
@@ -21,41 +21,41 @@
 
 declare <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32>, i32)
 
-declare void @llvm.hexagon.V6.vgathermw(i8*, i32, i32, <16 x i32>)
-define void @V6_vgathermw(i8* %a, i32 %b, i32 %c, <16 x i32> %d) {
-  call void @llvm.hexagon.V6.vgathermw(i8* %a, i32 %b, i32 %c, <16 x i32> %d)
+declare void @llvm.hexagon.V6.vgathermw(ptr, i32, i32, <16 x i32>)
+define void @V6_vgathermw(ptr %a, i32 %b, i32 %c, <16 x i32> %d) {
+  call void @llvm.hexagon.V6.vgathermw(ptr %a, i32 %b, i32 %c, <16 x i32> %d)
   ret void
 }
 
-declare void @llvm.hexagon.V6.vgathermh(i8*, i32, i32, <16 x i32>)
-define void @V6_vgathermh(i8* %a, i32 %b, i32 %c, <16 x i32> %d) {
-  call void @llvm.hexagon.V6.vgathermh(i8* %a, i32 %b, i32 %c, <16 x i32> %d)
+declare void @llvm.hexagon.V6.vgathermh(ptr, i32, i32, <16 x i32>)
+define void @V6_vgathermh(ptr %a, i32 %b, i32 %c, <16 x i32> %d) {
+  call void @llvm.hexagon.V6.vgathermh(ptr %a, i32 %b, i32 %c, <16 x i32> %d)
   ret void
 }
 
-declare void @llvm.hexagon.V6.vgathermhw(i8*, i32, i32, <32 x i32>)
-define void @V6_vgathermhw(i8* %a, i32 %b, i32 %c, <32 x i32> %d) {
-  call void @llvm.hexagon.V6.vgathermhw(i8* %a, i32 %b, i32 %c, <32 x i32> %d)
+declare void @llvm.hexagon.V6.vgathermhw(ptr, i32, i32, <32 x i32>)
+define void @V6_vgathermhw(ptr %a, i32 %b, i32 %c, <32 x i32> %d) {
+  call void @llvm.hexagon.V6.vgathermhw(ptr %a, i32 %b, i32 %c, <32 x i32> %d)
   ret void
 }
 
-declare void @llvm.hexagon.V6.vgathermwq(i8*, <64 x i1>, i32, i32, <16 x i32>)
-define void @V6_vgathermwq(i8* %a, <16 x i32> %b, i32 %c, i32 %d, <16 x i32> %e) {
+declare void @llvm.hexagon.V6.vgathermwq(ptr, <64 x i1>, i32, i32, <16 x i32>)
+define void @V6_vgathermwq(ptr %a, <16 x i32> %b, i32 %c, i32 %d, <16 x i32> %e) {
   %1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %b, i32 -1)
-  call void @llvm.hexagon.V6.vgathermwq(i8* %a, <64 x i1> %1, i32 %c, i32 %d, <16 x i32> %e)
+  call void @llvm.hexagon.V6.vgathermwq(ptr %a, <64 x i1> %1, i32 %c, i32 %d, <16 x i32> %e)
   ret void
 }
 
-declare void @llvm.hexagon.V6.vgathermhq(i8*, <64 x i1>, i32, i32, <16 x i32>)
-define void @V6_vgathermhq(i8* %a, <16 x i32> %b, i32 %c, i32 %d, <16 x i32> %e) {
+declare void @llvm.hexagon.V6.vgathermhq(ptr, <64 x i1>, i32, i32, <16 x i32>)
+define void @V6_vgathermhq(ptr %a, <16 x i32> %b, i32 %c, i32 %d, <16 x i32> %e) {
   %1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %b, i32 -1)
-  call void @llvm.hexagon.V6.vgathermhq(i8* %a, <64 x i1> %1, i32 %c, i32 %d, <16 x i32> %e)
+  call void @llvm.hexagon.V6.vgathermhq(ptr %a, <64 x i1> %1, i32 %c, i32 %d, <16 x i32> %e)
   ret void
 }
 
-declare void @llvm.hexagon.V6.vgathermhwq(i8*, <64 x i1>, i32, i32, <32 x i32>)
-define void @V6_vgathermhwq(i8* %a, <16 x i32> %b, i32 %c, i32 %d, <32 x i32> %e) {
+declare void @llvm.hexagon.V6.vgathermhwq(ptr, <64 x i1>, i32, i32, <32 x i32>)
+define void @V6_vgathermhwq(ptr %a, <16 x i32> %b, i32 %c, i32 %d, <32 x i32> %e) {
   %1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %b, i32 -1)
-  call void @llvm.hexagon.V6.vgathermhwq(i8* %a, <64 x i1> %1, i32 %c, i32 %d, <32 x i32> %e)
+  call void @llvm.hexagon.V6.vgathermhwq(ptr %a, <64 x i1> %1, i32 %c, i32 %d, <32 x i32> %e)
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/intrinsics/v65-scatter-gather.ll b/llvm/test/CodeGen/Hexagon/intrinsics/v65-scatter-gather.ll
index 2ebd22bdfb43d..f7d857600cdd8 100644
--- a/llvm/test/CodeGen/Hexagon/intrinsics/v65-scatter-gather.ll
+++ b/llvm/test/CodeGen/Hexagon/intrinsics/v65-scatter-gather.ll
@@ -6,18 +6,17 @@
 ; CHECK-NEXT: vmem(r{{[0-9]+}}+#0) = vtmp.new
 ; CHECK-NEXT: }
 
-declare i32 @add_translation_extended(i32, i8*, i64, i32, i32, i32, i32, i32, i32) local_unnamed_addr
+declare i32 @add_translation_extended(i32, ptr, i64, i32, i32, i32, i32, i32, i32) local_unnamed_addr
 
 ; Function Attrs: nounwind
 define i32 @main() local_unnamed_addr {
 entry:
   %hvx_vector = alloca <16 x i32>, align 64
-  %0 = bitcast <16 x i32>* %hvx_vector to i8*
-  %call.i = tail call i32 @add_translation_extended(i32 1, i8* inttoptr (i32 -668991488 to i8*), i64 3625975808, i32 16, i32 15, i32 0, i32 0, i32 0, i32 3)
-  %1 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1)
-  %2 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 2)
-  tail call void @llvm.hexagon.V6.vscattermh.add(i32 -668991488, i32 1023, <16 x i32> %1, <16 x i32> %2)
-  call void @llvm.hexagon.V6.vgathermh(i8* %0, i32 -668991488, i32 1023, <16 x i32> %1)
+  %call.i = tail call i32 @add_translation_extended(i32 1, ptr inttoptr (i32 -668991488 to ptr), i64 3625975808, i32 16, i32 15, i32 0, i32 0, i32 0, i32 3)
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1)
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 2)
+  tail call void @llvm.hexagon.V6.vscattermh.add(i32 -668991488, i32 1023, <16 x i32> %0, <16 x i32> %1)
+  call void @llvm.hexagon.V6.vgathermh(ptr %hvx_vector, i32 -668991488, i32 1023, <16 x i32> %0)
   ret i32 0
 }
 
@@ -28,5 +27,5 @@ declare void @llvm.hexagon.V6.vscattermh.add(i32, i32, <16 x i32>, <16 x i32>)
 declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32)
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.hexagon.V6.vgathermh(i8*, i32, i32, <16 x i32>)
+declare void @llvm.hexagon.V6.vgathermh(ptr, i32, i32, <16 x i32>)
 

diff  --git a/llvm/test/CodeGen/Hexagon/invalid-memrefs.ll b/llvm/test/CodeGen/Hexagon/invalid-memrefs.ll
index 94c6b5468f798..ba87c3163f5c9 100644
--- a/llvm/test/CodeGen/Hexagon/invalid-memrefs.ll
+++ b/llvm/test/CodeGen/Hexagon/invalid-memrefs.ll
@@ -4,8 +4,8 @@
 
 target triple = "hexagon-unknown--elf"
 
-%s.0 = type { %s.0*, %s.0* }
-%s.1 = type { %s.1*, %s.1** }
+%s.0 = type { ptr, ptr }
+%s.1 = type { ptr, ptr }
 
 @g0 = external global %s.0, align 4
 
@@ -66,18 +66,18 @@ b17:                                              ; preds = %b16
   unreachable
 
 b18:                                              ; preds = %b16
-  %v0 = load %s.0*, %s.0** getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 1), align 4
-  %v1 = load %s.0*, %s.0** getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 0), align 4
-  %v2 = select i1 undef, %s.0* %v0, %s.0* %v1
+  %v0 = load ptr, ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 1), align 4
+  %v1 = load ptr, ptr @g0, align 4
+  %v2 = select i1 undef, ptr %v0, ptr %v1
   br i1 undef, label %b22, label %b19
 
 b19:                                              ; preds = %b18
-  %v3 = load %s.1*, %s.1** undef, align 4
-  %v4 = icmp eq %s.1* %v3, null
+  %v3 = load ptr, ptr undef, align 4
+  %v4 = icmp eq ptr %v3, null
   br i1 %v4, label %b21, label %b20
 
 b20:                                              ; preds = %b19
-  store %s.1** undef, %s.1*** undef, align 4
+  store ptr undef, ptr undef, align 4
   br label %b21
 
 b21:                                              ; preds = %b20, %b19
@@ -87,7 +87,7 @@ b22:                                              ; preds = %b21, %b18
   br i1 undef, label %b24, label %b23
 
 b23:                                              ; preds = %b22
-  store %s.0* %v2, %s.0** undef, align 4
+  store ptr %v2, ptr undef, align 4
   br label %b24
 
 b24:                                              ; preds = %b23, %b22

diff  --git a/llvm/test/CodeGen/Hexagon/is-legal-void.ll b/llvm/test/CodeGen/Hexagon/is-legal-void.ll
index 222934abb82d8..cc6639ef6f5a0 100644
--- a/llvm/test/CodeGen/Hexagon/is-legal-void.ll
+++ b/llvm/test/CodeGen/Hexagon/is-legal-void.ll
@@ -7,8 +7,8 @@
 
 target triple = "hexagon"
 
-%struct.0 = type { i8*, i8, %union.anon.0 }
-%union.anon.0 = type { i8* }
+%struct.0 = type { ptr, i8, %union.anon.0 }
+%union.anon.0 = type { ptr }
 
 define hidden fastcc void @fred() unnamed_addr #0 {
 entry:
@@ -18,7 +18,7 @@ while.body.lr.ph:                                 ; preds = %entry
   br label %while.body
 
 while.body:                                       ; preds = %exit.2, %while.body.lr.ph
-  %lsr.iv = phi %struct.0* [ %cgep22, %exit.2 ], [ undef, %while.body.lr.ph ]
+  %lsr.iv = phi ptr [ %cgep22, %exit.2 ], [ undef, %while.body.lr.ph ]
   switch i32 undef, label %exit [
     i32 1, label %sw.bb.i
     i32 2, label %sw.bb3.i
@@ -37,17 +37,15 @@ exit:                                             ; preds = %while.body
   ]
 
 sw.bb.i17:                                        ; preds = %.exit
-  %0 = bitcast %struct.0* %lsr.iv to i32*
-  %1 = load i32, i32* %0, align 4
+  %0 = load i32, ptr %lsr.iv, align 4
   unreachable
 
 sw.bb3.i20:                                       ; preds = %exit
-  %2 = bitcast %struct.0* %lsr.iv to i8**
-  %3 = load i8*, i8** %2, align 4
+  %1 = load ptr, ptr %lsr.iv, align 4
   unreachable
 
 exit.2:                                           ; preds = %exit
-  %cgep22 = getelementptr %struct.0, %struct.0* %lsr.iv, i32 1
+  %cgep22 = getelementptr %struct.0, ptr %lsr.iv, i32 1
   br label %while.body
 
 while.end:                                        ; preds = %entry

diff  --git a/llvm/test/CodeGen/Hexagon/isel-bitcast-v1i8-i8.ll b/llvm/test/CodeGen/Hexagon/isel-bitcast-v1i8-i8.ll
index 77696a331a8f2..84a8f913a1f55 100644
--- a/llvm/test/CodeGen/Hexagon/isel-bitcast-v1i8-i8.ll
+++ b/llvm/test/CodeGen/Hexagon/isel-bitcast-v1i8-i8.ll
@@ -3,7 +3,7 @@
 
 ; This shouldn't crash.
 
-define i8 @fred(<4 x i8>* %a0) #0 {
+define i8 @fred(ptr %a0) #0 {
 ; CHECK-LABEL: fred:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -13,7 +13,7 @@ define i8 @fred(<4 x i8>* %a0) #0 {
 ; CHECK-NEXT:     r0 = extractu(r0,#8,#16)
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
-  %v0 = load <4 x i8>, <4 x i8>* %a0, align 4
+  %v0 = load <4 x i8>, ptr %a0, align 4
   %v1 = shufflevector <4 x i8> %v0, <4 x i8> undef, <1 x i32> <i32 2>
   %v2 = bitcast <1 x i8> %v1 to i8
   ret i8 %v2

diff  --git a/llvm/test/CodeGen/Hexagon/isel-buildvector-v2f16.ll b/llvm/test/CodeGen/Hexagon/isel-buildvector-v2f16.ll
index d9d5654fe9368..f306b52291340 100644
--- a/llvm/test/CodeGen/Hexagon/isel-buildvector-v2f16.ll
+++ b/llvm/test/CodeGen/Hexagon/isel-buildvector-v2f16.ll
@@ -6,18 +6,14 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define dllexport void @f0(half* %a0, half* %a1) local_unnamed_addr #0 {
+define dllexport void @f0(ptr %a0, ptr %a1) local_unnamed_addr #0 {
 b0:
-  %v0 = bitcast half* %a0 to i8*
-  %v1 = bitcast half* %a1 to <160 x half>*
-  %v2 = load <160 x half>, <160 x half>* %v1, align 4
+  %v2 = load <160 x half>, ptr %a1, align 4
   %v3 = shufflevector <160 x half> %v2, <160 x half> poison, <32 x i32> <i32 4, i32 9, i32 14, i32 19, i32 24, i32 29, i32 34, i32 39, i32 44, i32 49, i32 54, i32 59, i32 64, i32 69, i32 74, i32 79, i32 84, i32 89, i32 94, i32 99, i32 104, i32 109, i32 114, i32 119, i32 124, i32 129, i32 134, i32 139, i32 144, i32 149, i32 154, i32 159>
   %v4 = fadd nnan nsz <32 x half> %v3, zeroinitializer
-  %v5 = getelementptr i8, i8* %v0, i32 0
-  %v6 = bitcast i8* %v5 to <160 x half>*
   %v7 = shufflevector <32 x half> %v4, <32 x half> poison, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %v8 = shufflevector <128 x half> undef, <128 x half> %v7, <160 x i32> <i32 0, i32 32, i32 64, i32 96, i32 128, i32 1, i32 33, i32 65, i32 97, i32 129, i32 2, i32 34, i32 66, i32 98, i32 130, i32 3, i32 35, i32 67, i32 99, i32 131, i32 4, i32 36, i32 68, i32 100, i32 132, i32 5, i32 37, i32 69, i32 101, i32 133, i32 6, i32 38, i32 70, i32 102, i32 134, i32 7, i32 39, i32 71, i32 103, i32 135, i32 8, i32 40, i32 72, i32 104, i32 136, i32 9, i32 41, i32 73, i32 105, i32 137, i32 10, i32 42, i32 74, i32 106, i32 138, i32 11, i32 43, i32 75, i32 107, i32 139, i32 12, i32 44, i32 76, i32 108, i32 140, i32 13, i32 45, i32 77, i32 109, i32 141, i32 14, i32 46, i32 78, i32 110, i32 142, i32 15, i32 47, i32 79, i32 111, i32 143, i32 16, i32 48, i32 80, i32 112, i32 144, i32 17, i32 49, i32 81, i32 113, i32 145, i32 18, i32 50, i32 82, i32 114, i32 146, i32 19, i32 51, i32 83, i32 115, i32 147, i32 20, i32 52, i32 84, i32 116, i32 148, i32 21, i32 53, i32 85, i32 117, i32 149, i32 22, i32 54, i32 86, i32 118, i32 150, i32 23, i32 55, i32 87, i32 119, i32 151, i32 24, i32 56, i32 88, i32 120, i32 152, i32 25, i32 57, i32 89, i32 121, i32 153, i32 26, i32 58, i32 90, i32 122, i32 154, i32 27, i32 59, i32 91, i32 123, i32 155, i32 28, i32 60, i32 92, i32 124, i32 156, i32 29, i32 61, i32 93, i32 125, i32 157, i32 30, i32 62, i32 94, i32 126, i32 158, i32 31, i32 63, i32 95, i32 127, i32 159>
-  store <160 x half> %v8, <160 x half>* %v6, align 4
+  store <160 x half> %v8, ptr %a0, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/isel-dcfetch-intrin-map.ll b/llvm/test/CodeGen/Hexagon/isel-dcfetch-intrin-map.ll
index 6e9f1af74bd32..d7168489c1a33 100644
--- a/llvm/test/CodeGen/Hexagon/isel-dcfetch-intrin-map.ll
+++ b/llvm/test/CodeGen/Hexagon/isel-dcfetch-intrin-map.ll
@@ -7,12 +7,12 @@
 
 target triple = "hexagon"
 
-define void @fred(i8* %a0) #0 {
-  call void @llvm.hexagon.Y2.dcfetch(i8* %a0)
+define void @fred(ptr %a0) #0 {
+  call void @llvm.hexagon.Y2.dcfetch(ptr %a0)
   ret void
 }
 
-declare void @llvm.hexagon.Y2.dcfetch(i8*) #0
+declare void @llvm.hexagon.Y2.dcfetch(ptr) #0
 
 attributes #0 = { nounwind }
 

diff  --git a/llvm/test/CodeGen/Hexagon/isel-global-offset-alignment.ll b/llvm/test/CodeGen/Hexagon/isel-global-offset-alignment.ll
index bd170d89e4131..af479fde7ce35 100644
--- a/llvm/test/CodeGen/Hexagon/isel-global-offset-alignment.ll
+++ b/llvm/test/CodeGen/Hexagon/isel-global-offset-alignment.ll
@@ -36,10 +36,9 @@ b3:                                               ; preds = %b1, %b0
   %v4 = phi i32 [ 0, %b0 ], [ %v2, %b1 ]
   %v5 = or i32 %v4, 1
   %v6 = add nsw i32 %v5, -1
-  %v7 = getelementptr inbounds [1000000 x i16], [1000000 x i16]* @array, i32 0, i32 %v6
-  %v8 = getelementptr i16, i16* %v7, i32 88
-  %v9 = bitcast i16* %v8 to <8 x i16>*
-  store <8 x i16> zeroinitializer, <8 x i16>* %v9, align 8
+  %v7 = getelementptr inbounds [1000000 x i16], ptr @array, i32 0, i32 %v6
+  %v8 = getelementptr i16, ptr %v7, i32 88
+  store <8 x i16> zeroinitializer, ptr %v8, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/isel-memory-vNi1.ll b/llvm/test/CodeGen/Hexagon/isel-memory-vNi1.ll
index c12d5677fdbfa..2eecfa9f47f17 100644
--- a/llvm/test/CodeGen/Hexagon/isel-memory-vNi1.ll
+++ b/llvm/test/CodeGen/Hexagon/isel-memory-vNi1.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -march=hexagon < %s | FileCheck %s
 
-define i64 @f0(<8 x i1>* %a0, <8 x i8> %a1) #0 {
+define i64 @f0(ptr %a0, <8 x i8> %a1) #0 {
 ; CHECK-LABEL: f0:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -20,13 +20,13 @@ define i64 @f0(<8 x i1>* %a0, <8 x i8> %a1) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
 b0:
-  %v0 = load <8 x i1>, <8 x i1>* %a0, align 1
+  %v0 = load <8 x i1>, ptr %a0, align 1
   %v1 = select <8 x i1> %v0, <8 x i8> %a1, <8 x i8> zeroinitializer
   %v2 = bitcast <8 x i8> %v1 to i64
   ret i64 %v2
 }
 
-define i32 @f1(<4 x i1>* %a0, <4 x i8> %a1) #0 {
+define i32 @f1(ptr %a0, <4 x i8> %a1) #0 {
 ; CHECK-LABEL: f1:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -51,13 +51,13 @@ define i32 @f1(<4 x i1>* %a0, <4 x i8> %a1) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
 b0:
-  %v0 = load <4 x i1>, <4 x i1>* %a0, align 1
+  %v0 = load <4 x i1>, ptr %a0, align 1
   %v1 = select <4 x i1> %v0, <4 x i8> %a1, <4 x i8> zeroinitializer
   %v2 = bitcast <4 x i8> %v1 to i32
   ret i32 %v2
 }
 
-define i16 @f2(<2 x i1>* %a0, <2 x i8> %a1) #0 {
+define i16 @f2(ptr %a0, <2 x i8> %a1) #0 {
 ; CHECK-LABEL: f2:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -82,13 +82,13 @@ define i16 @f2(<2 x i1>* %a0, <2 x i8> %a1) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
 b0:
-  %v0 = load <2 x i1>, <2 x i1>* %a0, align 1
+  %v0 = load <2 x i1>, ptr %a0, align 1
   %v1 = select <2 x i1> %v0, <2 x i8> %a1, <2 x i8> zeroinitializer
   %v2 = bitcast <2 x i8> %v1 to i16
   ret i16 %v2
 }
 
-define i8 @f3(<1 x i1>* %a0, <1 x i8> %a1) #0 {
+define i8 @f3(ptr %a0, <1 x i8> %a1) #0 {
 ; CHECK-LABEL: f3:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -104,13 +104,13 @@ define i8 @f3(<1 x i1>* %a0, <1 x i8> %a1) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
 b0:
-  %v0 = load <1 x i1>, <1 x i1>* %a0, align 1
+  %v0 = load <1 x i1>, ptr %a0, align 1
   %v1 = select <1 x i1> %v0, <1 x i8> %a1, <1 x i8> zeroinitializer
   %v2 = bitcast <1 x i8> %v1 to i8
   ret i8 %v2
 }
 
-define void @f4(<8 x i1>* %a0, i64 %a1) #0 {
+define void @f4(ptr %a0, i64 %a1) #0 {
 ; CHECK-LABEL: f4:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -134,11 +134,11 @@ define void @f4(<8 x i1>* %a0, i64 %a1) #0 {
 b0:
   %v0 = bitcast i64 %a1 to <8 x i8>
   %v1 = icmp ne <8 x i8> %v0, zeroinitializer
-  store <8 x i1> %v1, <8 x i1>* %a0, align 1
+  store <8 x i1> %v1, ptr %a0, align 1
   ret void
 }
 
-define void @f5(<4 x i1>* %a0, i32 %a1) #0 {
+define void @f5(ptr %a0, i32 %a1) #0 {
 ; CHECK-LABEL: f5:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -165,11 +165,11 @@ define void @f5(<4 x i1>* %a0, i32 %a1) #0 {
 b0:
   %v0 = bitcast i32 %a1 to <4 x i8>
   %v1 = icmp ne <4 x i8> %v0, zeroinitializer
-  store <4 x i1> %v1, <4 x i1>* %a0, align 1
+  store <4 x i1> %v1, ptr %a0, align 1
   ret void
 }
 
-define void @f6(<2 x i1>* %a0, i16 %a1) #0 {
+define void @f6(ptr %a0, i16 %a1) #0 {
 ; CHECK-LABEL: f6:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -238,11 +238,11 @@ define void @f6(<2 x i1>* %a0, i16 %a1) #0 {
 b0:
   %v0 = bitcast i16 %a1 to <2 x i8>
   %v1 = icmp ne <2 x i8> %v0, zeroinitializer
-  store <2 x i1> %v1, <2 x i1>* %a0, align 1
+  store <2 x i1> %v1, ptr %a0, align 1
   ret void
 }
 
-define void @f7(<1 x i1>* %a0, i8 %a1) #0 {
+define void @f7(ptr %a0, i8 %a1) #0 {
 ; CHECK-LABEL: f7:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -263,7 +263,7 @@ define void @f7(<1 x i1>* %a0, i8 %a1) #0 {
 b0:
   %v0 = bitcast i8 %a1 to <1 x i8>
   %v1 = icmp ne <1 x i8> %v0, zeroinitializer
-  store <1 x i1> %v1, <1 x i1>* %a0, align 1
+  store <1 x i1> %v1, ptr %a0, align 1
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/isel-prefer.ll b/llvm/test/CodeGen/Hexagon/isel-prefer.ll
index 130ea04063beb..3d5c8fb54adec 100644
--- a/llvm/test/CodeGen/Hexagon/isel-prefer.ll
+++ b/llvm/test/CodeGen/Hexagon/isel-prefer.ll
@@ -37,8 +37,8 @@ define i32 @Prefer_M4_mpyri_addi(i32 %a0) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
 b1:
-  %v2 = getelementptr inbounds [2 x [31 x i8]], [2 x [31 x i8]]* @data1, i32 0, i32 %a0
-  %v3 = ptrtoint [31 x i8]* %v2 to i32
+  %v2 = getelementptr inbounds [2 x [31 x i8]], ptr @data1, i32 0, i32 %a0
+  %v3 = ptrtoint ptr %v2 to i32
   ret i32 %v3
 }
 
@@ -55,8 +55,8 @@ define i32 @Prefer_M4_mpyrr_addi(i32 %a0) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
 b1:
-  %v2 = getelementptr inbounds [2 x [91 x i8]], [2 x [91 x i8]]* @data2, i32 0, i32 %a0
-  %v3 = ptrtoint [91 x i8]* %v2 to i32
+  %v2 = getelementptr inbounds [2 x [91 x i8]], ptr @data2, i32 0, i32 %a0
+  %v3 = ptrtoint ptr %v2 to i32
   ret i32 %v3
 }
 
@@ -100,7 +100,7 @@ b2:
   ret i32 %v6
 }
 
-define i64 @Prefer_L2_loadrub_io(i8* %a0) #0 {
+define i64 @Prefer_L2_loadrub_io(ptr %a0) #0 {
 ; CHECK-LABEL: Prefer_L2_loadrub_io:
 ; CHECK:       // %bb.0: // %b1
 ; CHECK-NEXT:    {
@@ -113,8 +113,8 @@ define i64 @Prefer_L2_loadrub_io(i8* %a0) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
 b1:
-  %v2 = getelementptr i8, i8* %a0, i32 65
-  %v3 = load i8, i8* %v2
+  %v2 = getelementptr i8, ptr %a0, i32 65
+  %v3 = load i8, ptr %v2
   %v4 = zext i8 %v3 to i64
   ret i64 %v4
 }

diff  --git a/llvm/test/CodeGen/Hexagon/isel-select-v4i8.ll b/llvm/test/CodeGen/Hexagon/isel-select-v4i8.ll
index 58f72a15497a5..b1c6d7157e393 100644
--- a/llvm/test/CodeGen/Hexagon/isel-select-v4i8.ll
+++ b/llvm/test/CodeGen/Hexagon/isel-select-v4i8.ll
@@ -10,7 +10,7 @@ target triple = "hexagon"
 @g0 = external dso_local unnamed_addr constant [41 x i8], align 1
 define dso_local void @f0() local_unnamed_addr #0 {
 b0:
-  %v0 = load <16 x i32>, <16 x i32>* undef, align 16
+  %v0 = load <16 x i32>, ptr undef, align 16
   %v1 = icmp eq <16 x i32> %v0, zeroinitializer
   %v2 = or <16 x i1> %v1, zeroinitializer
   %v3 = or <16 x i1> %v2, zeroinitializer
@@ -23,13 +23,13 @@ b0:
   br i1 %v9, label %b2, label %b1
 
 b1:                                               ; preds = %b0
-  call void (i8*, ...) @f1(i8* getelementptr inbounds ([41 x i8], [41 x i8]* @g0, i32 0, i32 0))
+  call void (ptr, ...) @f1(ptr @g0)
   unreachable
 
 b2:                                               ; preds = %b0
   ret void
 }
-declare dso_local void @f1(i8*, ...) local_unnamed_addr #1
+declare dso_local void @f1(ptr, ...) local_unnamed_addr #1
 
 attributes #0 = { "target-cpu"="hexagonv66" "target-features"="+hvx-length64b,+hvxv66,+v66,-long-calls" }
 attributes #1 = { "use-soft-float"="false" }

diff  --git a/llvm/test/CodeGen/Hexagon/isel-setcc-i1.ll b/llvm/test/CodeGen/Hexagon/isel-setcc-i1.ll
index 42098f37f3ac4..008d976de8abe 100644
--- a/llvm/test/CodeGen/Hexagon/isel-setcc-i1.ll
+++ b/llvm/test/CodeGen/Hexagon/isel-setcc-i1.ll
@@ -10,7 +10,7 @@ b0:
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
-  %v2 = load i32, i32* undef, align 4
+  %v2 = load i32, ptr undef, align 4
   %v3 = select i1 undef, i32 %v2, i32 0
   %v4 = and i32 %v3, 7
   %v5 = icmp eq i32 %v4, 4

diff  --git a/llvm/test/CodeGen/Hexagon/isel-simplify-crash.ll b/llvm/test/CodeGen/Hexagon/isel-simplify-crash.ll
index c13f59a7fcdb5..c982b93162ea7 100644
--- a/llvm/test/CodeGen/Hexagon/isel-simplify-crash.ll
+++ b/llvm/test/CodeGen/Hexagon/isel-simplify-crash.ll
@@ -7,7 +7,7 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define void @fred(i16 signext %a0, <32 x i16>* %a1, <32 x i16> %a3) #0 {
+define void @fred(i16 signext %a0, ptr %a1, <32 x i16> %a3) #0 {
 b1:
   %v4 = add i16 undef, %a0
   br i1 undef, label %b11, label %b5
@@ -18,7 +18,7 @@ b5:                                               ; preds = %b1
   %v8 = add <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23, i16 24, i16 25, i16 26, i16 27, i16 28, i16 29, i16 30, i16 31>, <i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256>
   %v9 = mul <32 x i16> %v8, %a3
   %v10 = add <32 x i16> %v7, %v9
-  store <32 x i16> %v10, <32 x i16>* %a1, align 2
+  store <32 x i16> %v10, ptr %a1, align 2
   ret void
 
 b11:                                              ; preds = %b1

diff  --git a/llvm/test/CodeGen/Hexagon/isel-splat-vector-crash.ll b/llvm/test/CodeGen/Hexagon/isel-splat-vector-crash.ll
index f58eca3446d35..d6469d365696a 100644
--- a/llvm/test/CodeGen/Hexagon/isel-splat-vector-crash.ll
+++ b/llvm/test/CodeGen/Hexagon/isel-splat-vector-crash.ll
@@ -6,10 +6,10 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define dso_local void @f0(i16* %a0) local_unnamed_addr #0 {
+define dso_local void @f0(ptr %a0) local_unnamed_addr #0 {
 b0:
-  %v0 = getelementptr inbounds i16, i16* %a0, i32 undef
-  %v1 = load <64 x i16>, <64 x i16>* undef, align 2
+  %v0 = getelementptr inbounds i16, ptr %a0, i32 undef
+  %v1 = load <64 x i16>, ptr undef, align 2
   %v2 = shufflevector <64 x i16> %v1, <64 x i16> undef, <8 x i32> <i32 2, i32 10, i32 18, i32 26, i32 34, i32 42, i32 50, i32 58>
   %v3 = shufflevector <64 x i16> %v1, <64 x i16> undef, <8 x i32> <i32 6, i32 14, i32 22, i32 30, i32 38, i32 46, i32 54, i32 62>
   %v4 = sext <8 x i16> %v2 to <8 x i32>
@@ -25,7 +25,7 @@ b0:
   %v14 = add nsw <8 x i32> zeroinitializer, %v13
   %v15 = trunc <8 x i32> %v14 to <8 x i16>
   %v16 = extractelement <8 x i16> %v15, i32 0
-  store i16 %v16, i16* %v0, align 2
+  store i16 %v16, ptr %v0, align 2
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/isel-splat-vector-dag-crash.ll b/llvm/test/CodeGen/Hexagon/isel-splat-vector-dag-crash.ll
index 2b1fe2dd6fd8f..ab659990644f0 100644
--- a/llvm/test/CodeGen/Hexagon/isel-splat-vector-dag-crash.ll
+++ b/llvm/test/CodeGen/Hexagon/isel-splat-vector-dag-crash.ll
@@ -7,10 +7,10 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define dso_local void @f0(i16* %a0) local_unnamed_addr #0 {
+define dso_local void @f0(ptr %a0) local_unnamed_addr #0 {
 b0:
-  %v0 = getelementptr inbounds i16, i16* %a0, i32 undef
-  %v1 = load <64 x i16>, <64 x i16>* undef, align 2
+  %v0 = getelementptr inbounds i16, ptr %a0, i32 undef
+  %v1 = load <64 x i16>, ptr undef, align 2
   %v2 = shufflevector <64 x i16> %v1, <64 x i16> undef, <8 x i32> <i32 3, i32 11, i32 19, i32 27, i32 35, i32 43, i32 51, i32 59>
   %v3 = sext <8 x i16> %v2 to <8 x i32>
   %v4 = mul nsw <8 x i32> %v3, <i32 54492, i32 54492, i32 54492, i32 54492, i32 54492, i32 54492, i32 54492, i32 54492>
@@ -23,7 +23,7 @@ b0:
   %v11 = add nsw <8 x i32> %v10, zeroinitializer
   %v12 = trunc <8 x i32> %v11 to <8 x i16>
   %v13 = extractelement <8 x i16> %v12, i32 0
-  store i16 %v13, i16* %v0, align 2
+  store i16 %v13, ptr %v0, align 2
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/isel-uaddo-1.ll b/llvm/test/CodeGen/Hexagon/isel-uaddo-1.ll
index 092fb60e0410b..b9c5e3bac8d8f 100644
--- a/llvm/test/CodeGen/Hexagon/isel-uaddo-1.ll
+++ b/llvm/test/CodeGen/Hexagon/isel-uaddo-1.ll
@@ -6,19 +6,19 @@
 target triple = "hexagon"
 
 ; Function Attrs: norecurse nounwind
-define dso_local void @f0(i32* nocapture readonly %a0, i32* nocapture %a1) local_unnamed_addr #0 {
+define dso_local void @f0(ptr nocapture readonly %a0, ptr nocapture %a1) local_unnamed_addr #0 {
 b0:
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ %v3, %b1 ], [ 100, %b0 ]
-  %v1 = phi i32* [ %v6, %b1 ], [ %a1, %b0 ]
-  %v2 = phi i32* [ %v4, %b1 ], [ %a0, %b0 ]
+  %v1 = phi ptr [ %v6, %b1 ], [ %a1, %b0 ]
+  %v2 = phi ptr [ %v4, %b1 ], [ %a0, %b0 ]
   %v3 = add nsw i32 %v0, -1
-  %v4 = getelementptr inbounds i32, i32* %v2, i32 1
-  %v5 = load i32, i32* %v2, align 4, !tbaa !1
-  %v6 = getelementptr inbounds i32, i32* %v1, i32 1
-  store i32 %v5, i32* %v1, align 4, !tbaa !1
+  %v4 = getelementptr inbounds i32, ptr %v2, i32 1
+  %v5 = load i32, ptr %v2, align 4, !tbaa !1
+  %v6 = getelementptr inbounds i32, ptr %v1, i32 1
+  store i32 %v5, ptr %v1, align 4, !tbaa !1
   %v7 = icmp eq i32 %v3, 0
   br i1 %v7, label %b2, label %b1
 

diff  --git a/llvm/test/CodeGen/Hexagon/isel-v3i16.ll b/llvm/test/CodeGen/Hexagon/isel-v3i16.ll
index ddca7f4e8014f..8c3565a37cf7f 100644
--- a/llvm/test/CodeGen/Hexagon/isel-v3i16.ll
+++ b/llvm/test/CodeGen/Hexagon/isel-v3i16.ll
@@ -6,12 +6,12 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
- at g0 = external dllexport global i8* (i32, i32, i64, i32, i32)*, align 4
+ at g0 = external dllexport global ptr, align 4
 
-define hidden void @f0(i32 %a0, <3 x i16>* %a1) #0 {
+define hidden void @f0(i32 %a0, ptr %a1) #0 {
 b0:
-  %v0 = load i8* (i32, i32, i64, i32, i32)*, i8* (i32, i32, i64, i32, i32)** @g0, align 4
-  %v1 = call i8* %v0(i32 1, i32 %a0, i64 314646, i32 0, i32 16)
+  %v0 = load ptr, ptr @g0, align 4
+  %v1 = call ptr %v0(i32 1, i32 %a0, i64 314646, i32 0, i32 16)
   br label %b1
 
 b1:                                               ; preds = %b2, %b0
@@ -25,15 +25,13 @@ b3:                                               ; preds = %b2
   br i1 undef, label %b4, label %b5
 
 b4:                                               ; preds = %b3
-  %v3 = load <3 x i16>, <3 x i16>* %a1, align 2
+  %v3 = load <3 x i16>, ptr %a1, align 2
   br label %b5
 
 b5:                                               ; preds = %b4, %b3
   %v4 = phi <3 x i16> [ %v3, %b4 ], [ zeroinitializer, %b3 ]
-  %v5 = bitcast i8* %v1 to i16*
-  %v6 = getelementptr inbounds i16, i16* %v5, i32 undef
-  %v7 = bitcast i16* %v6 to <3 x i16>*
-  store <3 x i16> %v4, <3 x i16>* %v7, align 2
+  %v6 = getelementptr inbounds i16, ptr %v1, i32 undef
+  store <3 x i16> %v4, ptr %v6, align 2
   br label %b2
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/isel-vacopy.ll b/llvm/test/CodeGen/Hexagon/isel-vacopy.ll
index 0138e424696c7..81458ac13ef98 100644
--- a/llvm/test/CodeGen/Hexagon/isel-vacopy.ll
+++ b/llvm/test/CodeGen/Hexagon/isel-vacopy.ll
@@ -8,11 +8,11 @@ target triple = "hexagon"
 ; Function Attrs: nounwind
 define hidden fastcc void @f0() unnamed_addr #0 {
 b0:
-  call void @llvm.va_copy(i8* nonnull undef, i8* nonnull undef)
+  call void @llvm.va_copy(ptr nonnull undef, ptr nonnull undef)
   ret void
 }
 
 ; Function Attrs: nounwind
-declare void @llvm.va_copy(i8*, i8*) #0
+declare void @llvm.va_copy(ptr, ptr) #0
 
 attributes #0 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/isel-vlsr-v2i16.ll b/llvm/test/CodeGen/Hexagon/isel-vlsr-v2i16.ll
index 995ce06129d81..bf176d042c12a 100644
--- a/llvm/test/CodeGen/Hexagon/isel-vlsr-v2i16.ll
+++ b/llvm/test/CodeGen/Hexagon/isel-vlsr-v2i16.ll
@@ -5,8 +5,8 @@
 
 target triple = "hexagon-unknown-linux-gnu"
 
-define <2 x i16> @foo(<2 x i32>* nocapture %v) nounwind {
-  %vec = load <2 x i32>, <2 x i32>* %v, align 8
+define <2 x i16> @foo(ptr nocapture %v) nounwind {
+  %vec = load <2 x i32>, ptr %v, align 8
   %trunc = trunc <2 x i32> %vec to <2 x i16>
   %r = lshr <2 x i16> %trunc, <i16 4, i16 4>
   ret <2 x i16> %r

diff  --git a/llvm/test/CodeGen/Hexagon/isel/extload-i1.ll b/llvm/test/CodeGen/Hexagon/isel/extload-i1.ll
index def04ee4026c1..cfaf6fb0b57e5 100644
--- a/llvm/test/CodeGen/Hexagon/isel/extload-i1.ll
+++ b/llvm/test/CodeGen/Hexagon/isel/extload-i1.ll
@@ -7,7 +7,7 @@
 
 ; Sign extensions
 
-define i32 @f0(i1* %a0) #0 {
+define i32 @f0(ptr %a0) #0 {
 ; CHECK-LABEL: f0:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -17,13 +17,13 @@ define i32 @f0(i1* %a0) #0 {
 ; CHECK-NEXT:     r0 = sub(#0,r0)
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
-  %v0 = getelementptr i1, i1* %a0, i32 1
-  %v1 = load i1, i1* %v0
+  %v0 = getelementptr i1, ptr %a0, i32 1
+  %v1 = load i1, ptr %v0
   %v2 = sext i1 %v1 to i32
   ret i32 %v2
 }
 
-define i32 @f1(i1* %a0, i32 %a1) #0 {
+define i32 @f1(ptr %a0, i32 %a1) #0 {
 ; CHECK-LABEL: f1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -33,8 +33,8 @@ define i32 @f1(i1* %a0, i32 %a1) #0 {
 ; CHECK-NEXT:     r0 = sub(#0,r0)
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
-  %v0 = getelementptr i1, i1* %a0, i32 %a1
-  %v1 = load i1, i1* %v0
+  %v0 = getelementptr i1, ptr %a0, i32 %a1
+  %v1 = load i1, ptr %v0
   %v2 = sext i1 %v1 to i32
   ret i32 %v2
 }
@@ -49,9 +49,8 @@ define i32 @f2(i32 %a0) #0 {
 ; CHECK-NEXT:     r0 = sub(#0,r0)
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
-  %v0 = getelementptr [128 x i8], [128 x i8]* @array8, i32 0, i32 %a0
-  %v1 = bitcast i8* %v0 to i1*
-  %v2 = load i1, i1* %v1
+  %v0 = getelementptr [128 x i8], ptr @array8, i32 0, i32 %a0
+  %v2 = load i1, ptr %v0
   %v3 = sext i1 %v2 to i32
   ret i32 %v3
 }
@@ -66,9 +65,8 @@ define i32 @f3(i32 %a0) #0 {
 ; CHECK-NEXT:     r0 = sub(#0,r0)
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
-  %v0 = getelementptr [128 x i32], [128 x i32]* @array32, i32 0, i32 %a0
-  %v1 = bitcast i32* %v0 to i1*
-  %v2 = load i1, i1* %v1
+  %v0 = getelementptr [128 x i32], ptr @array32, i32 0, i32 %a0
+  %v2 = load i1, ptr %v0
   %v3 = sext i1 %v2 to i32
   ret i32 %v3
 }
@@ -83,7 +81,7 @@ define i32 @f4() #0 {
 ; CHECK-NEXT:     r0 = sub(#0,r0)
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
-  %v0 = load i1, i1* @global_gp
+  %v0 = load i1, ptr @global_gp
   %v1 = sext i1 %v0 to i32
   ret i32 %v1
 }
@@ -102,7 +100,7 @@ define i32 @f5(i64 %a0, i64 %a1, i64 %a2, i1 signext %a3) #0 {
   ret i32 %v0
 }
 
-define i64 @f6(i1* %a0) #0 {
+define i64 @f6(ptr %a0) #0 {
 ; CHECK-LABEL: f6:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -115,13 +113,13 @@ define i64 @f6(i1* %a0) #0 {
 ; CHECK-NEXT:     r1 = asr(r0,#31)
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
-  %v0 = getelementptr i1, i1* %a0, i32 1
-  %v1 = load i1, i1* %v0
+  %v0 = getelementptr i1, ptr %a0, i32 1
+  %v1 = load i1, ptr %v0
   %v2 = sext i1 %v1 to i64
   ret i64 %v2
 }
 
-define i64 @f7(i1* %a0, i32 %a1) #0 {
+define i64 @f7(ptr %a0, i32 %a1) #0 {
 ; CHECK-LABEL: f7:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -134,8 +132,8 @@ define i64 @f7(i1* %a0, i32 %a1) #0 {
 ; CHECK-NEXT:     r1 = asr(r0,#31)
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
-  %v0 = getelementptr i1, i1* %a0, i32 %a1
-  %v1 = load i1, i1* %v0
+  %v0 = getelementptr i1, ptr %a0, i32 %a1
+  %v1 = load i1, ptr %v0
   %v2 = sext i1 %v1 to i64
   ret i64 %v2
 }
@@ -153,9 +151,8 @@ define i64 @f8(i32 %a0) #0 {
 ; CHECK-NEXT:     r1 = asr(r0,#31)
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
-  %v0 = getelementptr [128 x i8], [128 x i8]* @array8, i32 0, i32 %a0
-  %v1 = bitcast i8* %v0 to i1*
-  %v2 = load i1, i1* %v1
+  %v0 = getelementptr [128 x i8], ptr @array8, i32 0, i32 %a0
+  %v2 = load i1, ptr %v0
   %v3 = sext i1 %v2 to i64
   ret i64 %v3
 }
@@ -173,9 +170,8 @@ define i64 @f9(i32 %a0) #0 {
 ; CHECK-NEXT:     r1 = asr(r0,#31)
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
-  %v0 = getelementptr [128 x i32], [128 x i32]* @array32, i32 0, i32 %a0
-  %v1 = bitcast i32* %v0 to i1*
-  %v2 = load i1, i1* %v1
+  %v0 = getelementptr [128 x i32], ptr @array32, i32 0, i32 %a0
+  %v2 = load i1, ptr %v0
   %v3 = sext i1 %v2 to i64
   ret i64 %v3
 }
@@ -193,7 +189,7 @@ define i64 @f10() #0 {
 ; CHECK-NEXT:     r1 = asr(r0,#31)
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
-  %v0 = load i1, i1* @global_gp
+  %v0 = load i1, ptr @global_gp
   %v1 = sext i1 %v0 to i64
   ret i64 %v1
 }
@@ -217,28 +213,28 @@ define i64 @f11(i64 %a0, i64 %a1, i64 %a2, i1 signext %a3) #0 {
 
 ; Zero-extensions
 
-define i32 @f12(i1* %a0) #0 {
+define i32 @f12(ptr %a0) #0 {
 ; CHECK-LABEL: f12:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
 ; CHECK-NEXT:     r0 = memub(r0+#1)
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
-  %v0 = getelementptr i1, i1* %a0, i32 1
-  %v1 = load i1, i1* %v0
+  %v0 = getelementptr i1, ptr %a0, i32 1
+  %v1 = load i1, ptr %v0
   %v2 = zext i1 %v1 to i32
   ret i32 %v2
 }
 
-define i32 @f13(i1* %a0, i32 %a1) #0 {
+define i32 @f13(ptr %a0, i32 %a1) #0 {
 ; CHECK-LABEL: f13:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     r0 = memub(r0+r1<<#0)
 ; CHECK-NEXT:    }
-  %v0 = getelementptr i1, i1* %a0, i32 %a1
-  %v1 = load i1, i1* %v0
+  %v0 = getelementptr i1, ptr %a0, i32 %a1
+  %v1 = load i1, ptr %v0
   %v2 = zext i1 %v1 to i32
   ret i32 %v2
 }
@@ -250,9 +246,8 @@ define i32 @f14(i32 %a0) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     r0 = memub(r0+##array8)
 ; CHECK-NEXT:    }
-  %v0 = getelementptr [128 x i8], [128 x i8]* @array8, i32 0, i32 %a0
-  %v1 = bitcast i8* %v0 to i1*
-  %v2 = load i1, i1* %v1
+  %v0 = getelementptr [128 x i8], ptr @array8, i32 0, i32 %a0
+  %v2 = load i1, ptr %v0
   %v3 = zext i1 %v2 to i32
   ret i32 %v3
 }
@@ -264,9 +259,8 @@ define i32 @f15(i32 %a0) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     r0 = memub(r0<<#2+##array32)
 ; CHECK-NEXT:    }
-  %v0 = getelementptr [128 x i32], [128 x i32]* @array32, i32 0, i32 %a0
-  %v1 = bitcast i32* %v0 to i1*
-  %v2 = load i1, i1* %v1
+  %v0 = getelementptr [128 x i32], ptr @array32, i32 0, i32 %a0
+  %v2 = load i1, ptr %v0
   %v3 = zext i1 %v2 to i32
   ret i32 %v3
 }
@@ -278,7 +272,7 @@ define i32 @f16() #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     r0 = memub(gp+#global_gp)
 ; CHECK-NEXT:    }
-  %v0 = load i1, i1* @global_gp
+  %v0 = load i1, ptr @global_gp
   %v1 = zext i1 %v0 to i32
   ret i32 %v1
 }
@@ -294,7 +288,7 @@ define i32 @f17(i64 %a0, i64 %a1, i64 %a2, i1 zeroext %a3) #0 {
   ret i32 %v0
 }
 
-define i64 @f18(i1* %a0) #0 {
+define i64 @f18(ptr %a0) #0 {
 ; CHECK-LABEL: f18:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -302,13 +296,13 @@ define i64 @f18(i1* %a0) #0 {
 ; CHECK-NEXT:     r1 = #0
 ; CHECK-NEXT:     r0 = memub(r0+#1)
 ; CHECK-NEXT:    }
-  %v0 = getelementptr i1, i1* %a0, i32 1
-  %v1 = load i1, i1* %v0
+  %v0 = getelementptr i1, ptr %a0, i32 1
+  %v1 = load i1, ptr %v0
   %v2 = zext i1 %v1 to i64
   ret i64 %v2
 }
 
-define i64 @f19(i1* %a0, i32 %a1) #0 {
+define i64 @f19(ptr %a0, i32 %a1) #0 {
 ; CHECK-LABEL: f19:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -316,8 +310,8 @@ define i64 @f19(i1* %a0, i32 %a1) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     r0 = memub(r0+r1<<#0)
 ; CHECK-NEXT:    }
-  %v0 = getelementptr i1, i1* %a0, i32 %a1
-  %v1 = load i1, i1* %v0
+  %v0 = getelementptr i1, ptr %a0, i32 %a1
+  %v1 = load i1, ptr %v0
   %v2 = zext i1 %v1 to i64
   ret i64 %v2
 }
@@ -330,9 +324,8 @@ define i64 @f20(i32 %a0) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     r0 = memub(r0+##array8)
 ; CHECK-NEXT:    }
-  %v0 = getelementptr [128 x i8], [128 x i8]* @array8, i32 0, i32 %a0
-  %v1 = bitcast i8* %v0 to i1*
-  %v2 = load i1, i1* %v1
+  %v0 = getelementptr [128 x i8], ptr @array8, i32 0, i32 %a0
+  %v2 = load i1, ptr %v0
   %v3 = zext i1 %v2 to i64
   ret i64 %v3
 }
@@ -345,9 +338,8 @@ define i64 @f21(i32 %a0) #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     r0 = memub(r0<<#2+##array32)
 ; CHECK-NEXT:    }
-  %v0 = getelementptr [128 x i32], [128 x i32]* @array32, i32 0, i32 %a0
-  %v1 = bitcast i32* %v0 to i1*
-  %v2 = load i1, i1* %v1
+  %v0 = getelementptr [128 x i32], ptr @array32, i32 0, i32 %a0
+  %v2 = load i1, ptr %v0
   %v3 = zext i1 %v2 to i64
   ret i64 %v3
 }
@@ -360,7 +352,7 @@ define i64 @f22() #0 {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     r0 = memub(gp+#global_gp)
 ; CHECK-NEXT:    }
-  %v0 = load i1, i1* @global_gp
+  %v0 = load i1, ptr @global_gp
   %v1 = zext i1 %v0 to i64
   ret i64 %v1
 }

diff  --git a/llvm/test/CodeGen/Hexagon/jt-in-text.ll b/llvm/test/CodeGen/Hexagon/jt-in-text.ll
index 95c5d95c910b9..5b67fc0f0eb29 100644
--- a/llvm/test/CodeGen/Hexagon/jt-in-text.ll
+++ b/llvm/test/CodeGen/Hexagon/jt-in-text.ll
@@ -16,9 +16,9 @@ define void @test2(i32 %lane_id, i32 %rx_pwr_st) #0 {
 entry:
   %lane_id.addr = alloca i32, align 4
   %rx_pwr_st.addr = alloca i32, align 4
-  store i32 %lane_id, i32* %lane_id.addr, align 4
-  store i32 %rx_pwr_st, i32* %rx_pwr_st.addr, align 4
-  %0 = load i32, i32* %lane_id.addr, align 4
+  store i32 %lane_id, ptr %lane_id.addr, align 4
+  store i32 %rx_pwr_st, ptr %rx_pwr_st.addr, align 4
+  %0 = load i32, ptr %lane_id.addr, align 4
   switch i32 %0, label %sw.epilog [
     i32 0, label %sw.bb
     i32 1, label %sw.bb1
@@ -28,26 +28,26 @@ entry:
   ]
 
 sw.bb:                                            ; preds = %entry
-  store i32 1, i32* @lane0_pwr_st, align 4
+  store i32 1, ptr @lane0_pwr_st, align 4
   br label %sw.epilog
 
 sw.bb1:                                           ; preds = %entry
-  store i32 1, i32* @lane1_pwr_st, align 4
+  store i32 1, ptr @lane1_pwr_st, align 4
   br label %sw.epilog
 
 sw.bb2:                                           ; preds = %entry
-  store i32 1, i32* @lane2_pwr_st, align 4
+  store i32 1, ptr @lane2_pwr_st, align 4
   br label %sw.epilog
 
 sw.bb3:                                           ; preds = %entry
-  store i32 1, i32* @lane3_pwr_st, align 4
+  store i32 1, ptr @lane3_pwr_st, align 4
   br label %sw.epilog
 
 sw.bb4:                                           ; preds = %entry
-  store i32 1, i32* @lane0_pwr_st, align 4
-  store i32 1, i32* @lane1_pwr_st, align 4
-  store i32 1, i32* @lane2_pwr_st, align 4
-  store i32 1, i32* @lane3_pwr_st, align 4
+  store i32 1, ptr @lane0_pwr_st, align 4
+  store i32 1, ptr @lane1_pwr_st, align 4
+  store i32 1, ptr @lane2_pwr_st, align 4
+  store i32 1, ptr @lane3_pwr_st, align 4
   br label %sw.epilog
 
 sw.epilog:                                        ; preds = %entry, %sw.bb4, %sw.bb3, %sw.bb2, %sw.bb1, %sw.bb

diff  --git a/llvm/test/CodeGen/Hexagon/jump-prob.ll b/llvm/test/CodeGen/Hexagon/jump-prob.ll
index a5805e723cad2..9910c9a6a3bbb 100644
--- a/llvm/test/CodeGen/Hexagon/jump-prob.ll
+++ b/llvm/test/CodeGen/Hexagon/jump-prob.ll
@@ -10,30 +10,30 @@ target triple = "hexagon-unknown--elf"
 
 %s.0 = type { i8, i8, i8, [6 x i32] }
 %s.1 = type { %s.2 }
-%s.2 = type { i32, i8* }
-%s.3 = type <{ i8*, i8*, i16, i8, i8, i8 }>
+%s.2 = type { i32, ptr }
+%s.3 = type <{ ptr, ptr, i16, i8, i8, i8 }>
 
 @g0 = internal global [2 x %s.0] [%s.0 { i8 0, i8 6, i8 7, [6 x i32] zeroinitializer }, %s.0 { i8 0, i8 6, i8 7, [6 x i32] zeroinitializer }], align 8
 @g1 = internal constant [60 x i8] c"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\00", section "xxxxxxxxxxx.rodata.", align 4
- at g2 = internal constant %s.1 { %s.2 { i32 24, i8* getelementptr inbounds ([60 x i8], [60 x i8]* @g1, i32 0, i32 0) } }, section ".rodata.xxxxxxxxxx.", align 4
+ at g2 = internal constant %s.1 { %s.2 { i32 24, ptr @g1 } }, section ".rodata.xxxxxxxxxx.", align 4
 @g3 = internal constant [115 x i8] c"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\00", section "xxxxxxxxxxx.rodata.", align 4
- at g4 = internal constant %s.3 <{ i8* getelementptr inbounds ([120 x i8], [120 x i8]* @g5, i32 0, i32 0), i8* getelementptr inbounds ([31 x i8], [31 x i8]* @g6, i32 0, i32 0), i16 215, i8 4, i8 0, i8 1 }>, align 1
+ at g4 = internal constant %s.3 <{ ptr @g5, ptr @g6, i16 215, i8 4, i8 0, i8 1 }>, align 1
 @g5 = private unnamed_addr constant [120 x i8] c"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\00", align 1
 @g6 = private unnamed_addr constant [31 x i8] c"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\00", align 1
- at g7 = internal constant %s.3 <{ i8* getelementptr inbounds ([120 x i8], [120 x i8]* @g5, i32 0, i32 0), i8* getelementptr inbounds ([91 x i8], [91 x i8]* @g8, i32 0, i32 0), i16 225, i8 2, i8 2, i8 2 }>, align 1
+ at g7 = internal constant %s.3 <{ ptr @g5, ptr @g8, i16 225, i8 2, i8 2, i8 2 }>, align 1
 @g8 = private unnamed_addr constant [91 x i8] c"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\00", align 1
- at g9 = internal constant %s.3 <{ i8* getelementptr inbounds ([120 x i8], [120 x i8]* @g5, i32 0, i32 0), i8* getelementptr inbounds ([109 x i8], [109 x i8]* @g10, i32 0, i32 0), i16 233, i8 2, i8 2, i8 4 }>, align 1
+ at g9 = internal constant %s.3 <{ ptr @g5, ptr @g10, i16 233, i8 2, i8 2, i8 4 }>, align 1
 @g10 = private unnamed_addr constant [109 x i8] c"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\00", align 1
 @g11 = internal constant [116 x i8] c"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\00", section "xxxxxxxxxxx.rodata.", align 4
 @g12 = internal constant [134 x i8] c"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\00", section "xxxxxxxxxxx.rodata.", align 4
- at g13 = internal constant %s.3 <{ i8* getelementptr inbounds ([120 x i8], [120 x i8]* @g5, i32 0, i32 0), i8* getelementptr inbounds ([31 x i8], [31 x i8]* @g6, i32 0, i32 0), i16 264, i8 4, i8 0, i8 1 }>, align 1
+ at g13 = internal constant %s.3 <{ ptr @g5, ptr @g6, i16 264, i8 4, i8 0, i8 1 }>, align 1
 @g14 = internal constant [116 x i8] c"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\00", section "xxxxxxxxxxx.rodata.", align 4
 @g15 = internal constant [134 x i8] c"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\00", section "xxxxxxxxxxx.rodata.", align 4
 
 ; Function Attrs: nounwind
-define zeroext i8 @f0(i8 zeroext %a0, i8 zeroext %a1, i8* nocapture %a2) #0 {
+define zeroext i8 @f0(i8 zeroext %a0, i8 zeroext %a1, ptr nocapture %a2) #0 {
 b0:
-  store i8 -1, i8* %a2, align 1, !tbaa !0
+  store i8 -1, ptr %a2, align 1, !tbaa !0
   %v0 = zext i8 %a0 to i32
   %v1 = icmp ugt i8 %a0, 7
   %v2 = zext i8 %a1 to i32
@@ -42,100 +42,100 @@ b0:
   br i1 %v4, label %b1, label %b2
 
 b1:                                               ; preds = %b0
-  tail call void @f1(%s.1* @g2, i32 2, i32 %v0, i32 %v2)
+  tail call void @f1(ptr @g2, i32 2, i32 %v0, i32 %v2)
   br label %b12
 
 b2:                                               ; preds = %b0
-  %v5 = load i8, i8* getelementptr inbounds ([2 x %s.0], [2 x %s.0]* @g0, i32 0, i32 0, i32 2), align 2, !tbaa !0
+  %v5 = load i8, ptr getelementptr inbounds ([2 x %s.0], ptr @g0, i32 0, i32 0, i32 2), align 2, !tbaa !0
   %v6 = icmp eq i8 %v5, %a0
-  %v7 = load i8, i8* getelementptr inbounds ([2 x %s.0], [2 x %s.0]* @g0, i32 0, i32 1, i32 2), align 2, !tbaa !0
+  %v7 = load i8, ptr getelementptr inbounds ([2 x %s.0], ptr @g0, i32 0, i32 1, i32 2), align 2, !tbaa !0
   %v8 = icmp eq i8 %v7, %a0
   %v9 = and i1 %v6, %v8
   br i1 %v9, label %b3, label %b4
 
 b3:                                               ; preds = %b2
-  %v10 = getelementptr inbounds [2 x %s.0], [2 x %s.0]* @g0, i32 0, i32 0, i32 3, i32 %v2
-  %v11 = load i32, i32* %v10, align 4, !tbaa !3
-  %v12 = getelementptr inbounds [2 x %s.0], [2 x %s.0]* @g0, i32 0, i32 1, i32 3, i32 %v2
-  %v13 = load i32, i32* %v12, align 4, !tbaa !3
-  tail call void @f1(%s.1* @g2, i32 2, i32 %v0, i32 %v2)
+  %v10 = getelementptr inbounds [2 x %s.0], ptr @g0, i32 0, i32 0, i32 3, i32 %v2
+  %v11 = load i32, ptr %v10, align 4, !tbaa !3
+  %v12 = getelementptr inbounds [2 x %s.0], ptr @g0, i32 0, i32 1, i32 3, i32 %v2
+  %v13 = load i32, ptr %v12, align 4, !tbaa !3
+  tail call void @f1(ptr @g2, i32 2, i32 %v0, i32 %v2)
   br label %b12
 
 b4:                                               ; preds = %b2
-  %v14 = load i8, i8* getelementptr inbounds ([2 x %s.0], [2 x %s.0]* @g0, i32 0, i32 0, i32 0), align 8, !tbaa !0
+  %v14 = load i8, ptr @g0, align 8, !tbaa !0
   %v15 = icmp eq i8 %v14, 1
   %v16 = and i1 %v15, %v6
   br i1 %v16, label %b5, label %b8
 
 b5:                                               ; preds = %b4
-  store i8 0, i8* %a2, align 1, !tbaa !0
-  %v17 = getelementptr inbounds [2 x %s.0], [2 x %s.0]* @g0, i32 0, i32 0, i32 3, i32 %v2
-  %v18 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* elementtype(i32) %v17, i32* %v17, i32 1, i32* elementtype(i32) %v17) #0, !srcloc !5
-  %v19 = load i32, i32* %v17, align 4, !tbaa !3
+  store i8 0, ptr %a2, align 1, !tbaa !0
+  %v17 = getelementptr inbounds [2 x %s.0], ptr @g0, i32 0, i32 0, i32 3, i32 %v2
+  %v18 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(ptr elementtype(i32) %v17, ptr %v17, i32 1, ptr elementtype(i32) %v17) #0, !srcloc !5
+  %v19 = load i32, ptr %v17, align 4, !tbaa !3
   %v20 = icmp eq i32 %v19, 255
   br i1 %v20, label %b6, label %b7
 
 b6:                                               ; preds = %b5
-  tail call void @f2(%s.3* @g4, i32 %v2) #2
+  tail call void @f2(ptr @g4, i32 %v2) #2
   unreachable
 
 b7:                                               ; preds = %b5
-  store i8 %a1, i8* getelementptr inbounds ([2 x %s.0], [2 x %s.0]* @g0, i32 0, i32 0, i32 1), align 1, !tbaa !0
-  %v21 = load i8, i8* %a2, align 1, !tbaa !0
+  store i8 %a1, ptr getelementptr inbounds ([2 x %s.0], ptr @g0, i32 0, i32 0, i32 1), align 1, !tbaa !0
+  %v21 = load i8, ptr %a2, align 1, !tbaa !0
   %v22 = zext i8 %v21 to i32
-  tail call void @f3(%s.3* @g7, i32 %v2, i32 %v22) #0
-  %v23 = load i32, i32* bitcast ([2 x %s.0]* @g0 to i32*), align 8
+  tail call void @f3(ptr @g7, i32 %v2, i32 %v22) #0
+  %v23 = load i32, ptr @g0, align 8
   %v24 = and i32 %v23, 255
   %v25 = lshr i32 %v23, 8
   %v26 = and i32 %v25, 255
   %v27 = lshr i32 %v23, 16
   %v28 = and i32 %v27, 255
-  %v29 = load i32, i32* %v17, align 4, !tbaa !3
-  tail call void @f4(%s.3* @g9, i32 %v24, i32 %v26, i32 %v28, i32 %v29) #0
-  %v30 = load i8, i8* %a2, align 1, !tbaa !0
+  %v29 = load i32, ptr %v17, align 4, !tbaa !3
+  tail call void @f4(ptr @g9, i32 %v24, i32 %v26, i32 %v28, i32 %v29) #0
+  %v30 = load i8, ptr %a2, align 1, !tbaa !0
   %v31 = zext i8 %v30 to i32
-  tail call void @f1(%s.1* @g2, i32 2, i32 %v0, i32 %v2)
-  %v32 = load i32, i32* bitcast ([2 x %s.0]* @g0 to i32*), align 8
+  tail call void @f1(ptr @g2, i32 2, i32 %v0, i32 %v2)
+  %v32 = load i32, ptr @g0, align 8
   %v33 = and i32 %v32, 255
   %v34 = lshr i32 %v32, 8
   %v35 = and i32 %v34, 255
   %v36 = lshr i32 %v32, 16
   %v37 = and i32 %v36, 255
-  %v38 = load i32, i32* %v17, align 4, !tbaa !3
-  tail call void @f1(%s.1* @g2, i32 2, i32 %v0, i32 %v2)
+  %v38 = load i32, ptr %v17, align 4, !tbaa !3
+  tail call void @f1(ptr @g2, i32 2, i32 %v0, i32 %v2)
   br label %b12
 
 b8:                                               ; preds = %b4
-  %v39 = load i8, i8* getelementptr inbounds ([2 x %s.0], [2 x %s.0]* @g0, i32 0, i32 1, i32 0), align 4, !tbaa !0
+  %v39 = load i8, ptr getelementptr inbounds ([2 x %s.0], ptr @g0, i32 0, i32 1, i32 0), align 4, !tbaa !0
   %v40 = icmp eq i8 %v39, 1
   %v41 = and i1 %v40, %v8
   br i1 %v41, label %b9, label %b12
 
 b9:                                               ; preds = %b8
-  store i8 1, i8* %a2, align 1, !tbaa !0
-  %v42 = getelementptr inbounds [2 x %s.0], [2 x %s.0]* @g0, i32 0, i32 1, i32 3, i32 %v2
-  %v43 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* elementtype(i32) %v42, i32* %v42, i32 1, i32* elementtype(i32) %v42) #0, !srcloc !5
-  %v44 = load i32, i32* %v42, align 4, !tbaa !3
+  store i8 1, ptr %a2, align 1, !tbaa !0
+  %v42 = getelementptr inbounds [2 x %s.0], ptr @g0, i32 0, i32 1, i32 3, i32 %v2
+  %v43 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(ptr elementtype(i32) %v42, ptr %v42, i32 1, ptr elementtype(i32) %v42) #0, !srcloc !5
+  %v44 = load i32, ptr %v42, align 4, !tbaa !3
   %v45 = icmp eq i32 %v44, 255
   br i1 %v45, label %b10, label %b11
 
 b10:                                              ; preds = %b9
-  tail call void @f2(%s.3* @g13, i32 %v2) #2
+  tail call void @f2(ptr @g13, i32 %v2) #2
   unreachable
 
 b11:                                              ; preds = %b9
-  store i8 %a1, i8* getelementptr inbounds ([2 x %s.0], [2 x %s.0]* @g0, i32 0, i32 1, i32 1), align 1, !tbaa !0
-  %v46 = load i8, i8* %a2, align 1, !tbaa !0
+  store i8 %a1, ptr getelementptr inbounds ([2 x %s.0], ptr @g0, i32 0, i32 1, i32 1), align 1, !tbaa !0
+  %v46 = load i8, ptr %a2, align 1, !tbaa !0
   %v47 = zext i8 %v46 to i32
-  tail call void @f1(%s.1* @g2, i32 2, i32 %v0, i32 %v2)
-  %v48 = load i32, i32* bitcast (i8* getelementptr inbounds ([2 x %s.0], [2 x %s.0]* @g0, i32 0, i32 1, i32 0) to i32*), align 4
+  tail call void @f1(ptr @g2, i32 2, i32 %v0, i32 %v2)
+  %v48 = load i32, ptr getelementptr inbounds ([2 x %s.0], ptr @g0, i32 0, i32 1, i32 0), align 4
   %v49 = and i32 %v48, 255
   %v50 = lshr i32 %v48, 8
   %v51 = and i32 %v50, 255
   %v52 = lshr i32 %v48, 16
   %v53 = and i32 %v52, 255
-  %v54 = load i32, i32* %v42, align 4, !tbaa !3
-  tail call void @f1(%s.1* @g2, i32 2, i32 %v0, i32 %v2)
+  %v54 = load i32, ptr %v42, align 4, !tbaa !3
+  tail call void @f1(ptr @g2, i32 2, i32 %v0, i32 %v2)
   br label %b12
 
 b12:                                              ; preds = %b11, %b8, %b7, %b3, %b1
@@ -143,14 +143,14 @@ b12:                                              ; preds = %b11, %b8, %b7, %b3,
   ret i8 %v55
 }
 
-declare void @f1(%s.1*, i32, i32, i32)
+declare void @f1(ptr, i32, i32, i32)
 
 ; Function Attrs: noreturn
-declare void @f2(%s.3*, i32) #1
+declare void @f2(ptr, i32) #1
 
-declare void @f3(%s.3*, i32, i32)
+declare void @f3(ptr, i32, i32)
 
-declare void @f4(%s.3*, i32, i32, i32, i32)
+declare void @f4(ptr, i32, i32, i32, i32)
 
 attributes #0 = { nounwind "target-cpu"="hexagonv55" }
 attributes #1 = { noreturn }

diff  --git a/llvm/test/CodeGen/Hexagon/jump-table-isel.ll b/llvm/test/CodeGen/Hexagon/jump-table-isel.ll
index 835b90ba92148..53755f80f4f54 100644
--- a/llvm/test/CodeGen/Hexagon/jump-table-isel.ll
+++ b/llvm/test/CodeGen/Hexagon/jump-table-isel.ll
@@ -14,17 +14,16 @@ target triple = "hexagon"
 @g4 = external global %s.0
 
 ; Function Attrs: nounwind optsize
-define zeroext i8 @f0(%s.1* %a0, %s.0** nocapture %a1) #0 {
+define zeroext i8 @f0(ptr %a0, ptr nocapture %a1) #0 {
 b0:
-  store %s.0* null, %s.0** %a1, align 4, !tbaa !0
-  %v0 = getelementptr inbounds %s.1, %s.1* %a0, i32 0, i32 1
-  %v1 = load i32, i32* %v0, align 4, !tbaa !4
+  store ptr null, ptr %a1, align 4, !tbaa !0
+  %v0 = getelementptr inbounds %s.1, ptr %a0, i32 0, i32 1
+  %v1 = load i32, ptr %v0, align 4, !tbaa !4
   %v2 = icmp eq i32 %v1, 0
   br i1 %v2, label %b1, label %b8
 
 b1:                                               ; preds = %b0
-  %v3 = getelementptr inbounds %s.1, %s.1* %a0, i32 0, i32 0
-  %v4 = load i32, i32* %v3, align 4, !tbaa !7
+  %v4 = load i32, ptr %a0, align 4, !tbaa !7
   switch i32 %v4, label %b8 [
     i32 0, label %b2
     i32 1, label %b4
@@ -34,45 +33,45 @@ b1:                                               ; preds = %b0
   ]
 
 b2:                                               ; preds = %b1
-  %v5 = getelementptr inbounds %s.1, %s.1* %a0, i32 0, i32 2
-  %v6 = load i32, i32* %v5, align 4, !tbaa !8
+  %v5 = getelementptr inbounds %s.1, ptr %a0, i32 0, i32 2
+  %v6 = load i32, ptr %v5, align 4, !tbaa !8
   switch i32 %v6, label %b8 [
     i32 27, label %b3
     i32 44, label %b3
   ]
 
 b3:                                               ; preds = %b7, %b7, %b7, %b6, %b6, %b5, %b5, %b4, %b4, %b2, %b2
-  %v7 = phi %s.0* [ @g0, %b2 ], [ @g0, %b2 ], [ @g1, %b4 ], [ @g1, %b4 ], [ @g2, %b5 ], [ @g2, %b5 ], [ @g3, %b6 ], [ @g3, %b6 ], [ @g4, %b7 ], [ @g4, %b7 ], [ @g4, %b7 ]
-  store %s.0* %v7, %s.0** %a1, align 4, !tbaa !0
+  %v7 = phi ptr [ @g0, %b2 ], [ @g0, %b2 ], [ @g1, %b4 ], [ @g1, %b4 ], [ @g2, %b5 ], [ @g2, %b5 ], [ @g3, %b6 ], [ @g3, %b6 ], [ @g4, %b7 ], [ @g4, %b7 ], [ @g4, %b7 ]
+  store ptr %v7, ptr %a1, align 4, !tbaa !0
   br label %b8
 
 b4:                                               ; preds = %b1
-  %v8 = getelementptr inbounds %s.1, %s.1* %a0, i32 0, i32 2
-  %v9 = load i32, i32* %v8, align 4, !tbaa !8
+  %v8 = getelementptr inbounds %s.1, ptr %a0, i32 0, i32 2
+  %v9 = load i32, ptr %v8, align 4, !tbaa !8
   switch i32 %v9, label %b8 [
     i32 27, label %b3
     i32 44, label %b3
   ]
 
 b5:                                               ; preds = %b1
-  %v10 = getelementptr inbounds %s.1, %s.1* %a0, i32 0, i32 2
-  %v11 = load i32, i32* %v10, align 4, !tbaa !8
+  %v10 = getelementptr inbounds %s.1, ptr %a0, i32 0, i32 2
+  %v11 = load i32, ptr %v10, align 4, !tbaa !8
   switch i32 %v11, label %b8 [
     i32 27, label %b3
     i32 44, label %b3
   ]
 
 b6:                                               ; preds = %b1
-  %v12 = getelementptr inbounds %s.1, %s.1* %a0, i32 0, i32 2
-  %v13 = load i32, i32* %v12, align 4, !tbaa !8
+  %v12 = getelementptr inbounds %s.1, ptr %a0, i32 0, i32 2
+  %v13 = load i32, ptr %v12, align 4, !tbaa !8
   switch i32 %v13, label %b8 [
     i32 27, label %b3
     i32 44, label %b3
   ]
 
 b7:                                               ; preds = %b1
-  %v14 = getelementptr inbounds %s.1, %s.1* %a0, i32 0, i32 2
-  %v15 = load i32, i32* %v14, align 4, !tbaa !8
+  %v14 = getelementptr inbounds %s.1, ptr %a0, i32 0, i32 2
+  %v15 = load i32, ptr %v14, align 4, !tbaa !8
   switch i32 %v15, label %b8 [
     i32 40, label %b3
     i32 46, label %b3

diff  --git a/llvm/test/CodeGen/Hexagon/large-number-of-preds.ll b/llvm/test/CodeGen/Hexagon/large-number-of-preds.ll
index 1e01cb307777d..c380576cd63ef 100644
--- a/llvm/test/CodeGen/Hexagon/large-number-of-preds.ll
+++ b/llvm/test/CodeGen/Hexagon/large-number-of-preds.ll
@@ -3,235 +3,232 @@
 
 target triple = "hexagon-unknown--elf"
 
- at g0 = external global void (float*, i32, i32, float*, float*)**
+ at g0 = external global ptr
 
 ; Function Attrs: nounwind
-define void @f0(float* nocapture %a0, float* nocapture %a1, float* %a2) #0 {
+define void @f0(ptr nocapture %a0, ptr nocapture %a1, ptr %a2) #0 {
 b0:
   %v0 = alloca [64 x float], align 16
   %v1 = alloca [8 x float], align 8
-  %v2 = bitcast [64 x float]* %v0 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 256, i8* %v2) #2
-  %v3 = load float, float* %a0, align 4, !tbaa !0
-  %v4 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 35
-  store float %v3, float* %v4, align 4, !tbaa !0
-  %v5 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 0
-  store float %v3, float* %v5, align 16, !tbaa !0
-  %v6 = getelementptr inbounds float, float* %a0, i32 1
-  %v7 = load float, float* %v6, align 4, !tbaa !0
-  %v8 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 36
-  store float %v7, float* %v8, align 16, !tbaa !0
-  %v9 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 1
-  store float %v7, float* %v9, align 4, !tbaa !0
-  %v10 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 37
-  store float 1.000000e+00, float* %v10, align 4, !tbaa !0
-  %v11 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 2
-  store float 1.000000e+00, float* %v11, align 8, !tbaa !0
-  %v12 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 34
-  store float 0.000000e+00, float* %v12, align 8, !tbaa !0
-  %v13 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 33
-  store float 0.000000e+00, float* %v13, align 4, !tbaa !0
-  %v14 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 32
-  store float 0.000000e+00, float* %v14, align 16, !tbaa !0
-  %v15 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 5
-  store float 0.000000e+00, float* %v15, align 4, !tbaa !0
-  %v16 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 4
-  store float 0.000000e+00, float* %v16, align 16, !tbaa !0
-  %v17 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 3
-  store float 0.000000e+00, float* %v17, align 4, !tbaa !0
-  %v18 = load float, float* %a1, align 4, !tbaa !0
+  call void @llvm.lifetime.start.p0(i64 256, ptr %v0) #2
+  %v3 = load float, ptr %a0, align 4, !tbaa !0
+  %v4 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 35
+  store float %v3, ptr %v4, align 4, !tbaa !0
+  store float %v3, ptr %v0, align 16, !tbaa !0
+  %v6 = getelementptr inbounds float, ptr %a0, i32 1
+  %v7 = load float, ptr %v6, align 4, !tbaa !0
+  %v8 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 36
+  store float %v7, ptr %v8, align 16, !tbaa !0
+  %v9 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 1
+  store float %v7, ptr %v9, align 4, !tbaa !0
+  %v10 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 37
+  store float 1.000000e+00, ptr %v10, align 4, !tbaa !0
+  %v11 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 2
+  store float 1.000000e+00, ptr %v11, align 8, !tbaa !0
+  %v12 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 34
+  store float 0.000000e+00, ptr %v12, align 8, !tbaa !0
+  %v13 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 33
+  store float 0.000000e+00, ptr %v13, align 4, !tbaa !0
+  %v14 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 32
+  store float 0.000000e+00, ptr %v14, align 16, !tbaa !0
+  %v15 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 5
+  store float 0.000000e+00, ptr %v15, align 4, !tbaa !0
+  %v16 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 4
+  store float 0.000000e+00, ptr %v16, align 16, !tbaa !0
+  %v17 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 3
+  store float 0.000000e+00, ptr %v17, align 4, !tbaa !0
+  %v18 = load float, ptr %a1, align 4, !tbaa !0
   %v19 = fmul float %v3, %v18
   %v20 = fsub float -0.000000e+00, %v19
-  %v21 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 6
-  store float %v20, float* %v21, align 8, !tbaa !0
+  %v21 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 6
+  store float %v20, ptr %v21, align 8, !tbaa !0
   %v22 = fmul float %v7, %v18
   %v23 = fsub float -0.000000e+00, %v22
-  %v24 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 7
-  store float %v23, float* %v24, align 4, !tbaa !0
-  %v25 = getelementptr inbounds float, float* %a1, i32 1
-  %v26 = load float, float* %v25, align 4, !tbaa !0
+  %v24 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 7
+  store float %v23, ptr %v24, align 4, !tbaa !0
+  %v25 = getelementptr inbounds float, ptr %a1, i32 1
+  %v26 = load float, ptr %v25, align 4, !tbaa !0
   %v27 = fmul float %v3, %v26
   %v28 = fsub float -0.000000e+00, %v27
-  %v29 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 38
-  store float %v28, float* %v29, align 8, !tbaa !0
+  %v29 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 38
+  store float %v28, ptr %v29, align 8, !tbaa !0
   %v30 = fmul float %v7, %v26
   %v31 = fsub float -0.000000e+00, %v30
-  %v32 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 39
-  store float %v31, float* %v32, align 4, !tbaa !0
-  %v33 = getelementptr inbounds [8 x float], [8 x float]* %v1, i32 0, i32 0
-  store float %v18, float* %v33, align 8, !tbaa !0
-  %v34 = getelementptr inbounds [8 x float], [8 x float]* %v1, i32 0, i32 4
-  store float %v26, float* %v34, align 8, !tbaa !0
-  %v35 = getelementptr float, float* %a0, i32 2
-  %v36 = getelementptr float, float* %a1, i32 2
-  %v37 = load float, float* %v35, align 4, !tbaa !0
-  %v38 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 43
-  store float %v37, float* %v38, align 4, !tbaa !0
-  %v39 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 8
-  store float %v37, float* %v39, align 16, !tbaa !0
-  %v40 = getelementptr inbounds float, float* %a0, i32 3
-  %v41 = load float, float* %v40, align 4, !tbaa !0
-  %v42 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 44
-  store float %v41, float* %v42, align 16, !tbaa !0
-  %v43 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 9
-  store float %v41, float* %v43, align 4, !tbaa !0
-  %v44 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 45
-  store float 1.000000e+00, float* %v44, align 4, !tbaa !0
-  %v45 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 10
-  store float 1.000000e+00, float* %v45, align 8, !tbaa !0
-  %v46 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 42
-  store float 0.000000e+00, float* %v46, align 8, !tbaa !0
-  %v47 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 41
-  store float 0.000000e+00, float* %v47, align 4, !tbaa !0
-  %v48 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 40
-  store float 0.000000e+00, float* %v48, align 16, !tbaa !0
-  %v49 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 13
-  store float 0.000000e+00, float* %v49, align 4, !tbaa !0
-  %v50 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 12
-  store float 0.000000e+00, float* %v50, align 16, !tbaa !0
-  %v51 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 11
-  store float 0.000000e+00, float* %v51, align 4, !tbaa !0
-  %v52 = load float, float* %v36, align 4, !tbaa !0
+  %v32 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 39
+  store float %v31, ptr %v32, align 4, !tbaa !0
+  store float %v18, ptr %v1, align 8, !tbaa !0
+  %v34 = getelementptr inbounds [8 x float], ptr %v1, i32 0, i32 4
+  store float %v26, ptr %v34, align 8, !tbaa !0
+  %v35 = getelementptr float, ptr %a0, i32 2
+  %v36 = getelementptr float, ptr %a1, i32 2
+  %v37 = load float, ptr %v35, align 4, !tbaa !0
+  %v38 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 43
+  store float %v37, ptr %v38, align 4, !tbaa !0
+  %v39 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 8
+  store float %v37, ptr %v39, align 16, !tbaa !0
+  %v40 = getelementptr inbounds float, ptr %a0, i32 3
+  %v41 = load float, ptr %v40, align 4, !tbaa !0
+  %v42 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 44
+  store float %v41, ptr %v42, align 16, !tbaa !0
+  %v43 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 9
+  store float %v41, ptr %v43, align 4, !tbaa !0
+  %v44 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 45
+  store float 1.000000e+00, ptr %v44, align 4, !tbaa !0
+  %v45 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 10
+  store float 1.000000e+00, ptr %v45, align 8, !tbaa !0
+  %v46 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 42
+  store float 0.000000e+00, ptr %v46, align 8, !tbaa !0
+  %v47 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 41
+  store float 0.000000e+00, ptr %v47, align 4, !tbaa !0
+  %v48 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 40
+  store float 0.000000e+00, ptr %v48, align 16, !tbaa !0
+  %v49 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 13
+  store float 0.000000e+00, ptr %v49, align 4, !tbaa !0
+  %v50 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 12
+  store float 0.000000e+00, ptr %v50, align 16, !tbaa !0
+  %v51 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 11
+  store float 0.000000e+00, ptr %v51, align 4, !tbaa !0
+  %v52 = load float, ptr %v36, align 4, !tbaa !0
   %v53 = fmul float %v37, %v52
   %v54 = fsub float -0.000000e+00, %v53
-  %v55 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 14
-  store float %v54, float* %v55, align 8, !tbaa !0
+  %v55 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 14
+  store float %v54, ptr %v55, align 8, !tbaa !0
   %v56 = fmul float %v41, %v52
   %v57 = fsub float -0.000000e+00, %v56
-  %v58 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 15
-  store float %v57, float* %v58, align 4, !tbaa !0
-  %v59 = getelementptr inbounds float, float* %a1, i32 3
-  %v60 = load float, float* %v59, align 4, !tbaa !0
+  %v58 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 15
+  store float %v57, ptr %v58, align 4, !tbaa !0
+  %v59 = getelementptr inbounds float, ptr %a1, i32 3
+  %v60 = load float, ptr %v59, align 4, !tbaa !0
   %v61 = fmul float %v37, %v60
   %v62 = fsub float -0.000000e+00, %v61
-  %v63 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 46
-  store float %v62, float* %v63, align 8, !tbaa !0
+  %v63 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 46
+  store float %v62, ptr %v63, align 8, !tbaa !0
   %v64 = fmul float %v41, %v60
   %v65 = fsub float -0.000000e+00, %v64
-  %v66 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 47
-  store float %v65, float* %v66, align 4, !tbaa !0
-  %v67 = getelementptr inbounds [8 x float], [8 x float]* %v1, i32 0, i32 1
-  store float %v52, float* %v67, align 4, !tbaa !0
-  %v68 = getelementptr inbounds [8 x float], [8 x float]* %v1, i32 0, i32 5
-  store float %v60, float* %v68, align 4, !tbaa !0
-  %v69 = getelementptr float, float* %a0, i32 4
-  %v70 = getelementptr float, float* %a1, i32 4
-  %v71 = load float, float* %v69, align 4, !tbaa !0
-  %v72 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 51
-  store float %v71, float* %v72, align 4, !tbaa !0
-  %v73 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 16
-  store float %v71, float* %v73, align 16, !tbaa !0
-  %v74 = getelementptr inbounds float, float* %a0, i32 5
-  %v75 = load float, float* %v74, align 4, !tbaa !0
-  %v76 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 52
-  store float %v75, float* %v76, align 16, !tbaa !0
-  %v77 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 17
-  store float %v75, float* %v77, align 4, !tbaa !0
-  %v78 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 53
-  store float 1.000000e+00, float* %v78, align 4, !tbaa !0
-  %v79 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 18
-  store float 1.000000e+00, float* %v79, align 8, !tbaa !0
-  %v80 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 50
-  store float 0.000000e+00, float* %v80, align 8, !tbaa !0
-  %v81 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 49
-  store float 0.000000e+00, float* %v81, align 4, !tbaa !0
-  %v82 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 48
-  store float 0.000000e+00, float* %v82, align 16, !tbaa !0
-  %v83 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 21
-  store float 0.000000e+00, float* %v83, align 4, !tbaa !0
-  %v84 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 20
-  store float 0.000000e+00, float* %v84, align 16, !tbaa !0
-  %v85 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 19
-  store float 0.000000e+00, float* %v85, align 4, !tbaa !0
-  %v86 = load float, float* %v70, align 4, !tbaa !0
+  %v66 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 47
+  store float %v65, ptr %v66, align 4, !tbaa !0
+  %v67 = getelementptr inbounds [8 x float], ptr %v1, i32 0, i32 1
+  store float %v52, ptr %v67, align 4, !tbaa !0
+  %v68 = getelementptr inbounds [8 x float], ptr %v1, i32 0, i32 5
+  store float %v60, ptr %v68, align 4, !tbaa !0
+  %v69 = getelementptr float, ptr %a0, i32 4
+  %v70 = getelementptr float, ptr %a1, i32 4
+  %v71 = load float, ptr %v69, align 4, !tbaa !0
+  %v72 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 51
+  store float %v71, ptr %v72, align 4, !tbaa !0
+  %v73 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 16
+  store float %v71, ptr %v73, align 16, !tbaa !0
+  %v74 = getelementptr inbounds float, ptr %a0, i32 5
+  %v75 = load float, ptr %v74, align 4, !tbaa !0
+  %v76 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 52
+  store float %v75, ptr %v76, align 16, !tbaa !0
+  %v77 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 17
+  store float %v75, ptr %v77, align 4, !tbaa !0
+  %v78 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 53
+  store float 1.000000e+00, ptr %v78, align 4, !tbaa !0
+  %v79 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 18
+  store float 1.000000e+00, ptr %v79, align 8, !tbaa !0
+  %v80 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 50
+  store float 0.000000e+00, ptr %v80, align 8, !tbaa !0
+  %v81 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 49
+  store float 0.000000e+00, ptr %v81, align 4, !tbaa !0
+  %v82 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 48
+  store float 0.000000e+00, ptr %v82, align 16, !tbaa !0
+  %v83 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 21
+  store float 0.000000e+00, ptr %v83, align 4, !tbaa !0
+  %v84 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 20
+  store float 0.000000e+00, ptr %v84, align 16, !tbaa !0
+  %v85 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 19
+  store float 0.000000e+00, ptr %v85, align 4, !tbaa !0
+  %v86 = load float, ptr %v70, align 4, !tbaa !0
   %v87 = fmul float %v71, %v86
   %v88 = fsub float -0.000000e+00, %v87
-  %v89 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 22
-  store float %v88, float* %v89, align 8, !tbaa !0
+  %v89 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 22
+  store float %v88, ptr %v89, align 8, !tbaa !0
   %v90 = fmul float %v75, %v86
   %v91 = fsub float -0.000000e+00, %v90
-  %v92 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 23
-  store float %v91, float* %v92, align 4, !tbaa !0
-  %v93 = getelementptr inbounds float, float* %a1, i32 5
-  %v94 = load float, float* %v93, align 4, !tbaa !0
+  %v92 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 23
+  store float %v91, ptr %v92, align 4, !tbaa !0
+  %v93 = getelementptr inbounds float, ptr %a1, i32 5
+  %v94 = load float, ptr %v93, align 4, !tbaa !0
   %v95 = fmul float %v71, %v94
   %v96 = fsub float -0.000000e+00, %v95
-  %v97 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 54
-  store float %v96, float* %v97, align 8, !tbaa !0
+  %v97 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 54
+  store float %v96, ptr %v97, align 8, !tbaa !0
   %v98 = fmul float %v75, %v94
   %v99 = fsub float -0.000000e+00, %v98
-  %v100 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 55
-  store float %v99, float* %v100, align 4, !tbaa !0
-  %v101 = getelementptr inbounds [8 x float], [8 x float]* %v1, i32 0, i32 2
-  store float %v86, float* %v101, align 8, !tbaa !0
-  %v102 = getelementptr inbounds [8 x float], [8 x float]* %v1, i32 0, i32 6
-  store float %v94, float* %v102, align 8, !tbaa !0
-  %v103 = getelementptr float, float* %a0, i32 6
-  %v104 = getelementptr float, float* %a1, i32 6
-  %v105 = load float, float* %v103, align 4, !tbaa !0
-  %v106 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 59
-  store float %v105, float* %v106, align 4, !tbaa !0
-  %v107 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 24
-  store float %v105, float* %v107, align 16, !tbaa !0
-  %v108 = getelementptr inbounds float, float* %a0, i32 7
-  %v109 = load float, float* %v108, align 4, !tbaa !0
-  %v110 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 60
-  store float %v109, float* %v110, align 16, !tbaa !0
-  %v111 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 25
-  store float %v109, float* %v111, align 4, !tbaa !0
-  %v112 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 61
-  store float 1.000000e+00, float* %v112, align 4, !tbaa !0
-  %v113 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 26
-  store float 1.000000e+00, float* %v113, align 8, !tbaa !0
-  %v114 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 58
-  store float 0.000000e+00, float* %v114, align 8, !tbaa !0
-  %v115 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 57
-  store float 0.000000e+00, float* %v115, align 4, !tbaa !0
-  %v116 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 56
-  store float 0.000000e+00, float* %v116, align 16, !tbaa !0
-  %v117 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 29
-  store float 0.000000e+00, float* %v117, align 4, !tbaa !0
-  %v118 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 28
-  store float 0.000000e+00, float* %v118, align 16, !tbaa !0
-  %v119 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 27
-  store float 0.000000e+00, float* %v119, align 4, !tbaa !0
-  %v120 = load float, float* %v104, align 4, !tbaa !0
+  %v100 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 55
+  store float %v99, ptr %v100, align 4, !tbaa !0
+  %v101 = getelementptr inbounds [8 x float], ptr %v1, i32 0, i32 2
+  store float %v86, ptr %v101, align 8, !tbaa !0
+  %v102 = getelementptr inbounds [8 x float], ptr %v1, i32 0, i32 6
+  store float %v94, ptr %v102, align 8, !tbaa !0
+  %v103 = getelementptr float, ptr %a0, i32 6
+  %v104 = getelementptr float, ptr %a1, i32 6
+  %v105 = load float, ptr %v103, align 4, !tbaa !0
+  %v106 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 59
+  store float %v105, ptr %v106, align 4, !tbaa !0
+  %v107 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 24
+  store float %v105, ptr %v107, align 16, !tbaa !0
+  %v108 = getelementptr inbounds float, ptr %a0, i32 7
+  %v109 = load float, ptr %v108, align 4, !tbaa !0
+  %v110 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 60
+  store float %v109, ptr %v110, align 16, !tbaa !0
+  %v111 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 25
+  store float %v109, ptr %v111, align 4, !tbaa !0
+  %v112 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 61
+  store float 1.000000e+00, ptr %v112, align 4, !tbaa !0
+  %v113 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 26
+  store float 1.000000e+00, ptr %v113, align 8, !tbaa !0
+  %v114 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 58
+  store float 0.000000e+00, ptr %v114, align 8, !tbaa !0
+  %v115 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 57
+  store float 0.000000e+00, ptr %v115, align 4, !tbaa !0
+  %v116 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 56
+  store float 0.000000e+00, ptr %v116, align 16, !tbaa !0
+  %v117 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 29
+  store float 0.000000e+00, ptr %v117, align 4, !tbaa !0
+  %v118 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 28
+  store float 0.000000e+00, ptr %v118, align 16, !tbaa !0
+  %v119 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 27
+  store float 0.000000e+00, ptr %v119, align 4, !tbaa !0
+  %v120 = load float, ptr %v104, align 4, !tbaa !0
   %v121 = fmul float %v105, %v120
   %v122 = fsub float -0.000000e+00, %v121
-  %v123 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 30
-  store float %v122, float* %v123, align 8, !tbaa !0
+  %v123 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 30
+  store float %v122, ptr %v123, align 8, !tbaa !0
   %v124 = fmul float %v109, %v120
   %v125 = fsub float -0.000000e+00, %v124
-  %v126 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 31
-  store float %v125, float* %v126, align 4, !tbaa !0
-  %v127 = getelementptr inbounds float, float* %a1, i32 7
-  %v128 = load float, float* %v127, align 4, !tbaa !0
+  %v126 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 31
+  store float %v125, ptr %v126, align 4, !tbaa !0
+  %v127 = getelementptr inbounds float, ptr %a1, i32 7
+  %v128 = load float, ptr %v127, align 4, !tbaa !0
   %v129 = fmul float %v105, %v128
   %v130 = fsub float -0.000000e+00, %v129
-  %v131 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 62
-  store float %v130, float* %v131, align 8, !tbaa !0
+  %v131 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 62
+  store float %v130, ptr %v131, align 8, !tbaa !0
   %v132 = fmul float %v109, %v128
   %v133 = fsub float -0.000000e+00, %v132
-  %v134 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 63
-  store float %v133, float* %v134, align 4, !tbaa !0
-  %v135 = getelementptr inbounds [8 x float], [8 x float]* %v1, i32 0, i32 3
-  store float %v120, float* %v135, align 4, !tbaa !0
-  %v136 = getelementptr inbounds [8 x float], [8 x float]* %v1, i32 0, i32 7
-  store float %v128, float* %v136, align 4, !tbaa !0
-  %v137 = load void (float*, i32, i32, float*, float*)**, void (float*, i32, i32, float*, float*)*** @g0, align 4, !tbaa !4
-  %v138 = load void (float*, i32, i32, float*, float*)*, void (float*, i32, i32, float*, float*)** %v137, align 4, !tbaa !4
-  call void %v138(float* %v5, i32 8, i32 8, float* %v33, float* %a2) #2
-  %v139 = getelementptr inbounds float, float* %a2, i32 8
-  store float 1.000000e+00, float* %v139, align 4, !tbaa !0
-  call void @llvm.lifetime.end.p0i8(i64 256, i8* %v2) #2
+  %v134 = getelementptr inbounds [64 x float], ptr %v0, i32 0, i32 63
+  store float %v133, ptr %v134, align 4, !tbaa !0
+  %v135 = getelementptr inbounds [8 x float], ptr %v1, i32 0, i32 3
+  store float %v120, ptr %v135, align 4, !tbaa !0
+  %v136 = getelementptr inbounds [8 x float], ptr %v1, i32 0, i32 7
+  store float %v128, ptr %v136, align 4, !tbaa !0
+  %v137 = load ptr, ptr @g0, align 4, !tbaa !4
+  %v138 = load ptr, ptr %v137, align 4, !tbaa !4
+  call void %v138(ptr %v0, i32 8, i32 8, ptr %v1, ptr %a2) #2
+  %v139 = getelementptr inbounds float, ptr %a2, i32 8
+  store float 1.000000e+00, ptr %v139, align 4, !tbaa !0
+  call void @llvm.lifetime.end.p0(i64 256, ptr %v0) #2
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
 
 attributes #0 = { nounwind "target-cpu"="hexagonv55" }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/late-pred.ll b/llvm/test/CodeGen/Hexagon/late-pred.ll
index 3b690eb97ccad..0aba97d8440b3 100644
--- a/llvm/test/CodeGen/Hexagon/late-pred.ll
+++ b/llvm/test/CodeGen/Hexagon/late-pred.ll
@@ -8,7 +8,7 @@ target triple = "hexagon"
 
 define void @ext4_group_extend() #0 {
 entry:
-  %es.idx.val = load i32, i32* undef, align 4
+  %es.idx.val = load i32, ptr undef, align 4
   %conv1.i = zext i32 %es.idx.val to i64
   %or.i = or i64 undef, %conv1.i
   %add20 = add i64 %or.i, undef

diff  --git a/llvm/test/CodeGen/Hexagon/late_instr.ll b/llvm/test/CodeGen/Hexagon/late_instr.ll
index 7825ef96d2fff..eeeea44886344 100644
--- a/llvm/test/CodeGen/Hexagon/late_instr.ll
+++ b/llvm/test/CodeGen/Hexagon/late_instr.ll
@@ -10,10 +10,9 @@
 target triple = "hexagon-unknown-linux-gnu"
 
 ; Function Attrs: nounwind
-define void @f0(i8* noalias nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, i32* noalias nocapture %a4, i32 %a5) #0 {
+define void @f0(ptr noalias nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, ptr noalias nocapture %a4, i32 %a5) #0 {
 b0:
   %v0 = mul i32 %a2, 3
-  %v1 = bitcast i32* %a4 to <16 x i32>*
   %v2 = mul i32 %a5, -2
   %v3 = add i32 %v2, %a1
   %v4 = and i32 %a5, 63
@@ -38,25 +37,20 @@ b0:
   br i1 %v22, label %b1, label %b8
 
 b1:                                               ; preds = %b0
-  %v23 = getelementptr inbounds i8, i8* %a0, i32 %a5
-  %v24 = bitcast i8* %v23 to <16 x i32>*
-  %v25 = load <16 x i32>, <16 x i32>* %v24, align 64, !tbaa !0
+  %v23 = getelementptr inbounds i8, ptr %a0, i32 %a5
+  %v25 = load <16 x i32>, ptr %v23, align 64, !tbaa !0
   %v26 = add i32 %a5, 64
-  %v27 = getelementptr inbounds i8, i8* %a0, i32 %v26
-  %v28 = bitcast i8* %v27 to <16 x i32>*
+  %v27 = getelementptr inbounds i8, ptr %a0, i32 %v26
   %v29 = add i32 %a5, -64
-  %v30 = getelementptr inbounds i8, i8* %a0, i32 %v29
-  %v31 = bitcast i8* %v30 to <16 x i32>*
-  %v32 = load <16 x i32>, <16 x i32>* %v31, align 64, !tbaa !0
+  %v30 = getelementptr inbounds i8, ptr %a0, i32 %v29
+  %v32 = load <16 x i32>, ptr %v30, align 64, !tbaa !0
   %v33 = tail call <64 x i1> @llvm.hexagon.V6.pred.scalar2(i32 %a5)
   %v34 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v33, i32 16843009)
   %v35 = tail call <16 x i32> @llvm.hexagon.V6.vnot(<16 x i32> %v34)
   %v36 = add i32 %v0, %a5
-  %v37 = getelementptr inbounds i8, i8* %a0, i32 %v36
-  %v38 = bitcast i8* %v37 to <16 x i32>*
+  %v37 = getelementptr inbounds i8, ptr %a0, i32 %v36
   %v39 = sub i32 %a5, %v0
-  %v40 = getelementptr inbounds i8, i8* %a0, i32 %v39
-  %v41 = bitcast i8* %v40 to <16 x i32>*
+  %v40 = getelementptr inbounds i8, ptr %a0, i32 %v39
   %v42 = tail call <16 x i32> @llvm.hexagon.V6.vd0()
   %v43 = add i32 %v4, %a1
   %v44 = mul i32 %a5, 2
@@ -71,16 +65,16 @@ b1:                                               ; preds = %b0
   %v53 = lshr i32 %v52, 9
   %v54 = mul nuw nsw i32 %v53, 16
   %v55 = add nuw nsw i32 %v54, 16
-  %v56 = getelementptr i32, i32* %a4, i32 %v55
+  %v56 = getelementptr i32, ptr %a4, i32 %v55
   br label %b2
 
 b2:                                               ; preds = %b6, %b1
   %v57 = phi i32 [ %v46, %b1 ], [ %v125, %b6 ]
   %v58 = phi i32 [ %v5, %b1 ], [ %v123, %b6 ]
-  %v59 = phi <16 x i32>* [ %v1, %b1 ], [ %v122, %b6 ]
-  %v60 = phi <16 x i32>* [ %v38, %b1 ], [ %v114, %b6 ]
-  %v61 = phi <16 x i32>* [ %v41, %b1 ], [ %v115, %b6 ]
-  %v62 = phi <16 x i32>* [ %v28, %b1 ], [ %v116, %b6 ]
+  %v59 = phi ptr [ %a4, %b1 ], [ %v122, %b6 ]
+  %v60 = phi ptr [ %v37, %b1 ], [ %v114, %b6 ]
+  %v61 = phi ptr [ %v40, %b1 ], [ %v115, %b6 ]
+  %v62 = phi ptr [ %v27, %b1 ], [ %v116, %b6 ]
   %v63 = phi i32 [ 512, %b1 ], [ %v69, %b6 ]
   %v64 = phi i32 [ -2139062144, %b1 ], [ %v117, %b6 ]
   %v65 = phi <16 x i32> [ %v32, %b1 ], [ %v118, %b6 ]
@@ -104,20 +98,20 @@ b3:                                               ; preds = %b2
 
 b4:                                               ; preds = %b4, %b3
   %v79 = phi i32 [ %v69, %b3 ], [ %v108, %b4 ]
-  %v80 = phi <16 x i32>* [ %v60, %b3 ], [ %v89, %b4 ]
-  %v81 = phi <16 x i32>* [ %v61, %b3 ], [ %v87, %b4 ]
-  %v82 = phi <16 x i32>* [ %v62, %b3 ], [ %v92, %b4 ]
+  %v80 = phi ptr [ %v60, %b3 ], [ %v89, %b4 ]
+  %v81 = phi ptr [ %v61, %b3 ], [ %v87, %b4 ]
+  %v82 = phi ptr [ %v62, %b3 ], [ %v92, %b4 ]
   %v83 = phi i32 [ %v64, %b3 ], [ %v106, %b4 ]
   %v84 = phi <16 x i32> [ %v65, %b3 ], [ %v85, %b4 ]
   %v85 = phi <16 x i32> [ %v66, %b3 ], [ %v93, %b4 ]
   %v86 = phi <16 x i32> [ %v42, %b3 ], [ %v107, %b4 ]
-  %v87 = getelementptr inbounds <16 x i32>, <16 x i32>* %v81, i32 1
-  %v88 = load <16 x i32>, <16 x i32>* %v81, align 64, !tbaa !0
-  %v89 = getelementptr inbounds <16 x i32>, <16 x i32>* %v80, i32 1
-  %v90 = load <16 x i32>, <16 x i32>* %v80, align 64, !tbaa !0
+  %v87 = getelementptr inbounds <16 x i32>, ptr %v81, i32 1
+  %v88 = load <16 x i32>, ptr %v81, align 64, !tbaa !0
+  %v89 = getelementptr inbounds <16 x i32>, ptr %v80, i32 1
+  %v90 = load <16 x i32>, ptr %v80, align 64, !tbaa !0
   %v91 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v85, <16 x i32> %v84, i32 3)
-  %v92 = getelementptr inbounds <16 x i32>, <16 x i32>* %v82, i32 1
-  %v93 = load <16 x i32>, <16 x i32>* %v82, align 64, !tbaa !0
+  %v92 = getelementptr inbounds <16 x i32>, ptr %v82, i32 1
+  %v93 = load <16 x i32>, ptr %v82, align 64, !tbaa !0
   %v94 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v93, <16 x i32> %v85, i32 3)
   %v95 = tail call <16 x i32> @llvm.hexagon.V6.vsububsat(<16 x i32> %v85, <16 x i32> %v21)
   %v96 = tail call <16 x i32> @llvm.hexagon.V6.vaddubsat(<16 x i32> %v85, <16 x i32> %v21)
@@ -138,37 +132,36 @@ b4:                                               ; preds = %b4, %b3
 
 b5:                                               ; preds = %b4
   %v110 = add nuw nsw i32 %v78, 1
-  %v111 = getelementptr <16 x i32>, <16 x i32>* %v62, i32 %v110
-  %v112 = getelementptr <16 x i32>, <16 x i32>* %v60, i32 %v110
-  %v113 = getelementptr <16 x i32>, <16 x i32>* %v61, i32 %v110
+  %v111 = getelementptr <16 x i32>, ptr %v62, i32 %v110
+  %v112 = getelementptr <16 x i32>, ptr %v60, i32 %v110
+  %v113 = getelementptr <16 x i32>, ptr %v61, i32 %v110
   br label %b6
 
 b6:                                               ; preds = %b5, %b2
-  %v114 = phi <16 x i32>* [ %v112, %b5 ], [ %v60, %b2 ]
-  %v115 = phi <16 x i32>* [ %v113, %b5 ], [ %v61, %b2 ]
-  %v116 = phi <16 x i32>* [ %v111, %b5 ], [ %v62, %b2 ]
+  %v114 = phi ptr [ %v112, %b5 ], [ %v60, %b2 ]
+  %v115 = phi ptr [ %v113, %b5 ], [ %v61, %b2 ]
+  %v116 = phi ptr [ %v111, %b5 ], [ %v62, %b2 ]
   %v117 = phi i32 [ %v106, %b5 ], [ %v64, %b2 ]
   %v118 = phi <16 x i32> [ %v85, %b5 ], [ %v65, %b2 ]
   %v119 = phi <16 x i32> [ %v93, %b5 ], [ %v66, %b2 ]
   %v120 = phi <16 x i32> [ %v107, %b5 ], [ %v42, %b2 ]
   %v121 = tail call <16 x i32> @llvm.hexagon.V6.vand(<16 x i32> %v120, <16 x i32> %v67)
-  %v122 = getelementptr inbounds <16 x i32>, <16 x i32>* %v59, i32 1
-  store <16 x i32> %v121, <16 x i32>* %v59, align 64, !tbaa !0
+  %v122 = getelementptr inbounds <16 x i32>, ptr %v59, i32 1
+  store <16 x i32> %v121, ptr %v59, align 64, !tbaa !0
   %v123 = add nsw i32 %v58, -512
   %v124 = icmp sgt i32 %v58, 512
   %v125 = add i32 %v57, 512
   br i1 %v124, label %b2, label %b7
 
 b7:                                               ; preds = %b6
-  %v126 = bitcast i32* %v56 to <16 x i32>*
   br label %b8
 
 b8:                                               ; preds = %b7, %b0
-  %v127 = phi <16 x i32>* [ %v126, %b7 ], [ %v1, %b0 ]
-  %v128 = getelementptr inbounds <16 x i32>, <16 x i32>* %v127, i32 -1
-  %v129 = load <16 x i32>, <16 x i32>* %v128, align 64, !tbaa !0
+  %v127 = phi ptr [ %v56, %b7 ], [ %a4, %b0 ]
+  %v128 = getelementptr inbounds <16 x i32>, ptr %v127, i32 -1
+  %v129 = load <16 x i32>, ptr %v128, align 64, !tbaa !0
   %v130 = tail call <16 x i32> @llvm.hexagon.V6.vand(<16 x i32> %v129, <16 x i32> %v19)
-  store <16 x i32> %v130, <16 x i32>* %v128, align 64, !tbaa !0
+  store <16 x i32> %v130, ptr %v128, align 64, !tbaa !0
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/lcomm.ll b/llvm/test/CodeGen/Hexagon/lcomm.ll
index f7bcde7c206bc..558aadde7e1aa 100644
--- a/llvm/test/CodeGen/Hexagon/lcomm.ll
+++ b/llvm/test/CodeGen/Hexagon/lcomm.ll
@@ -8,7 +8,7 @@ target triple = "hexagon"
 ; Function Attrs: nounwind
 define i32 @f0() #0 {
 b0:
-  %v0 = load i32, i32* @g0, align 4
+  %v0 = load i32, ptr @g0, align 4
   ret i32 %v0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/load-abs.ll b/llvm/test/CodeGen/Hexagon/load-abs.ll
index 49fb1e3ce9ebc..975f982205285 100644
--- a/llvm/test/CodeGen/Hexagon/load-abs.ll
+++ b/llvm/test/CodeGen/Hexagon/load-abs.ll
@@ -12,7 +12,7 @@ target triple = "hexagon-unknown--elf"
 ; CHECK: = memd(##441656)
 define i64 @f0() #0 {
 b0:
-  %v0 = load volatile i64, i64* inttoptr (i32 441656 to i64*)
+  %v0 = load volatile i64, ptr inttoptr (i32 441656 to ptr)
   ret i64 %v0
 }
 
@@ -20,7 +20,7 @@ b0:
 ; CHECK: = memw(##441656)
 define i64 @f1() #0 {
 b0:
-  %v0 = load volatile i32, i32* inttoptr (i32 441656 to i32*)
+  %v0 = load volatile i32, ptr inttoptr (i32 441656 to ptr)
   %v1 = sext i32 %v0 to i64
   ret i64 %v1
 }
@@ -29,7 +29,7 @@ b0:
 ; CHECK: = memw(##441656)
 define i64 @f2() #0 {
 b0:
-  %v0 = load volatile i32, i32* inttoptr (i32 441656 to i32*)
+  %v0 = load volatile i32, ptr inttoptr (i32 441656 to ptr)
   %v1 = zext i32 %v0 to i64
   ret i64 %v1
 }
@@ -38,7 +38,7 @@ b0:
 ; CHECK: = memh(##441656)
 define i64 @f3() #0 {
 b0:
-  %v0 = load volatile i16, i16* inttoptr (i32 441656 to i16*)
+  %v0 = load volatile i16, ptr inttoptr (i32 441656 to ptr)
   %v1 = sext i16 %v0 to i64
   ret i64 %v1
 }
@@ -47,7 +47,7 @@ b0:
 ; CHECK: = memuh(##441656)
 define i64 @f4() #0 {
 b0:
-  %v0 = load volatile i16, i16* inttoptr (i32 441656 to i16*)
+  %v0 = load volatile i16, ptr inttoptr (i32 441656 to ptr)
   %v1 = zext i16 %v0 to i64
   ret i64 %v1
 }
@@ -56,7 +56,7 @@ b0:
 ; CHECK: = memb(##441656)
 define i64 @f5() #0 {
 b0:
-  %v0 = load volatile i8, i8* inttoptr (i32 441656 to i8*)
+  %v0 = load volatile i8, ptr inttoptr (i32 441656 to ptr)
   %v1 = sext i8 %v0 to i64
   ret i64 %v1
 }
@@ -65,7 +65,7 @@ b0:
 ; CHECK: = memub(##441656)
 define i64 @f6() #0 {
 b0:
-  %v0 = load volatile i8, i8* inttoptr (i32 441656 to i8*)
+  %v0 = load volatile i8, ptr inttoptr (i32 441656 to ptr)
   %v1 = zext i8 %v0 to i64
   ret i64 %v1
 }
@@ -74,7 +74,7 @@ b0:
 ; CHECK: = memd(##g3)
 define i64 @f7() #0 {
 b0:
-  %v0 = load volatile i64, i64* @g3
+  %v0 = load volatile i64, ptr @g3
   ret i64 %v0
 }
 
@@ -82,7 +82,7 @@ b0:
 ; CHECK: = memw(##g2)
 define i64 @f8() #0 {
 b0:
-  %v0 = load volatile i32, i32* @g2
+  %v0 = load volatile i32, ptr @g2
   %v1 = sext i32 %v0 to i64
   ret i64 %v1
 }
@@ -91,7 +91,7 @@ b0:
 ; CHECK: = memw(##g2)
 define i64 @f9() #0 {
 b0:
-  %v0 = load volatile i32, i32* @g2
+  %v0 = load volatile i32, ptr @g2
   %v1 = zext i32 %v0 to i64
   ret i64 %v1
 }
@@ -100,7 +100,7 @@ b0:
 ; CHECK: = memh(##g1)
 define i64 @f10() #0 {
 b0:
-  %v0 = load volatile i16, i16* @g1
+  %v0 = load volatile i16, ptr @g1
   %v1 = sext i16 %v0 to i64
   ret i64 %v1
 }
@@ -109,7 +109,7 @@ b0:
 ; CHECK: = memuh(##g1)
 define i64 @f11() #0 {
 b0:
-  %v0 = load volatile i16, i16* @g1
+  %v0 = load volatile i16, ptr @g1
   %v1 = zext i16 %v0 to i64
   ret i64 %v1
 }
@@ -118,7 +118,7 @@ b0:
 ; CHECK: = memb(##g0)
 define i64 @f12() #0 {
 b0:
-  %v0 = load volatile i8, i8* @g0
+  %v0 = load volatile i8, ptr @g0
   %v1 = sext i8 %v0 to i64
   ret i64 %v1
 }
@@ -127,7 +127,7 @@ b0:
 ; CHECK: = memub(##g0)
 define i64 @f13() #0 {
 b0:
-  %v0 = load volatile i8, i8* @g0
+  %v0 = load volatile i8, ptr @g0
   %v1 = zext i8 %v0 to i64
   ret i64 %v1
 }

diff  --git a/llvm/test/CodeGen/Hexagon/loadi1-G0.ll b/llvm/test/CodeGen/Hexagon/loadi1-G0.ll
index 1116341c92ba2..c3c0983a1e584 100644
--- a/llvm/test/CodeGen/Hexagon/loadi1-G0.ll
+++ b/llvm/test/CodeGen/Hexagon/loadi1-G0.ll
@@ -10,7 +10,7 @@ target triple = "hexagon-unknown-linux-gnu"
 
 define i32 @test_sextloadi1_32() {
 entry:
-  %0 = load i1, i1* @flag, align 4
+  %0 = load i1, ptr @flag, align 4
   %1 = sext i1 %0 to i32
   ret i32 %1
 }
@@ -19,7 +19,7 @@ entry:
 
 define i16 @test_zextloadi1_16() {
 entry:
-  %0 = load i1, i1* @flag, align 4
+  %0 = load i1, ptr @flag, align 4
   %1 = zext i1 %0 to i16
   ret i16 %1
 }
@@ -27,7 +27,7 @@ entry:
 
 define i32 @test_zextloadi1_32() {
 entry:
-  %0 = load i1, i1* @flag, align 4
+  %0 = load i1, ptr @flag, align 4
   %1 = zext i1 %0 to i32
   ret i32 %1
 }
@@ -35,7 +35,7 @@ entry:
 
 define i64 @test_zextloadi1_64() {
 entry:
-  %0 = load i1, i1* @flag, align 4
+  %0 = load i1, ptr @flag, align 4
   %1 = zext i1 %0 to i64
   ret i64 %1
 }

diff  --git a/llvm/test/CodeGen/Hexagon/loadi1-v4-G0.ll b/llvm/test/CodeGen/Hexagon/loadi1-v4-G0.ll
index b7df1a125fb0c..2dd62d67232f6 100644
--- a/llvm/test/CodeGen/Hexagon/loadi1-v4-G0.ll
+++ b/llvm/test/CodeGen/Hexagon/loadi1-v4-G0.ll
@@ -10,7 +10,7 @@ target triple = "hexagon-unknown-linux-gnu"
 
 define i32 @test_sextloadi1_32() {
 entry:
-  %0 = load i1, i1* @flag, align 4
+  %0 = load i1, ptr @flag, align 4
   %1 = sext i1 %0 to i32
   ret i32 %1
 }
@@ -19,7 +19,7 @@ entry:
 
 define i16 @test_zextloadi1_16() {
 entry:
-  %0 = load i1, i1* @flag, align 4
+  %0 = load i1, ptr @flag, align 4
   %1 = zext i1 %0 to i16
   ret i16 %1
 }
@@ -27,7 +27,7 @@ entry:
 
 define i32 @test_zextloadi1_32() {
 entry:
-  %0 = load i1, i1* @flag, align 4
+  %0 = load i1, ptr @flag, align 4
   %1 = zext i1 %0 to i32
   ret i32 %1
 }
@@ -35,7 +35,7 @@ entry:
 
 define i64 @test_zextloadi1_64() {
 entry:
-  %0 = load i1, i1* @flag, align 4
+  %0 = load i1, ptr @flag, align 4
   %1 = zext i1 %0 to i64
   ret i64 %1
 }

diff  --git a/llvm/test/CodeGen/Hexagon/loadi1-v4.ll b/llvm/test/CodeGen/Hexagon/loadi1-v4.ll
index 15b056040a42f..f99b7e1625a3d 100644
--- a/llvm/test/CodeGen/Hexagon/loadi1-v4.ll
+++ b/llvm/test/CodeGen/Hexagon/loadi1-v4.ll
@@ -8,7 +8,7 @@ target triple = "hexagon-unknown-linux-gnu"
 
 define i32 @test_sextloadi1_32() {
 entry:
-  %0 = load i1, i1* @flag, align 4
+  %0 = load i1, ptr @flag, align 4
 ; CHECK: memub
   %1 = sext i1 %0 to i32
   ret i32 %1
@@ -18,7 +18,7 @@ entry:
 
 define i16 @test_zextloadi1_16() {
 entry:
-  %0 = load i1, i1* @flag, align 4
+  %0 = load i1, ptr @flag, align 4
 ; CHECK: memub
   %1 = zext i1 %0 to i16
   ret i16 %1
@@ -27,7 +27,7 @@ entry:
 
 define i32 @test_zextloadi1_32() {
 entry:
-  %0 = load i1, i1* @flag, align 4
+  %0 = load i1, ptr @flag, align 4
 ; CHECK: memub
   %1 = zext i1 %0 to i32
   ret i32 %1
@@ -36,7 +36,7 @@ entry:
 
 define i64 @test_zextloadi1_64() {
 entry:
-  %0 = load i1, i1* @flag, align 4
+  %0 = load i1, ptr @flag, align 4
 ; CHECK: memub
   %1 = zext i1 %0 to i64
   ret i64 %1

diff  --git a/llvm/test/CodeGen/Hexagon/loadi1.ll b/llvm/test/CodeGen/Hexagon/loadi1.ll
index 38c1dfec83296..9914a498b5344 100644
--- a/llvm/test/CodeGen/Hexagon/loadi1.ll
+++ b/llvm/test/CodeGen/Hexagon/loadi1.ll
@@ -8,7 +8,7 @@ target triple = "hexagon-unknown-linux-gnu"
 
 define i32 @test_sextloadi1_32() {
 entry:
-  %0 = load i1, i1* @flag, align 4
+  %0 = load i1, ptr @flag, align 4
 ; CHECK: memub
   %1 = sext i1 %0 to i32
   ret i32 %1
@@ -18,7 +18,7 @@ entry:
 
 define i16 @test_zextloadi1_16() {
 entry:
-  %0 = load i1, i1* @flag, align 4
+  %0 = load i1, ptr @flag, align 4
 ; CHECK: memub
   %1 = zext i1 %0 to i16
   ret i16 %1
@@ -27,7 +27,7 @@ entry:
 
 define i32 @test_zextloadi1_32() {
 entry:
-  %0 = load i1, i1* @flag, align 4
+  %0 = load i1, ptr @flag, align 4
 ; CHECK: memub
   %1 = zext i1 %0 to i32
   ret i32 %1
@@ -36,7 +36,7 @@ entry:
 
 define i64 @test_zextloadi1_64() {
 entry:
-  %0 = load i1, i1* @flag, align 4
+  %0 = load i1, ptr @flag, align 4
 ; CHECK: memub
   %1 = zext i1 %0 to i64
   ret i64 %1

diff  --git a/llvm/test/CodeGen/Hexagon/local-exec.ll b/llvm/test/CodeGen/Hexagon/local-exec.ll
index 36f73efe4f366..b57fb6c5bf85e 100644
--- a/llvm/test/CodeGen/Hexagon/local-exec.ll
+++ b/llvm/test/CodeGen/Hexagon/local-exec.ll
@@ -11,12 +11,12 @@ target triple = "hexagon-unknown--elf"
 define i32 @f0() #0 {
 b0:
   %v0 = alloca i32, align 4
-  %v1 = alloca i32*, align 4
-  store i32 0, i32* %v0
-  store i32* @g0, i32** %v1, align 4
-  %v2 = load i32, i32* @g1, align 4
-  %v3 = load i32*, i32** %v1, align 4
-  store i32 %v2, i32* %v3, align 4
+  %v1 = alloca ptr, align 4
+  store i32 0, ptr %v0
+  store ptr @g0, ptr %v1, align 4
+  %v2 = load i32, ptr @g1, align 4
+  %v3 = load ptr, ptr %v1, align 4
+  store i32 %v2, ptr %v3, align 4
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll
index 728d2cf66bd39..c7110263c6580 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll
@@ -4,26 +4,24 @@
 ; CHECK: call void @llvm.memmove
 
 ; Function Attrs: norecurse nounwind
-define void @foo(i32* nocapture %A, i32* nocapture readonly %B, i32 %n) #0 {
+define void @foo(ptr nocapture %A, ptr nocapture readonly %B, i32 %n) #0 {
 entry:
   %cmp1 = icmp sgt i32 %n, 0
   br i1 %cmp1, label %for.body.preheader, label %for.end
 
 for.body.preheader:                               ; preds = %entry
-  %arrayidx.gep = getelementptr i32, i32* %B, i32 0
-  %arrayidx1.gep = getelementptr i32, i32* %A, i32 0
   br label %for.body
 
 for.body:                                         ; preds = %for.body.preheader, %for.body
-  %arrayidx.phi = phi i32* [ %arrayidx.gep, %for.body.preheader ], [ %arrayidx.inc, %for.body ]
-  %arrayidx1.phi = phi i32* [ %arrayidx1.gep, %for.body.preheader ], [ %arrayidx1.inc, %for.body ]
+  %arrayidx.phi = phi ptr [ %B, %for.body.preheader ], [ %arrayidx.inc, %for.body ]
+  %arrayidx1.phi = phi ptr [ %A, %for.body.preheader ], [ %arrayidx1.inc, %for.body ]
   %i.02 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
-  %0 = load i32, i32* %arrayidx.phi, align 4
-  store i32 %0, i32* %arrayidx1.phi, align 4
+  %0 = load i32, ptr %arrayidx.phi, align 4
+  store i32 %0, ptr %arrayidx1.phi, align 4
   %inc = add nuw nsw i32 %i.02, 1
   %exitcond = icmp ne i32 %inc, %n
-  %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1
-  %arrayidx1.inc = getelementptr i32, i32* %arrayidx1.phi, i32 1
+  %arrayidx.inc = getelementptr i32, ptr %arrayidx.phi, i32 1
+  %arrayidx1.inc = getelementptr i32, ptr %arrayidx1.phi, i32 1
   br i1 %exitcond, label %for.body, label %for.end.loopexit
 
 for.end.loopexit:                                 ; preds = %for.body

diff  --git a/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll
index d47b73d2ee891..234e4f56b5d81 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll
@@ -1,7 +1,7 @@
 ; RUN: opt -hexagon-loop-idiom -S -mtriple hexagon-unknown-elf < %s \
 ; RUN:  | FileCheck %s
 
-define void @PR14241(i32* %s, i64 %size) #0 {
+define void @PR14241(ptr %s, i64 %size) #0 {
 ; Ensure that we don't form a memcpy for strided loops. Briefly, when we taught
 ; LoopIdiom about memmove and strided loops, this got miscompiled into a memcpy
 ; instead of a memmove. If we get the memmove transform back, this will catch
@@ -11,21 +11,20 @@ define void @PR14241(i32* %s, i64 %size) #0 {
 
 entry:
   %end.idx = add i64 %size, -1
-  %end.ptr = getelementptr inbounds i32, i32* %s, i64 %end.idx
+  %end.ptr = getelementptr inbounds i32, ptr %s, i64 %end.idx
   br label %while.body
 ; CHECK-NOT: memcpy
 ; CHECK: memmove
 
 while.body:
-  %phi.ptr = phi i32* [ %s, %entry ], [ %next.ptr, %while.body ]
-  %src.ptr = getelementptr inbounds i32, i32* %phi.ptr, i64 1
-  %val = load i32, i32* %src.ptr, align 4
+  %phi.ptr = phi ptr [ %s, %entry ], [ %next.ptr, %while.body ]
+  %src.ptr = getelementptr inbounds i32, ptr %phi.ptr, i64 1
+  %val = load i32, ptr %src.ptr, align 4
 ; CHECK: load
-  %dst.ptr = getelementptr inbounds i32, i32* %phi.ptr, i64 0
-  store i32 %val, i32* %dst.ptr, align 4
+  store i32 %val, ptr %phi.ptr, align 4
 ; CHECK: store
-  %next.ptr = getelementptr inbounds i32, i32* %phi.ptr, i64 1
-  %cmp = icmp eq i32* %next.ptr, %end.ptr
+  %next.ptr = getelementptr inbounds i32, ptr %phi.ptr, i64 1
+  %cmp = icmp eq ptr %next.ptr, %end.ptr
   br i1 %cmp, label %exit, label %while.body
 
 exit:

diff  --git a/llvm/test/CodeGen/Hexagon/loop-idiom/lcssa.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/lcssa.ll
index 48632fde1368b..cb34d9c04fb0d 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/lcssa.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/lcssa.ll
@@ -29,12 +29,12 @@ do.end329:
 
 do.body330:
   %row_width.7 = phi i32 [ %sub325.lcssa, %do.end329 ], [ %dec334, %do.body330 ]
-  %sp.5 = phi i8* [ undef, %do.end329 ], [ %incdec.ptr331, %do.body330 ]
-  %dp.addr.5 = phi i8* [ undef, %do.end329 ], [ %incdec.ptr332, %do.body330 ]
-  %0 = load i8, i8* %sp.5, align 1
-  store i8 %0, i8* %dp.addr.5, align 1
-  %incdec.ptr332 = getelementptr inbounds i8, i8* %dp.addr.5, i32 1
-  %incdec.ptr331 = getelementptr inbounds i8, i8* %sp.5, i32 1
+  %sp.5 = phi ptr [ undef, %do.end329 ], [ %incdec.ptr331, %do.body330 ]
+  %dp.addr.5 = phi ptr [ undef, %do.end329 ], [ %incdec.ptr332, %do.body330 ]
+  %0 = load i8, ptr %sp.5, align 1
+  store i8 %0, ptr %dp.addr.5, align 1
+  %incdec.ptr332 = getelementptr inbounds i8, ptr %dp.addr.5, i32 1
+  %incdec.ptr331 = getelementptr inbounds i8, ptr %sp.5, i32 1
   %dec334 = add i32 %row_width.7, -1
   %cmp335 = icmp eq i32 %dec334, 0
   br i1 %cmp335, label %if.end375, label %do.body330

diff  --git a/llvm/test/CodeGen/Hexagon/loop-idiom/memmove-rt-check.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/memmove-rt-check.ll
index 299a910dd5139..7a7d1d9b1a865 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/memmove-rt-check.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/memmove-rt-check.ll
@@ -4,8 +4,8 @@
 
 ; CHECK: b7.old:
 ; CHECK:   [[LEN:%[0-9]+]] = shl nuw i32 %len, 3
-; CHECK:   [[SRC:%[0-9]+]] = ptrtoint i8* %src to i32
-; CHECK:   [[DST:%[0-9]+]] = ptrtoint i8* %dst to i32
+; CHECK:   [[SRC:%[0-9]+]] = ptrtoint ptr %src to i32
+; CHECK:   [[DST:%[0-9]+]] = ptrtoint ptr %dst to i32
 ; CHECK:   [[ULT:%[0-9]+]] = icmp ult i32 [[DST]], [[SRC]]
 ; CHECK:   [[SUB:%[0-9]+]] = sub i32 [[DST]], [[SRC]]
 ; CHECK:   [[SLT:%[0-9]+]] = icmp sle i32 [[LEN]], [[SUB]]
@@ -15,10 +15,8 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define void @fred(i8* %dst, i8* %src, i32 %len) #0 {
+define void @fred(ptr %dst, ptr %src, i32 %len) #0 {
 b3:
-  %v4 = bitcast i8* %src to i64*
-  %v5 = bitcast i8* %dst to i64*
   %v6 = icmp slt i32 0, %len
   br i1 %v6, label %b7, label %b16
 
@@ -27,10 +25,10 @@ b7:                                               ; preds = %b3
 
 b8:                                               ; preds = %b13, %b7
   %v9 = phi i32 [ 0, %b7 ], [ %v14, %b13 ]
-  %v10 = getelementptr inbounds i64, i64* %v4, i32 %v9
-  %v11 = load i64, i64* %v10, align 8
-  %v12 = getelementptr inbounds i64, i64* %v5, i32 %v9
-  store i64 %v11, i64* %v12, align 8
+  %v10 = getelementptr inbounds i64, ptr %src, i32 %v9
+  %v11 = load i64, ptr %v10, align 8
+  %v12 = getelementptr inbounds i64, ptr %dst, i32 %v9
+  store i64 %v11, ptr %v12, align 8
   br label %b13
 
 b13:                                              ; preds = %b8

diff  --git a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll
index 92f3b6048bfb1..1934ced7e7aed 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll
@@ -8,22 +8,22 @@ target triple = "hexagon"
 
 %struct.0 = type { [120 x i16], [80 x i16], [80 x i16], [80 x i16], [80 x i16], [80 x i16], [40 x i16], [40 x i16], [40 x i16], [40 x i16], [40 x i16], [40 x i16] }
 
-define void @fred(%struct.0* %demod_state) local_unnamed_addr #0 {
+define void @fred(ptr %demod_state) local_unnamed_addr #0 {
 entry:
   br label %for.body309
 
 for.body309:                                      ; preds = %for.body309, %entry
   %max_
diff .0300 = phi i16 [ %max_
diff .1, %for.body309 ], [ 0, %entry ]
-  %arrayidx322.phi = phi i16* [ undef, %entry ], [ %arrayidx322.inc, %for.body309 ]
-  %arrayidx331.phi = phi i16* [ undef, %entry ], [ %arrayidx331.inc, %for.body309 ]
+  %arrayidx322.phi = phi ptr [ undef, %entry ], [ %arrayidx322.inc, %for.body309 ]
+  %arrayidx331.phi = phi ptr [ undef, %entry ], [ %arrayidx331.inc, %for.body309 ]
   %lag.4299.apmt = phi i32 [ %inc376.apmt, %for.body309 ], [ 0, %entry ]
-  %0 = load i16, i16* %arrayidx322.phi, align 2
+  %0 = load i16, ptr %arrayidx322.phi, align 2
   %conv323 = sext i16 %0 to i32
   %sub324 = sub nsw i32 0, %conv323
   %ispos258 = icmp sgt i32 %sub324, -1
   %1 = select i1 %ispos258, i32 %sub324, i32 0
   %add326 = add nsw i32 %1, 0
-  %2 = load i16, i16* %arrayidx331.phi, align 2
+  %2 = load i16, ptr %arrayidx331.phi, align 2
   %conv332 = sext i16 %2 to i32
   %sub333 = sub nsw i32 0, %conv332
   %ispos260 = icmp sgt i32 %sub333, -1
@@ -50,8 +50,8 @@ for.body309:                                      ; preds = %for.body309, %entry
   %max_
diff .1 = select i1 %cmp371, i16 %conv364, i16 %max_
diff .0300
   %inc376.apmt = add nuw nsw i32 %lag.4299.apmt, 1
   %exitcond331 = icmp ne i32 %inc376.apmt, 40
-  %arrayidx322.inc = getelementptr i16, i16* %arrayidx322.phi, i32 1
-  %arrayidx331.inc = getelementptr i16, i16* %arrayidx331.phi, i32 1
+  %arrayidx322.inc = getelementptr i16, ptr %arrayidx322.phi, i32 1
+  %arrayidx331.inc = getelementptr i16, ptr %arrayidx331.phi, i32 1
   br i1 %exitcond331, label %for.body309, label %for.end377
 
 for.end377:                                       ; preds = %for.body309
@@ -60,12 +60,12 @@ for.end377:                                       ; preds = %for.body309
   br label %for.body405
 
 for.body405:                                      ; preds = %if.end437, %for.end377
-  %arrayidx412 = getelementptr inbounds %struct.0, %struct.0* %demod_state, i32 0, i32 11, i32 undef
+  %arrayidx412 = getelementptr inbounds %struct.0, ptr %demod_state, i32 0, i32 11, i32 undef
   br i1 %cmp407, label %if.then409, label %if.end437
 
 if.then409:                                       ; preds = %for.body405
-  %arrayidx416 = getelementptr inbounds [40 x i16], [40 x i16]* null, i32 0, i32 undef
-  %7 = load i16, i16* %arrayidx416, align 2
+  %arrayidx416 = getelementptr inbounds [40 x i16], ptr null, i32 0, i32 undef
+  %7 = load i16, ptr %arrayidx416, align 2
   %conv417 = sext i16 %7 to i32
   %shl = shl i32 %conv417, 4
   %mul419 = mul nsw i32 %shl, 655
@@ -76,7 +76,7 @@ if.end437:                                        ; preds = %if.then409, %for.bo
   %mul431.sink = phi i32 [ %add420, %if.then409 ], [ undef, %for.body405 ]
   %shr432257 = lshr i32 %mul431.sink, 15
   %conv433 = trunc i32 %shr432257 to i16
-  store i16 %conv433, i16* %arrayidx412, align 2
+  store i16 %conv433, ptr %arrayidx412, align 2
   br label %for.body405
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-shiftconv-fail.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-shiftconv-fail.ll
index 0abf8f8732009..e4b2b5a298edf 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-shiftconv-fail.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-shiftconv-fail.ll
@@ -35,8 +35,8 @@ b3:                                               ; preds = %b3, %b1
 
 b13:                                              ; preds = %b3
   %v14 = phi i32 [ %v10, %b3 ]
-  %v15 = getelementptr inbounds [256 x i32], [256 x i32]* @A, i32 0, i32 %v2
-  store i32 %v14, i32* %v15, align 4
+  %v15 = getelementptr inbounds [256 x i32], ptr @A, i32 0, i32 %v2
+  store i32 %v14, ptr %v15, align 4
   %v16 = add nuw nsw i32 %v2, 1
   %v17 = icmp ne i32 %v16, 256
   br i1 %v17, label %b1, label %b18

diff  --git a/llvm/test/CodeGen/Hexagon/loop-prefetch.ll b/llvm/test/CodeGen/Hexagon/loop-prefetch.ll
index 24518421c4452..76e82688e5443 100644
--- a/llvm/test/CodeGen/Hexagon/loop-prefetch.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-prefetch.ll
@@ -3,20 +3,20 @@
 
 target triple = "hexagon"
 
-define void @copy(i32* nocapture %d, i32* nocapture readonly %s, i32 %n) local_unnamed_addr #0 {
+define void @copy(ptr nocapture %d, ptr nocapture readonly %s, i32 %n) local_unnamed_addr #0 {
 entry:
   %tobool2 = icmp eq i32 %n, 0
   br i1 %tobool2, label %while.end, label %while.body
 
 while.body:                                       ; preds = %entry, %while.body
   %n.addr.05 = phi i32 [ %dec, %while.body ], [ %n, %entry ]
-  %s.addr.04 = phi i32* [ %incdec.ptr, %while.body ], [ %s, %entry ]
-  %d.addr.03 = phi i32* [ %incdec.ptr1, %while.body ], [ %d, %entry ]
+  %s.addr.04 = phi ptr [ %incdec.ptr, %while.body ], [ %s, %entry ]
+  %d.addr.03 = phi ptr [ %incdec.ptr1, %while.body ], [ %d, %entry ]
   %dec = add i32 %n.addr.05, -1
-  %incdec.ptr = getelementptr inbounds i32, i32* %s.addr.04, i32 1
-  %0 = load i32, i32* %s.addr.04, align 4
-  %incdec.ptr1 = getelementptr inbounds i32, i32* %d.addr.03, i32 1
-  store i32 %0, i32* %d.addr.03, align 4
+  %incdec.ptr = getelementptr inbounds i32, ptr %s.addr.04, i32 1
+  %0 = load i32, ptr %s.addr.04, align 4
+  %incdec.ptr1 = getelementptr inbounds i32, ptr %d.addr.03, i32 1
+  store i32 %0, ptr %d.addr.03, align 4
   %tobool = icmp eq i32 %dec, 0
   br i1 %tobool, label %while.end, label %while.body
 

diff  --git a/llvm/test/CodeGen/Hexagon/loop-rotate-bug.ll b/llvm/test/CodeGen/Hexagon/loop-rotate-bug.ll
index f49f782f8a182..6a2aa76bc2bac 100644
--- a/llvm/test/CodeGen/Hexagon/loop-rotate-bug.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-rotate-bug.ll
@@ -5,63 +5,61 @@
 ; CHECK: cmp.eq
 ; CHECK: cmp.eq
 
-%s.0 = type { i8*, i32, %s.0* }
+%s.0 = type { ptr, i32, ptr }
 
- at g0 = external global %s.0**, align 4
+ at g0 = external global ptr, align 4
 @g1 = private global [4 x i64] zeroinitializer, section "__llvm_prf_cnts", align 8
 
-declare void @f0(%s.0*)
+declare void @f0(ptr)
 
 define i32 @f1() #0 {
 b0:
-  %v0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @g1, i32 0, i32 0), align 8
+  %v0 = load i64, ptr @g1, align 8
   %v1 = add i64 %v0, 1
-  store i64 %v1, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @g1, i32 0, i32 0), align 8
+  store i64 %v1, ptr @g1, align 8
   br label %b1
 
 b1:                                               ; preds = %b6, %b0
   %v2 = phi i32 [ 0, %b0 ], [ %v27, %b6 ]
-  %v3 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @g1, i32 0, i32 1), align 8
+  %v3 = load i64, ptr getelementptr inbounds ([4 x i64], ptr @g1, i32 0, i32 1), align 8
   %v4 = add i64 %v3, 1
-  store i64 %v4, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @g1, i32 0, i32 1), align 8
-  %v5 = load %s.0**, %s.0*** @g0, align 4
-  %v6 = getelementptr inbounds %s.0*, %s.0** %v5, i32 %v2
-  %v7 = load %s.0*, %s.0** %v6, align 4
-  %v8 = icmp eq %s.0* %v7, null
+  store i64 %v4, ptr getelementptr inbounds ([4 x i64], ptr @g1, i32 0, i32 1), align 8
+  %v5 = load ptr, ptr @g0, align 4
+  %v6 = getelementptr inbounds ptr, ptr %v5, i32 %v2
+  %v7 = load ptr, ptr %v6, align 4
+  %v8 = icmp eq ptr %v7, null
   br i1 %v8, label %b6, label %b2
 
 b2:                                               ; preds = %b1
-  %v9 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @g1, i32 0, i32 2), align 8
+  %v9 = load i64, ptr getelementptr inbounds ([4 x i64], ptr @g1, i32 0, i32 2), align 8
   %v10 = add i64 %v9, 1
-  store i64 %v10, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @g1, i32 0, i32 2), align 8
-  %v11 = bitcast %s.0* %v7 to %s.0*
-  %v12 = getelementptr inbounds %s.0, %s.0* %v11, i32 0, i32 2
-  %v13 = load %s.0*, %s.0** %v12, align 4
-  %v14 = icmp eq %s.0* %v13, null
-  %v15 = getelementptr inbounds %s.0, %s.0* %v11, i32 0, i32 2
+  store i64 %v10, ptr getelementptr inbounds ([4 x i64], ptr @g1, i32 0, i32 2), align 8
+  %v12 = getelementptr inbounds %s.0, ptr %v7, i32 0, i32 2
+  %v13 = load ptr, ptr %v12, align 4
+  %v14 = icmp eq ptr %v13, null
+  %v15 = getelementptr inbounds %s.0, ptr %v7, i32 0, i32 2
   br i1 %v14, label %b5, label %b3
 
 b3:                                               ; preds = %b2
   br label %b4
 
 b4:                                               ; preds = %b4, %b3
-  %v16 = phi %s.0** [ %v25, %b4 ], [ %v15, %b3 ]
-  %v17 = phi %s.0* [ %v20, %b4 ], [ %v7, %b3 ]
-  %v18 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @g1, i32 0, i32 3), align 8
+  %v16 = phi ptr [ %v25, %b4 ], [ %v15, %b3 ]
+  %v17 = phi ptr [ %v20, %b4 ], [ %v7, %b3 ]
+  %v18 = load i64, ptr getelementptr inbounds ([4 x i64], ptr @g1, i32 0, i32 3), align 8
   %v19 = add i64 %v18, 1
-  store i64 %v19, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @g1, i32 0, i32 3), align 8
-  %v20 = load %s.0*, %s.0** %v16, align 4
-  tail call void @f0(%s.0* %v17)
-  %v21 = bitcast %s.0* %v20 to %s.0*
-  %v22 = getelementptr inbounds %s.0, %s.0* %v21, i32 0, i32 2
-  %v23 = load %s.0*, %s.0** %v22, align 4
-  %v24 = icmp eq %s.0* %v23, null
-  %v25 = getelementptr inbounds %s.0, %s.0* %v21, i32 0, i32 2
+  store i64 %v19, ptr getelementptr inbounds ([4 x i64], ptr @g1, i32 0, i32 3), align 8
+  %v20 = load ptr, ptr %v16, align 4
+  tail call void @f0(ptr %v17)
+  %v22 = getelementptr inbounds %s.0, ptr %v20, i32 0, i32 2
+  %v23 = load ptr, ptr %v22, align 4
+  %v24 = icmp eq ptr %v23, null
+  %v25 = getelementptr inbounds %s.0, ptr %v20, i32 0, i32 2
   br i1 %v24, label %b5, label %b4
 
 b5:                                               ; preds = %b4, %b2
-  %v26 = phi %s.0* [ %v7, %b2 ], [ %v20, %b4 ]
-  tail call void @f0(%s.0* %v26)
+  %v26 = phi ptr [ %v7, %b2 ], [ %v20, %b4 ]
+  tail call void @f0(ptr %v26)
   br label %b6
 
 b6:                                               ; preds = %b5, %b1
@@ -70,8 +68,8 @@ b6:                                               ; preds = %b5, %b1
   br i1 %v28, label %b7, label %b1
 
 b7:                                               ; preds = %b6
-  %v29 = load %s.0*, %s.0** bitcast (%s.0*** @g0 to %s.0**), align 4
-  tail call void @f0(%s.0* %v29)
+  %v29 = load ptr, ptr @g0, align 4
+  tail call void @f0(ptr %v29)
   ret i32 undef
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/loop-rotate-liveins.ll b/llvm/test/CodeGen/Hexagon/loop-rotate-liveins.ll
index 59ea95adb828d..cdca097113570 100644
--- a/llvm/test/CodeGen/Hexagon/loop-rotate-liveins.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-rotate-liveins.ll
@@ -37,7 +37,7 @@ b5:                                               ; preds = %b4, %b4
   unreachable
 
 b6:                                               ; preds = %b4
-  call void (i8*, i8*, ...) @f1(i8* nonnull undef, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @g1, i32 0, i32 0), i32* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 45)) #0
+  call void (ptr, ptr, ...) @f1(ptr nonnull undef, ptr @g1, ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 45)) #0
   br label %b7
 
 b7:                                               ; preds = %b6
@@ -77,15 +77,15 @@ b15:                                              ; preds = %b13, %b13
   unreachable
 
 b16:                                              ; preds = %b17, %b14
-  %v0 = phi i8* [ %v2, %b17 ], [ undef, %b14 ]
-  %v1 = load i8, i8* %v0, align 1
+  %v0 = phi ptr [ %v2, %b17 ], [ undef, %b14 ]
+  %v1 = load i8, ptr %v0, align 1
   switch i8 %v1, label %b17 [
     i8 32, label %b18
     i8 9, label %b18
   ]
 
 b17:                                              ; preds = %b16
-  %v2 = getelementptr inbounds i8, i8* %v0, i32 1
+  %v2 = getelementptr inbounds i8, ptr %v0, i32 1
   br label %b16
 
 b18:                                              ; preds = %b16, %b16
@@ -93,6 +93,6 @@ b18:                                              ; preds = %b16, %b16
 }
 
 ; Function Attrs: nounwind
-declare void @f1(i8* nocapture readonly, i8* nocapture readonly, ...) local_unnamed_addr #0
+declare void @f1(ptr nocapture readonly, ptr nocapture readonly, ...) local_unnamed_addr #0
 
 attributes #0 = { nounwind "target-cpu"="hexagonv62" }

diff  --git a/llvm/test/CodeGen/Hexagon/loop_correctness.ll b/llvm/test/CodeGen/Hexagon/loop_correctness.ll
index 7f484faae4334..35e7c90d1bb74 100644
--- a/llvm/test/CodeGen/Hexagon/loop_correctness.ll
+++ b/llvm/test/CodeGen/Hexagon/loop_correctness.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -march=hexagon -O3 -hexagon-instsimplify=0 < %s | FileCheck %s
 
-define void @f0(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f0(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 ; CHECK-LABEL: f0:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -35,7 +35,7 @@ b3:                                               ; preds = %b2
   ret void
 }
 
-define void @f1(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f1(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 ; CHECK-LABEL: f1:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -69,7 +69,7 @@ b3:                                               ; preds = %b2
   ret void
 }
 
-define void @f2(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f2(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 ; CHECK-LABEL: f2:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -103,7 +103,7 @@ b3:                                               ; preds = %b2
   ret void
 }
 
-define void @f3(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f3(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 ; CHECK-LABEL: f3:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -137,7 +137,7 @@ b3:                                               ; preds = %b2
   ret void
 }
 
-define void @f4(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f4(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 ; CHECK-LABEL: f4:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -171,7 +171,7 @@ b3:                                               ; preds = %b2
   ret void
 }
 
-define void @f5(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+define void @f5(ptr nocapture %a0, i32 %a1, i32 %a2) #0 {
 ; CHECK-LABEL: f5:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {

diff  --git a/llvm/test/CodeGen/Hexagon/lower-extract-subvector.ll b/llvm/test/CodeGen/Hexagon/lower-extract-subvector.ll
index 43b8119f76bc6..9d9435ad4efae 100644
--- a/llvm/test/CodeGen/Hexagon/lower-extract-subvector.ll
+++ b/llvm/test/CodeGen/Hexagon/lower-extract-subvector.ll
@@ -6,12 +6,12 @@
 ; CHECK: vmem
 target triple = "hexagon-unknown--elf"
 
-define void @f0(<64 x i16>* %a0) #0 {
+define void @f0(ptr %a0) #0 {
 b0:
   %v0 = tail call <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32> undef, <32 x i32> undef, i32 -2)
   %v1 = bitcast <64 x i32> %v0 to <128 x i16>
   %v2 = shufflevector <128 x i16> %v1, <128 x i16> undef, <64 x i32> <i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
-  store <64 x i16> %v2, <64 x i16>* %a0, align 128
+  store <64 x i16> %v2, ptr %a0, align 128
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/lsr-post-inc-cross-use-offsets.ll b/llvm/test/CodeGen/Hexagon/lsr-post-inc-cross-use-offsets.ll
index f327586411ec8..472c96d2c85f9 100644
--- a/llvm/test/CodeGen/Hexagon/lsr-post-inc-cross-use-offsets.ll
+++ b/llvm/test/CodeGen/Hexagon/lsr-post-inc-cross-use-offsets.ll
@@ -10,11 +10,11 @@
 
 target triple = "hexagon-unknown--elf"
 
-%0 = type { i8*, i32, i32, i32, i32, %1*, %1*, %1* }
+%0 = type { ptr, i32, i32, i32, i32, ptr, ptr, ptr }
 %1 = type { %2 }
 %2 = type { i64 }
-%3 = type { i8*, i32, i32, i32, i32, i32, i32, i8*, i32, i32* }
-%4 = type { i64, i8*, [4 x i32], [4 x i32], [4 x i32], i32, i8, i8, [6 x i8] }
+%3 = type { ptr, i32, i32, i32, i32, i32, i32, ptr, i32, ptr }
+%4 = type { i64, ptr, [4 x i32], [4 x i32], [4 x i32], i32, i8, i8, [6 x i8] }
 
 @g0 = private unnamed_addr constant [5 x i8] c"Load\00", align 1
 @g1 = private unnamed_addr constant [6 x i8] c"Store\00", align 1
@@ -26,20 +26,20 @@ target triple = "hexagon-unknown--elf"
 @g7 = private unnamed_addr constant [12 x i8] c"End consume\00", align 1
 @g8 = private constant [6 x i8] c"input\00", align 32
 @g9 = private constant [10 x i8] c"dilate3x3\00", align 32
- at g10 = private constant [2 x %0] [%0 { i8* getelementptr inbounds ([6 x i8], [6 x i8]* @g8, i32 0, i32 0), i32 1, i32 2, i32 1, i32 8, %1* null, %1* null, %1* null }, %0 { i8* getelementptr inbounds ([10 x i8], [10 x i8]* @g9, i32 0, i32 0), i32 2, i32 2, i32 1, i32 8, %1* null, %1* null, %1* null }]
+ at g10 = private constant [2 x %0] [%0 { ptr @g8, i32 1, i32 2, i32 1, i32 8, ptr null, ptr null, ptr null }, %0 { ptr @g9, i32 2, i32 2, i32 1, i32 8, ptr null, ptr null, ptr null }]
 @g11 = private constant [64 x i8] c"...............................................................\00", align 32
 
 ; Function Attrs: nounwind
-declare i8* @f0(i8*, i32) #0
+declare ptr @f0(ptr, i32) #0
 
 ; Function Attrs: nounwind
-declare void @f1(i8*, i8*) #0
+declare void @f1(ptr, ptr) #0
 
 ; Function Attrs: nounwind
-declare void @f2(i8*, i8*) #0
+declare void @f2(ptr, ptr) #0
 
 ; Function Attrs: nounwind
-declare i32 @f3(i8*, %3*) #0
+declare i32 @f3(ptr, ptr) #0
 
 ; Function Attrs: nounwind
 declare void @f4() #0
@@ -48,28 +48,28 @@ declare void @f4() #0
 declare void @f5() #0
 
 ; Function Attrs: nounwind
-define i32 @f6(%4* noalias nocapture readonly %a0, %4* noalias nocapture readonly %a1) #0 {
+define i32 @f6(ptr noalias nocapture readonly %a0, ptr noalias nocapture readonly %a1) #0 {
 b0:
-  %v0 = getelementptr inbounds %4, %4* %a0, i32 0, i32 1
-  %v1 = load i8*, i8** %v0, align 4
-  %v2 = getelementptr inbounds %4, %4* %a0, i32 0, i32 3, i32 1
-  %v3 = load i32, i32* %v2, align 4
-  %v4 = getelementptr inbounds %4, %4* %a0, i32 0, i32 4, i32 0
-  %v5 = load i32, i32* %v4, align 4
-  %v6 = getelementptr inbounds %4, %4* %a0, i32 0, i32 4, i32 1
-  %v7 = load i32, i32* %v6, align 4
-  %v8 = getelementptr inbounds %4, %4* %a1, i32 0, i32 1
-  %v9 = load i8*, i8** %v8, align 4
-  %v10 = getelementptr inbounds %4, %4* %a1, i32 0, i32 2, i32 0
-  %v11 = load i32, i32* %v10, align 4
-  %v12 = getelementptr inbounds %4, %4* %a1, i32 0, i32 3, i32 1
-  %v13 = load i32, i32* %v12, align 4
-  %v14 = getelementptr inbounds %4, %4* %a1, i32 0, i32 4, i32 0
-  %v15 = load i32, i32* %v14, align 4
-  %v16 = getelementptr inbounds %4, %4* %a1, i32 0, i32 4, i32 1
-  %v17 = load i32, i32* %v16, align 4
-  %v18 = getelementptr inbounds %4, %4* %a1, i32 0, i32 2, i32 1
-  %v19 = load i32, i32* %v18, align 4
+  %v0 = getelementptr inbounds %4, ptr %a0, i32 0, i32 1
+  %v1 = load ptr, ptr %v0, align 4
+  %v2 = getelementptr inbounds %4, ptr %a0, i32 0, i32 3, i32 1
+  %v3 = load i32, ptr %v2, align 4
+  %v4 = getelementptr inbounds %4, ptr %a0, i32 0, i32 4, i32 0
+  %v5 = load i32, ptr %v4, align 4
+  %v6 = getelementptr inbounds %4, ptr %a0, i32 0, i32 4, i32 1
+  %v7 = load i32, ptr %v6, align 4
+  %v8 = getelementptr inbounds %4, ptr %a1, i32 0, i32 1
+  %v9 = load ptr, ptr %v8, align 4
+  %v10 = getelementptr inbounds %4, ptr %a1, i32 0, i32 2, i32 0
+  %v11 = load i32, ptr %v10, align 4
+  %v12 = getelementptr inbounds %4, ptr %a1, i32 0, i32 3, i32 1
+  %v13 = load i32, ptr %v12, align 4
+  %v14 = getelementptr inbounds %4, ptr %a1, i32 0, i32 4, i32 0
+  %v15 = load i32, ptr %v14, align 4
+  %v16 = getelementptr inbounds %4, ptr %a1, i32 0, i32 4, i32 1
+  %v17 = load i32, ptr %v16, align 4
+  %v18 = getelementptr inbounds %4, ptr %a1, i32 0, i32 2, i32 1
+  %v19 = load i32, ptr %v18, align 4
   %v20 = add nsw i32 %v19, %v17
   %v21 = icmp sgt i32 %v19, 0
   br i1 %v21, label %b1, label %b11, !prof !3
@@ -95,48 +95,39 @@ b2:                                               ; preds = %b5, %b2
   %v36 = sub i32 %v32, %v28
   %v37 = add i32 %v36, %v30
   %v38 = add nsw i32 %v37, -1
-  %v39 = getelementptr inbounds i8, i8* %v1, i32 %v38
-  %v40 = bitcast i8* %v39 to <32 x i32>*
-  %v41 = load <32 x i32>, <32 x i32>* %v40, align 1, !tbaa !4
-  %v42 = getelementptr inbounds i8, i8* %v1, i32 %v37
-  %v43 = bitcast i8* %v42 to <32 x i32>*
-  %v44 = load <32 x i32>, <32 x i32>* %v43, align 1, !tbaa !4
+  %v39 = getelementptr inbounds i8, ptr %v1, i32 %v38
+  %v41 = load <32 x i32>, ptr %v39, align 1, !tbaa !4
+  %v42 = getelementptr inbounds i8, ptr %v1, i32 %v37
+  %v44 = load <32 x i32>, ptr %v42, align 1, !tbaa !4
   %v45 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v41, <32 x i32> %v44)
   %v46 = add nsw i32 %v37, 1
-  %v47 = getelementptr inbounds i8, i8* %v1, i32 %v46
-  %v48 = bitcast i8* %v47 to <32 x i32>*
-  %v49 = load <32 x i32>, <32 x i32>* %v48, align 1, !tbaa !4
+  %v47 = getelementptr inbounds i8, ptr %v1, i32 %v46
+  %v49 = load <32 x i32>, ptr %v47, align 1, !tbaa !4
   %v50 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v45, <32 x i32> %v49)
   %v51 = sub i32 %v33, %v28
   %v52 = add i32 %v51, %v30
   %v53 = add nsw i32 %v52, -1
-  %v54 = getelementptr inbounds i8, i8* %v1, i32 %v53
-  %v55 = bitcast i8* %v54 to <32 x i32>*
-  %v56 = load <32 x i32>, <32 x i32>* %v55, align 1, !tbaa !4
-  %v57 = getelementptr inbounds i8, i8* %v1, i32 %v52
-  %v58 = bitcast i8* %v57 to <32 x i32>*
-  %v59 = load <32 x i32>, <32 x i32>* %v58, align 1, !tbaa !4
+  %v54 = getelementptr inbounds i8, ptr %v1, i32 %v53
+  %v56 = load <32 x i32>, ptr %v54, align 1, !tbaa !4
+  %v57 = getelementptr inbounds i8, ptr %v1, i32 %v52
+  %v59 = load <32 x i32>, ptr %v57, align 1, !tbaa !4
   %v60 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v56, <32 x i32> %v59)
   %v61 = add nsw i32 %v52, 1
-  %v62 = getelementptr inbounds i8, i8* %v1, i32 %v61
-  %v63 = bitcast i8* %v62 to <32 x i32>*
-  %v64 = load <32 x i32>, <32 x i32>* %v63, align 1, !tbaa !4
+  %v62 = getelementptr inbounds i8, ptr %v1, i32 %v61
+  %v64 = load <32 x i32>, ptr %v62, align 1, !tbaa !4
   %v65 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v60, <32 x i32> %v64)
   %v66 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v50, <32 x i32> %v65)
   %v67 = sub i32 %v35, %v28
   %v68 = add i32 %v67, %v30
   %v69 = add nsw i32 %v68, -1
-  %v70 = getelementptr inbounds i8, i8* %v1, i32 %v69
-  %v71 = bitcast i8* %v70 to <32 x i32>*
-  %v72 = load <32 x i32>, <32 x i32>* %v71, align 1, !tbaa !4
-  %v73 = getelementptr inbounds i8, i8* %v1, i32 %v68
-  %v74 = bitcast i8* %v73 to <32 x i32>*
-  %v75 = load <32 x i32>, <32 x i32>* %v74, align 1, !tbaa !4
+  %v70 = getelementptr inbounds i8, ptr %v1, i32 %v69
+  %v72 = load <32 x i32>, ptr %v70, align 1, !tbaa !4
+  %v73 = getelementptr inbounds i8, ptr %v1, i32 %v68
+  %v75 = load <32 x i32>, ptr %v73, align 1, !tbaa !4
   %v76 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v72, <32 x i32> %v75)
   %v77 = add nsw i32 %v68, 1
-  %v78 = getelementptr inbounds i8, i8* %v1, i32 %v77
-  %v79 = bitcast i8* %v78 to <32 x i32>*
-  %v80 = load <32 x i32>, <32 x i32>* %v79, align 1, !tbaa !4
+  %v78 = getelementptr inbounds i8, ptr %v1, i32 %v77
+  %v80 = load <32 x i32>, ptr %v78, align 1, !tbaa !4
   %v81 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v76, <32 x i32> %v80)
   %v82 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v66, <32 x i32> %v81)
   %v83 = mul nsw i32 %v150, %v13
@@ -144,9 +135,8 @@ b2:                                               ; preds = %b5, %b2
   %v85 = add i32 %v84, %v15
   %v86 = sub i32 %v83, %v85
   %v87 = add i32 %v86, %v30
-  %v88 = getelementptr inbounds i8, i8* %v9, i32 %v87
-  %v89 = bitcast i8* %v88 to <32 x i32>*
-  store <32 x i32> %v82, <32 x i32>* %v89, align 1, !tbaa !7
+  %v88 = getelementptr inbounds i8, ptr %v9, i32 %v87
+  store <32 x i32> %v82, ptr %v88, align 1, !tbaa !7
   %v90 = add nuw nsw i32 %v26, 1
   %v91 = icmp eq i32 %v90, %v24
   br i1 %v91, label %b6, label %b2
@@ -157,57 +147,47 @@ b3:                                               ; preds = %b6, %b3
   %v94 = sub i32 %v93, %v28
   %v95 = add i32 %v94, %v32
   %v96 = add nsw i32 %v95, -129
-  %v97 = getelementptr inbounds i8, i8* %v1, i32 %v96
-  %v98 = bitcast i8* %v97 to <32 x i32>*
-  %v99 = load <32 x i32>, <32 x i32>* %v98, align 1, !tbaa !4
+  %v97 = getelementptr inbounds i8, ptr %v1, i32 %v96
+  %v99 = load <32 x i32>, ptr %v97, align 1, !tbaa !4
   %v100 = add nsw i32 %v95, -128
-  %v101 = getelementptr inbounds i8, i8* %v1, i32 %v100
-  %v102 = bitcast i8* %v101 to <32 x i32>*
-  %v103 = load <32 x i32>, <32 x i32>* %v102, align 1, !tbaa !4
+  %v101 = getelementptr inbounds i8, ptr %v1, i32 %v100
+  %v103 = load <32 x i32>, ptr %v101, align 1, !tbaa !4
   %v104 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v99, <32 x i32> %v103)
   %v105 = add nsw i32 %v95, -127
-  %v106 = getelementptr inbounds i8, i8* %v1, i32 %v105
-  %v107 = bitcast i8* %v106 to <32 x i32>*
-  %v108 = load <32 x i32>, <32 x i32>* %v107, align 1, !tbaa !4
+  %v106 = getelementptr inbounds i8, ptr %v1, i32 %v105
+  %v108 = load <32 x i32>, ptr %v106, align 1, !tbaa !4
   %v109 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v104, <32 x i32> %v108)
   %v110 = add i32 %v94, %v33
   %v111 = add nsw i32 %v110, -129
-  %v112 = getelementptr inbounds i8, i8* %v1, i32 %v111
-  %v113 = bitcast i8* %v112 to <32 x i32>*
-  %v114 = load <32 x i32>, <32 x i32>* %v113, align 1, !tbaa !4
+  %v112 = getelementptr inbounds i8, ptr %v1, i32 %v111
+  %v114 = load <32 x i32>, ptr %v112, align 1, !tbaa !4
   %v115 = add nsw i32 %v110, -128
-  %v116 = getelementptr inbounds i8, i8* %v1, i32 %v115
-  %v117 = bitcast i8* %v116 to <32 x i32>*
-  %v118 = load <32 x i32>, <32 x i32>* %v117, align 1, !tbaa !4
+  %v116 = getelementptr inbounds i8, ptr %v1, i32 %v115
+  %v118 = load <32 x i32>, ptr %v116, align 1, !tbaa !4
   %v119 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v114, <32 x i32> %v118)
   %v120 = add nsw i32 %v110, -127
-  %v121 = getelementptr inbounds i8, i8* %v1, i32 %v120
-  %v122 = bitcast i8* %v121 to <32 x i32>*
-  %v123 = load <32 x i32>, <32 x i32>* %v122, align 1, !tbaa !4
+  %v121 = getelementptr inbounds i8, ptr %v1, i32 %v120
+  %v123 = load <32 x i32>, ptr %v121, align 1, !tbaa !4
   %v124 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v119, <32 x i32> %v123)
   %v125 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v109, <32 x i32> %v124)
   %v126 = add i32 %v94, %v35
   %v127 = add nsw i32 %v126, -129
-  %v128 = getelementptr inbounds i8, i8* %v1, i32 %v127
-  %v129 = bitcast i8* %v128 to <32 x i32>*
-  %v130 = load <32 x i32>, <32 x i32>* %v129, align 1, !tbaa !4
+  %v128 = getelementptr inbounds i8, ptr %v1, i32 %v127
+  %v130 = load <32 x i32>, ptr %v128, align 1, !tbaa !4
   %v131 = add nsw i32 %v126, -128
-  %v132 = getelementptr inbounds i8, i8* %v1, i32 %v131
-  %v133 = bitcast i8* %v132 to <32 x i32>*
-  %v134 = load <32 x i32>, <32 x i32>* %v133, align 1, !tbaa !4
+  %v132 = getelementptr inbounds i8, ptr %v1, i32 %v131
+  %v134 = load <32 x i32>, ptr %v132, align 1, !tbaa !4
   %v135 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v130, <32 x i32> %v134)
   %v136 = add nsw i32 %v126, -127
-  %v137 = getelementptr inbounds i8, i8* %v1, i32 %v136
-  %v138 = bitcast i8* %v137 to <32 x i32>*
-  %v139 = load <32 x i32>, <32 x i32>* %v138, align 1, !tbaa !4
+  %v137 = getelementptr inbounds i8, ptr %v1, i32 %v136
+  %v139 = load <32 x i32>, ptr %v137, align 1, !tbaa !4
   %v140 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v135, <32 x i32> %v139)
   %v141 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v125, <32 x i32> %v140)
   %v142 = add i32 %v11, -128
   %v143 = sub i32 %v142, %v84
   %v144 = add i32 %v143, %v83
-  %v145 = getelementptr inbounds i8, i8* %v9, i32 %v144
-  %v146 = bitcast i8* %v145 to <32 x i32>*
-  store <32 x i32> %v141, <32 x i32>* %v146, align 1, !tbaa !7
+  %v145 = getelementptr inbounds i8, ptr %v9, i32 %v144
+  store <32 x i32> %v141, ptr %v145, align 1, !tbaa !7
   %v147 = add nuw nsw i32 %v92, 1
   %v148 = icmp eq i32 %v147, %v152
   br i1 %v148, label %b4, label %b3
@@ -245,49 +225,40 @@ b8:                                               ; preds = %b9, %b8
   %v166 = sub i32 %v160, %v159
   %v167 = add i32 %v166, %v162
   %v168 = add nsw i32 %v167, -129
-  %v169 = getelementptr inbounds i8, i8* %v1, i32 %v168
-  %v170 = bitcast i8* %v169 to <32 x i32>*
-  %v171 = load <32 x i32>, <32 x i32>* %v170, align 1, !tbaa !4
+  %v169 = getelementptr inbounds i8, ptr %v1, i32 %v168
+  %v171 = load <32 x i32>, ptr %v169, align 1, !tbaa !4
   %v172 = add nsw i32 %v167, -128
-  %v173 = getelementptr inbounds i8, i8* %v1, i32 %v172
-  %v174 = bitcast i8* %v173 to <32 x i32>*
-  %v175 = load <32 x i32>, <32 x i32>* %v174, align 1, !tbaa !4
+  %v173 = getelementptr inbounds i8, ptr %v1, i32 %v172
+  %v175 = load <32 x i32>, ptr %v173, align 1, !tbaa !4
   %v176 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v171, <32 x i32> %v175)
   %v177 = add nsw i32 %v167, -127
-  %v178 = getelementptr inbounds i8, i8* %v1, i32 %v177
-  %v179 = bitcast i8* %v178 to <32 x i32>*
-  %v180 = load <32 x i32>, <32 x i32>* %v179, align 1, !tbaa !4
+  %v178 = getelementptr inbounds i8, ptr %v1, i32 %v177
+  %v180 = load <32 x i32>, ptr %v178, align 1, !tbaa !4
   %v181 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v176, <32 x i32> %v180)
   %v182 = add i32 %v166, %v163
   %v183 = add nsw i32 %v182, -129
-  %v184 = getelementptr inbounds i8, i8* %v1, i32 %v183
-  %v185 = bitcast i8* %v184 to <32 x i32>*
-  %v186 = load <32 x i32>, <32 x i32>* %v185, align 1, !tbaa !4
+  %v184 = getelementptr inbounds i8, ptr %v1, i32 %v183
+  %v186 = load <32 x i32>, ptr %v184, align 1, !tbaa !4
   %v187 = add nsw i32 %v182, -128
-  %v188 = getelementptr inbounds i8, i8* %v1, i32 %v187
-  %v189 = bitcast i8* %v188 to <32 x i32>*
-  %v190 = load <32 x i32>, <32 x i32>* %v189, align 1, !tbaa !4
+  %v188 = getelementptr inbounds i8, ptr %v1, i32 %v187
+  %v190 = load <32 x i32>, ptr %v188, align 1, !tbaa !4
   %v191 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v186, <32 x i32> %v190)
   %v192 = add nsw i32 %v182, -127
-  %v193 = getelementptr inbounds i8, i8* %v1, i32 %v192
-  %v194 = bitcast i8* %v193 to <32 x i32>*
-  %v195 = load <32 x i32>, <32 x i32>* %v194, align 1, !tbaa !4
+  %v193 = getelementptr inbounds i8, ptr %v1, i32 %v192
+  %v195 = load <32 x i32>, ptr %v193, align 1, !tbaa !4
   %v196 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v191, <32 x i32> %v195)
   %v197 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v181, <32 x i32> %v196)
   %v198 = add i32 %v166, %v165
   %v199 = add nsw i32 %v198, -129
-  %v200 = getelementptr inbounds i8, i8* %v1, i32 %v199
-  %v201 = bitcast i8* %v200 to <32 x i32>*
-  %v202 = load <32 x i32>, <32 x i32>* %v201, align 1, !tbaa !4
+  %v200 = getelementptr inbounds i8, ptr %v1, i32 %v199
+  %v202 = load <32 x i32>, ptr %v200, align 1, !tbaa !4
   %v203 = add nsw i32 %v198, -128
-  %v204 = getelementptr inbounds i8, i8* %v1, i32 %v203
-  %v205 = bitcast i8* %v204 to <32 x i32>*
-  %v206 = load <32 x i32>, <32 x i32>* %v205, align 1, !tbaa !4
+  %v204 = getelementptr inbounds i8, ptr %v1, i32 %v203
+  %v206 = load <32 x i32>, ptr %v204, align 1, !tbaa !4
   %v207 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v202, <32 x i32> %v206)
   %v208 = add nsw i32 %v198, -127
-  %v209 = getelementptr inbounds i8, i8* %v1, i32 %v208
-  %v210 = bitcast i8* %v209 to <32 x i32>*
-  %v211 = load <32 x i32>, <32 x i32>* %v210, align 1, !tbaa !4
+  %v209 = getelementptr inbounds i8, ptr %v1, i32 %v208
+  %v211 = load <32 x i32>, ptr %v209, align 1, !tbaa !4
   %v212 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v207, <32 x i32> %v211)
   %v213 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v197, <32 x i32> %v212)
   %v214 = mul nsw i32 %v223, %v13
@@ -295,9 +266,8 @@ b8:                                               ; preds = %b9, %b8
   %v216 = add i32 %v11, -128
   %v217 = sub i32 %v216, %v215
   %v218 = add i32 %v217, %v214
-  %v219 = getelementptr inbounds i8, i8* %v9, i32 %v218
-  %v220 = bitcast i8* %v219 to <32 x i32>*
-  store <32 x i32> %v213, <32 x i32>* %v220, align 1, !tbaa !7
+  %v219 = getelementptr inbounds i8, ptr %v9, i32 %v218
+  store <32 x i32> %v213, ptr %v219, align 1, !tbaa !7
   %v221 = add nuw nsw i32 %v157, 1
   %v222 = icmp eq i32 %v221, %v155
   br i1 %v222, label %b10, label %b8
@@ -318,21 +288,19 @@ b11:                                              ; preds = %b10, %b7, %b4, %b0
 declare <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32>, <32 x i32>) #1
 
 ; Function Attrs: nounwind
-define i32 @f7(%4* noalias nocapture readonly %a0, %4* noalias nocapture readonly %a1) #0 {
+define i32 @f7(ptr noalias nocapture readonly %a0, ptr noalias nocapture readonly %a1) #0 {
 b0:
-  %v0 = tail call i32 @f6(%4* %a0, %4* %a1) #0
+  %v0 = tail call i32 @f6(ptr %a0, ptr %a1) #0
   ret i32 0
 }
 
 ; Function Attrs: nounwind
-define i32 @f8(i8** nocapture readonly %a0) #0 {
+define i32 @f8(ptr nocapture readonly %a0) #0 {
 b0:
-  %v0 = bitcast i8** %a0 to %4**
-  %v1 = load %4*, %4** %v0, align 4
-  %v2 = getelementptr i8*, i8** %a0, i32 1
-  %v3 = bitcast i8** %v2 to %4**
-  %v4 = load %4*, %4** %v3, align 4
-  %v5 = tail call i32 @f7(%4* %v1, %4* %v4)
+  %v1 = load ptr, ptr %a0, align 4
+  %v2 = getelementptr ptr, ptr %a0, i32 1
+  %v4 = load ptr, ptr %v2, align 4
+  %v5 = tail call i32 @f7(ptr %v1, ptr %v4)
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/lsr-postinc-nested-loop.ll b/llvm/test/CodeGen/Hexagon/lsr-postinc-nested-loop.ll
index 8fbf913a22cbb..0e6aadf092db9 100644
--- a/llvm/test/CodeGen/Hexagon/lsr-postinc-nested-loop.ll
+++ b/llvm/test/CodeGen/Hexagon/lsr-postinc-nested-loop.ll
@@ -8,7 +8,7 @@
 ; CHECK: endloop
 
 
-define dso_local signext i16 @foo(i16* nocapture readonly %filt, i16* nocapture readonly %inp, i32 %c1, i32 %c2) local_unnamed_addr {
+define dso_local signext i16 @foo(ptr nocapture readonly %filt, ptr nocapture readonly %inp, i32 %c1, i32 %c2) local_unnamed_addr {
 entry:
   %cmp28 = icmp sgt i32 %c1, 0
   %cmp221 = icmp sgt i32 %c2, 0
@@ -16,22 +16,22 @@ entry:
   br i1 %or.cond, label %for.cond1.preheader.us, label %for.cond.cleanup
 
 for.cond1.preheader.us:                           ; preds = %entry, %for.cond1.for.cond.cleanup3_crit_edge.us
-  %filt.addr.032.us = phi i16* [ %scevgep, %for.cond1.for.cond.cleanup3_crit_edge.us ], [ %filt, %entry ]
-  %inp.addr.031.us = phi i16* [ %scevgep35, %for.cond1.for.cond.cleanup3_crit_edge.us ], [ %inp, %entry ]
+  %filt.addr.032.us = phi ptr [ %scevgep, %for.cond1.for.cond.cleanup3_crit_edge.us ], [ %filt, %entry ]
+  %inp.addr.031.us = phi ptr [ %scevgep35, %for.cond1.for.cond.cleanup3_crit_edge.us ], [ %inp, %entry ]
   %l.030.us = phi i32 [ %inc11.us, %for.cond1.for.cond.cleanup3_crit_edge.us ], [ 0, %entry ]
   %sum0.029.us = phi i16 [ %add8.us, %for.cond1.for.cond.cleanup3_crit_edge.us ], [ 0, %entry ]
-  %scevgep = getelementptr i16, i16* %filt.addr.032.us, i32 %c2
+  %scevgep = getelementptr i16, ptr %filt.addr.032.us, i32 %c2
   br label %for.body4.us
 
 for.body4.us:                                     ; preds = %for.body4.us, %for.cond1.preheader.us
   %z.025.us = phi i32 [ 0, %for.cond1.preheader.us ], [ %inc.us, %for.body4.us ]
-  %filt.addr.124.us = phi i16* [ %filt.addr.032.us, %for.cond1.preheader.us ], [ %incdec.ptr.us, %for.body4.us ]
-  %inp.addr.123.us = phi i16* [ %inp.addr.031.us, %for.cond1.preheader.us ], [ %incdec.ptr5.us, %for.body4.us ]
+  %filt.addr.124.us = phi ptr [ %filt.addr.032.us, %for.cond1.preheader.us ], [ %incdec.ptr.us, %for.body4.us ]
+  %inp.addr.123.us = phi ptr [ %inp.addr.031.us, %for.cond1.preheader.us ], [ %incdec.ptr5.us, %for.body4.us ]
   %sum0.122.us = phi i16 [ %sum0.029.us, %for.cond1.preheader.us ], [ %add8.us, %for.body4.us ]
-  %incdec.ptr.us = getelementptr inbounds i16, i16* %filt.addr.124.us, i32 1
-  %0 = load i16, i16* %filt.addr.124.us, align 2
-  %incdec.ptr5.us = getelementptr inbounds i16, i16* %inp.addr.123.us, i32 1
-  %1 = load i16, i16* %inp.addr.123.us, align 2
+  %incdec.ptr.us = getelementptr inbounds i16, ptr %filt.addr.124.us, i32 1
+  %0 = load i16, ptr %filt.addr.124.us, align 2
+  %incdec.ptr5.us = getelementptr inbounds i16, ptr %inp.addr.123.us, i32 1
+  %1 = load i16, ptr %inp.addr.123.us, align 2
   %add.us = add i16 %0, %sum0.122.us
   %add8.us = add i16 %add.us, %1
   %inc.us = add nuw nsw i32 %z.025.us, 1
@@ -39,7 +39,7 @@ for.body4.us:                                     ; preds = %for.body4.us, %for.
   br i1 %exitcond, label %for.cond1.for.cond.cleanup3_crit_edge.us, label %for.body4.us
 
 for.cond1.for.cond.cleanup3_crit_edge.us:         ; preds = %for.body4.us
-  %scevgep35 = getelementptr i16, i16* %inp.addr.031.us, i32 %c2
+  %scevgep35 = getelementptr i16, ptr %inp.addr.031.us, i32 %c2
   %inc11.us = add nuw nsw i32 %l.030.us, 1
   %exitcond36 = icmp eq i32 %inc11.us, %c1
   br i1 %exitcond36, label %for.cond.cleanup, label %for.cond1.preheader.us

diff  --git a/llvm/test/CodeGen/Hexagon/machine-sink.ll b/llvm/test/CodeGen/Hexagon/machine-sink.ll
index b3ecb022d8c1e..674c2528e4b60 100644
--- a/llvm/test/CodeGen/Hexagon/machine-sink.ll
+++ b/llvm/test/CodeGen/Hexagon/machine-sink.ll
@@ -14,9 +14,9 @@ b1:                                               ; preds = %b0
   unreachable
 
 b2:                                               ; preds = %b0
-  %v0 = load i8*, i8** undef, align 4
-  %v1 = getelementptr inbounds i8, i8* %v0, i32 1
-  %v2 = load i8, i8* %v0, align 1, !tbaa !0
+  %v0 = load ptr, ptr undef, align 4
+  %v1 = getelementptr inbounds i8, ptr %v0, i32 1
+  %v2 = load i8, ptr %v0, align 1, !tbaa !0
   %v3 = zext i8 %v2 to i32
   %v4 = shl nuw nsw i32 %v3, 8
   br i1 undef, label %b3, label %b5
@@ -28,8 +28,8 @@ b4:                                               ; preds = %b3
   br label %b5
 
 b5:                                               ; preds = %b4, %b2
-  %v5 = phi i8* [ undef, %b4 ], [ %v1, %b2 ]
-  %v6 = load i8, i8* %v5, align 1, !tbaa !0
+  %v5 = phi ptr [ undef, %b4 ], [ %v1, %b2 ]
+  %v6 = load i8, ptr %v5, align 1, !tbaa !0
   %v7 = zext i8 %v6 to i32
   %v8 = add nsw i32 %v7, %v4
   %v9 = add nsw i32 %v8, -2
@@ -57,7 +57,7 @@ b12:                                              ; preds = %b11
   unreachable
 
 b13:                                              ; preds = %b11, %b10
-  store i32 %v9, i32* undef, align 4, !tbaa !3
+  store i32 %v9, ptr undef, align 4, !tbaa !3
   unreachable
 
 b14:                                              ; preds = %b9

diff  --git a/llvm/test/CodeGen/Hexagon/macint.ll b/llvm/test/CodeGen/Hexagon/macint.ll
index 47856f2fcb548..0a0508e747477 100644
--- a/llvm/test/CodeGen/Hexagon/macint.ll
+++ b/llvm/test/CodeGen/Hexagon/macint.ll
@@ -3,12 +3,12 @@
 
 ; CHECK: r{{[0-9]+}} {{\+|\-}}= mpyi(r{{[0-9]+}},
 
-define i32 @f0(i32* %a0, i32* %a1) #0 {
+define i32 @f0(ptr %a0, ptr %a1) #0 {
 b0:
-  %v0 = load i32, i32* %a0, align 4
+  %v0 = load i32, ptr %a0, align 4
   %v1 = udiv i32 %v0, 10000
   %v2 = urem i32 %v1, 10
-  store i32 %v2, i32* %a1, align 4
+  store i32 %v2, ptr %a1, align 4
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/mem-fi-add.ll b/llvm/test/CodeGen/Hexagon/mem-fi-add.ll
index 6bda299476a32..74dcd035c0453 100644
--- a/llvm/test/CodeGen/Hexagon/mem-fi-add.ll
+++ b/llvm/test/CodeGen/Hexagon/mem-fi-add.ll
@@ -12,17 +12,15 @@ target triple = "hexagon"
 define void @foo() #0 {
 entry:
   %t = alloca [4 x [2 x i32]], align 8
-  %0 = bitcast [4 x [2 x i32]]* %t to i8*
-  call void @llvm.memset.p0i8.i32(i8* align 8 %0, i8 0, i32 32, i1 false)
-  %arraydecay = getelementptr inbounds [4 x [2 x i32]], [4 x [2 x i32]]* %t, i32 0, i32 0
-  call void @bar([2 x i32]* %arraydecay) #1
+  call void @llvm.memset.p0.i32(ptr align 8 %t, i8 0, i32 32, i1 false)
+  call void @bar(ptr %t) #1
   ret void
 }
 
 ; Function Attrs: nounwind
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) #1
+declare void @llvm.memset.p0.i32(ptr nocapture, i8, i32, i1) #1
 
-declare void @bar([2 x i32]*) #2
+declare void @bar(ptr) #2
 
 attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/mem-load-circ.ll b/llvm/test/CodeGen/Hexagon/mem-load-circ.ll
index d1a95d0c57448..d716b3b6e33f9 100644
--- a/llvm/test/CodeGen/Hexagon/mem-load-circ.ll
+++ b/llvm/test/CodeGen/Hexagon/mem-load-circ.ll
@@ -2,101 +2,93 @@
 
 ; CHECK-LABEL: f0:
 ; CHECK: r{{[1-9]+:[0-9]+}} = memd(r{{[0-9]*}}++#{{[0-9]}}:circ(m{{[01]}}))
-define i64 @f0(i64* %a0) {
+define i64 @f0(ptr %a0) {
 b0:
   %v0 = alloca i64, align 8
-  %v1 = getelementptr inbounds i64, i64* %a0, i32 1
-  store i64 0, i64* %v0, align 8, !tbaa !0
-  %v2 = bitcast i64* %v1 to i8*
-  %v3 = bitcast i64* %v0 to i8*
-  %v4 = call i8* @llvm.hexagon.circ.ldd(i8* %v2, i8* %v3, i32 150996984, i32 8)
-  %v5 = load i64, i64* %v0, align 8, !tbaa !0
+  %v1 = getelementptr inbounds i64, ptr %a0, i32 1
+  store i64 0, ptr %v0, align 8, !tbaa !0
+  %v4 = call ptr @llvm.hexagon.circ.ldd(ptr %v1, ptr %v0, i32 150996984, i32 8)
+  %v5 = load i64, ptr %v0, align 8, !tbaa !0
   ret i64 %v5
 }
 
 ; Function Attrs: argmemonly nounwind
-declare i8* @llvm.hexagon.circ.ldd(i8*, i8*, i32, i32) #0
+declare ptr @llvm.hexagon.circ.ldd(ptr, ptr, i32, i32) #0
 
 ; CHECK-LABEL: f1:
 ; CHECK: r{{[0-9]*}} = memb(r{{[0-9]*}}++#{{[0-9]}}:circ(m{{[01]}}))
-define signext i8 @f1(i8* %a0) {
+define signext i8 @f1(ptr %a0) {
 b0:
   %v0 = alloca i8, align 1
-  %v1 = getelementptr inbounds i8, i8* %a0, i32 1
-  store i8 0, i8* %v0, align 1, !tbaa !4
-  %v2 = call i8* @llvm.hexagon.circ.ldb(i8* %v1, i8* %v0, i32 16777471, i32 1)
-  %v3 = load i8, i8* %v0, align 1, !tbaa !4
+  %v1 = getelementptr inbounds i8, ptr %a0, i32 1
+  store i8 0, ptr %v0, align 1, !tbaa !4
+  %v2 = call ptr @llvm.hexagon.circ.ldb(ptr %v1, ptr %v0, i32 16777471, i32 1)
+  %v3 = load i8, ptr %v0, align 1, !tbaa !4
   ret i8 %v3
 }
 
 ; Function Attrs: argmemonly nounwind
-declare i8* @llvm.hexagon.circ.ldb(i8*, i8*, i32, i32) #0
+declare ptr @llvm.hexagon.circ.ldb(ptr, ptr, i32, i32) #0
 
 ; CHECK-LABEL: f2:
 ; CHECK: r{{[0-9]*}} = memub(r{{[0-9]*}}++#{{[0-9]}}:circ(m{{[01]}}))
-define signext i8 @f2(i8* %a0) {
+define signext i8 @f2(ptr %a0) {
 b0:
   %v0 = alloca i8, align 1
-  %v1 = getelementptr inbounds i8, i8* %a0, i32 1
-  store i8 0, i8* %v0, align 1, !tbaa !4
-  %v2 = call i8* @llvm.hexagon.circ.ldub(i8* %v1, i8* %v0, i32 16777471, i32 1)
-  %v3 = load i8, i8* %v0, align 1, !tbaa !4
+  %v1 = getelementptr inbounds i8, ptr %a0, i32 1
+  store i8 0, ptr %v0, align 1, !tbaa !4
+  %v2 = call ptr @llvm.hexagon.circ.ldub(ptr %v1, ptr %v0, i32 16777471, i32 1)
+  %v3 = load i8, ptr %v0, align 1, !tbaa !4
   ret i8 %v3
 }
 
 ; Function Attrs: argmemonly nounwind
-declare i8* @llvm.hexagon.circ.ldub(i8*, i8*, i32, i32) #0
+declare ptr @llvm.hexagon.circ.ldub(ptr, ptr, i32, i32) #0
 
 ; CHECK-LABEL: f3:
 ; CHECK: r{{[0-9]*}} = memh(r{{[0-9]*}}++#{{[0-9]}}:circ(m{{[01]}}))
-define signext i16 @f3(i16* %a0) {
+define signext i16 @f3(ptr %a0) {
 b0:
   %v0 = alloca i16, align 2
-  %v1 = getelementptr inbounds i16, i16* %a0, i32 1
-  store i16 0, i16* %v0, align 2, !tbaa !5
-  %v2 = bitcast i16* %v1 to i8*
-  %v3 = bitcast i16* %v0 to i8*
-  %v4 = call i8* @llvm.hexagon.circ.ldh(i8* %v2, i8* %v3, i32 33554942, i32 2)
-  %v5 = load i16, i16* %v0, align 2, !tbaa !5
+  %v1 = getelementptr inbounds i16, ptr %a0, i32 1
+  store i16 0, ptr %v0, align 2, !tbaa !5
+  %v4 = call ptr @llvm.hexagon.circ.ldh(ptr %v1, ptr %v0, i32 33554942, i32 2)
+  %v5 = load i16, ptr %v0, align 2, !tbaa !5
   ret i16 %v5
 }
 
 ; Function Attrs: argmemonly nounwind
-declare i8* @llvm.hexagon.circ.ldh(i8*, i8*, i32, i32) #0
+declare ptr @llvm.hexagon.circ.ldh(ptr, ptr, i32, i32) #0
 
 ; CHECK-LABEL: f4:
 ; CHECK: r{{[0-9]*}} = memuh(r{{[0-9]*}}++#{{[0-9]}}:circ(m{{[01]}}))
-define signext i16 @f4(i16* %a0) {
+define signext i16 @f4(ptr %a0) {
 b0:
   %v0 = alloca i16, align 2
-  %v1 = getelementptr inbounds i16, i16* %a0, i32 1
-  store i16 0, i16* %v0, align 2, !tbaa !5
-  %v2 = bitcast i16* %v1 to i8*
-  %v3 = bitcast i16* %v0 to i8*
-  %v4 = call i8* @llvm.hexagon.circ.lduh(i8* %v2, i8* %v3, i32 33554942, i32 2)
-  %v5 = load i16, i16* %v0, align 2, !tbaa !5
+  %v1 = getelementptr inbounds i16, ptr %a0, i32 1
+  store i16 0, ptr %v0, align 2, !tbaa !5
+  %v4 = call ptr @llvm.hexagon.circ.lduh(ptr %v1, ptr %v0, i32 33554942, i32 2)
+  %v5 = load i16, ptr %v0, align 2, !tbaa !5
   ret i16 %v5
 }
 
 ; Function Attrs: argmemonly nounwind
-declare i8* @llvm.hexagon.circ.lduh(i8*, i8*, i32, i32) #0
+declare ptr @llvm.hexagon.circ.lduh(ptr, ptr, i32, i32) #0
 
 ; CHECK-LABEL: f5:
 ; CHECK: r{{[0-9]*}} = memw(r{{[0-9]*}}++#{{[0-9]}}:circ(m{{[01]}}))
-define i32 @f5(i32* %a0) {
+define i32 @f5(ptr %a0) {
 b0:
   %v0 = alloca i32, align 4
-  %v1 = getelementptr inbounds i32, i32* %a0, i32 1
-  store i32 0, i32* %v0, align 4, !tbaa !7
-  %v2 = bitcast i32* %v1 to i8*
-  %v3 = bitcast i32* %v0 to i8*
-  %v4 = call i8* @llvm.hexagon.circ.ldw(i8* %v2, i8* %v3, i32 50332668, i32 4)
-  %v5 = load i32, i32* %v0, align 4, !tbaa !7
+  %v1 = getelementptr inbounds i32, ptr %a0, i32 1
+  store i32 0, ptr %v0, align 4, !tbaa !7
+  %v4 = call ptr @llvm.hexagon.circ.ldw(ptr %v1, ptr %v0, i32 50332668, i32 4)
+  %v5 = load i32, ptr %v0, align 4, !tbaa !7
   ret i32 %v5
 }
 
 ; Function Attrs: argmemonly nounwind
-declare i8* @llvm.hexagon.circ.ldw(i8*, i8*, i32, i32) #0
+declare ptr @llvm.hexagon.circ.ldw(ptr, ptr, i32, i32) #0
 
 attributes #0 = { argmemonly nounwind }
 

diff  --git a/llvm/test/CodeGen/Hexagon/mem-ops-sub.ll b/llvm/test/CodeGen/Hexagon/mem-ops-sub.ll
index 5ca0d749e75c3..2ef11624cc798 100644
--- a/llvm/test/CodeGen/Hexagon/mem-ops-sub.ll
+++ b/llvm/test/CodeGen/Hexagon/mem-ops-sub.ll
@@ -7,26 +7,26 @@
 ; Function Attrs: norecurse nounwind
 define fastcc void @f0() unnamed_addr #0 {
 b0:
-  %v0 = load i8, i8* @g0, align 1, !tbaa !4
+  %v0 = load i8, ptr @g0, align 1, !tbaa !4
   %v1 = zext i8 %v0 to i32
   %v2 = mul nuw nsw i32 %v1, 9625
   %v3 = and i32 %v2, 255
   %v4 = mul nuw nsw i32 %v3, 9625
   %v5 = and i32 %v4, 255
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* @g0, align 1, !tbaa !4
+  store i8 %v6, ptr @g0, align 1, !tbaa !4
   ret void
 }
 
 define i32 @f1() {
 b0:
-  %v0 = load i8, i8* @g0, align 1, !tbaa !4
+  %v0 = load i8, ptr @g0, align 1, !tbaa !4
   %v1 = zext i8 %v0 to i32
   %v2 = add nuw nsw i32 %v1, 224
   %v3 = trunc i32 %v2 to i8
-  store i8 %v3, i8* @g0, align 1, !tbaa !4
+  store i8 %v3, ptr @g0, align 1, !tbaa !4
   tail call fastcc void @f0()
-  %v4 = load i8, i8* @g0, align 1, !tbaa !4
+  %v4 = load i8, ptr @g0, align 1, !tbaa !4
   %v5 = zext i8 %v4 to i32
   ret i32 %v5
 }

diff  --git a/llvm/test/CodeGen/Hexagon/mem-ops-sub_01.ll b/llvm/test/CodeGen/Hexagon/mem-ops-sub_01.ll
index 3df701e8d9823..2c41c13128e26 100644
--- a/llvm/test/CodeGen/Hexagon/mem-ops-sub_01.ll
+++ b/llvm/test/CodeGen/Hexagon/mem-ops-sub_01.ll
@@ -7,26 +7,26 @@
 ; Function Attrs: norecurse nounwind
 define fastcc void @f0() unnamed_addr #0 {
 b0:
-  %v0 = load i8, i8* @g0, align 1, !tbaa !4
+  %v0 = load i8, ptr @g0, align 1, !tbaa !4
   %v1 = zext i8 %v0 to i32
   %v2 = mul nuw nsw i32 %v1, 9625
   %v3 = and i32 %v2, 255
   %v4 = mul nuw nsw i32 %v3, 9625
   %v5 = and i32 %v4, 255
   %v6 = trunc i32 %v5 to i8
-  store i8 %v6, i8* @g0, align 1, !tbaa !4
+  store i8 %v6, ptr @g0, align 1, !tbaa !4
   ret void
 }
 
 define i32 @f1() {
 b0:
-  %v0 = load i8, i8* @g0, align 1, !tbaa !4
+  %v0 = load i8, ptr @g0, align 1, !tbaa !4
   %v1 = zext i8 %v0 to i32
   %v2 = add nuw nsw i32 %v1, 225
   %v3 = trunc i32 %v2 to i8
-  store i8 %v3, i8* @g0, align 1, !tbaa !4
+  store i8 %v3, ptr @g0, align 1, !tbaa !4
   tail call fastcc void @f0()
-  %v4 = load i8, i8* @g0, align 1, !tbaa !4
+  %v4 = load i8, ptr @g0, align 1, !tbaa !4
   %v5 = zext i8 %v4 to i32
   ret i32 %v5
 }

diff  --git a/llvm/test/CodeGen/Hexagon/mem-ops-sub_i16.ll b/llvm/test/CodeGen/Hexagon/mem-ops-sub_i16.ll
index 8eb2e8e83f50d..a5bc0668a4d69 100644
--- a/llvm/test/CodeGen/Hexagon/mem-ops-sub_i16.ll
+++ b/llvm/test/CodeGen/Hexagon/mem-ops-sub_i16.ll
@@ -7,26 +7,26 @@
 ; Function Attrs: norecurse nounwind
 define fastcc void @f0() unnamed_addr #0 {
 b0:
-  %v0 = load i16, i16* @g0, align 1, !tbaa !4
+  %v0 = load i16, ptr @g0, align 1, !tbaa !4
   %v1 = zext i16 %v0 to i32
   %v2 = mul nuw nsw i32 %v1, 9625
   %v3 = and i32 %v2, 255
   %v4 = mul nuw nsw i32 %v3, 9625
   %v5 = and i32 %v4, 255
   %v6 = trunc i32 %v5 to i16
-  store i16 %v6, i16* @g0, align 2, !tbaa !4
+  store i16 %v6, ptr @g0, align 2, !tbaa !4
   ret void
 }
 
 define i32 @f1() {
 b0:
-  %v0 = load i16, i16* @g0, align 2, !tbaa !4
+  %v0 = load i16, ptr @g0, align 2, !tbaa !4
   %v1 = zext i16 %v0 to i32
   %v2 = add nuw nsw i32 %v1, 65505
   %v3 = trunc i32 %v2 to i16
-  store i16 %v3, i16* @g0, align 2, !tbaa !4
+  store i16 %v3, ptr @g0, align 2, !tbaa !4
   tail call fastcc void @f0()
-  %v4 = load i16, i16* @g0, align 2, !tbaa !4
+  %v4 = load i16, ptr @g0, align 2, !tbaa !4
   %v5 = zext i16 %v4 to i32
   ret i32 %v5
 }

diff  --git a/llvm/test/CodeGen/Hexagon/mem-ops-sub_i16_01.ll b/llvm/test/CodeGen/Hexagon/mem-ops-sub_i16_01.ll
index 59e16f0a3e985..e156bcfc44f3d 100644
--- a/llvm/test/CodeGen/Hexagon/mem-ops-sub_i16_01.ll
+++ b/llvm/test/CodeGen/Hexagon/mem-ops-sub_i16_01.ll
@@ -7,26 +7,26 @@
 ; Function Attrs: norecurse nounwind
 define fastcc void @f0() unnamed_addr #0 {
 b0:
-  %v0 = load i16, i16* @g0, align 1, !tbaa !4
+  %v0 = load i16, ptr @g0, align 1, !tbaa !4
   %v1 = zext i16 %v0 to i32
   %v2 = mul nuw nsw i32 %v1, 9625
   %v3 = and i32 %v2, 255
   %v4 = mul nuw nsw i32 %v3, 9625
   %v5 = and i32 %v4, 255
   %v6 = trunc i32 %v5 to i16
-  store i16 %v6, i16* @g0, align 2, !tbaa !4
+  store i16 %v6, ptr @g0, align 2, !tbaa !4
   ret void
 }
 
 define i32 @f1() {
 b0:
-  %v0 = load i16, i16* @g0, align 2, !tbaa !4
+  %v0 = load i16, ptr @g0, align 2, !tbaa !4
   %v1 = zext i16 %v0 to i32
   %v2 = add nuw nsw i32 %v1, 65504
   %v3 = trunc i32 %v2 to i16
-  store i16 %v3, i16* @g0, align 2, !tbaa !4
+  store i16 %v3, ptr @g0, align 2, !tbaa !4
   tail call fastcc void @f0()
-  %v4 = load i16, i16* @g0, align 2, !tbaa !4
+  %v4 = load i16, ptr @g0, align 2, !tbaa !4
   %v5 = zext i16 %v4 to i32
   ret i32 %v5
 }

diff  --git a/llvm/test/CodeGen/Hexagon/memcmp.ll b/llvm/test/CodeGen/Hexagon/memcmp.ll
index a3244e505b428..30cdeb649d6bc 100644
--- a/llvm/test/CodeGen/Hexagon/memcmp.ll
+++ b/llvm/test/CodeGen/Hexagon/memcmp.ll
@@ -4,7 +4,7 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind readonly
-define i32 @f0(i8* nocapture %a0, i8* nocapture %a1, i32 %a2) #0 {
+define i32 @f0(ptr nocapture %a0, ptr nocapture %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp eq i32 %a2, 0
   br i1 %v0, label %b6, label %b1
@@ -13,11 +13,11 @@ b1:                                               ; preds = %b0
   br label %b2
 
 b2:                                               ; preds = %b4, %b1
-  %v1 = phi i8* [ %v10, %b4 ], [ %a1, %b1 ]
-  %v2 = phi i8* [ %v9, %b4 ], [ %a0, %b1 ]
+  %v1 = phi ptr [ %v10, %b4 ], [ %a1, %b1 ]
+  %v2 = phi ptr [ %v9, %b4 ], [ %a0, %b1 ]
   %v3 = phi i32 [ %v11, %b4 ], [ %a2, %b1 ]
-  %v4 = load i8, i8* %v2, align 1, !tbaa !0
-  %v5 = load i8, i8* %v1, align 1, !tbaa !0
+  %v4 = load i8, ptr %v2, align 1, !tbaa !0
+  %v5 = load i8, ptr %v1, align 1, !tbaa !0
   %v6 = icmp eq i8 %v4, %v5
   br i1 %v6, label %b4, label %b3
 
@@ -27,8 +27,8 @@ b3:                                               ; preds = %b2
   br label %b6
 
 b4:                                               ; preds = %b2
-  %v9 = getelementptr inbounds i8, i8* %v2, i32 1
-  %v10 = getelementptr inbounds i8, i8* %v1, i32 1
+  %v9 = getelementptr inbounds i8, ptr %v2, i32 1
+  %v10 = getelementptr inbounds i8, ptr %v1, i32 1
   %v11 = add i32 %v3, -1
   %v12 = icmp eq i32 %v11, 0
   br i1 %v12, label %b5, label %b2

diff  --git a/llvm/test/CodeGen/Hexagon/memcpy-likely-aligned.ll b/llvm/test/CodeGen/Hexagon/memcpy-likely-aligned.ll
index 2cce3f12c1d40..c3b62c41e5830 100644
--- a/llvm/test/CodeGen/Hexagon/memcpy-likely-aligned.ll
+++ b/llvm/test/CodeGen/Hexagon/memcpy-likely-aligned.ll
@@ -5,7 +5,7 @@ target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:
 target triple = "hexagon-unknown-linux-gnu"
 
 %struct.e = type { i8, i8, [2 x i8] }
-%struct.s = type { i8* }
+%struct.s = type { ptr }
 %struct.o = type { %struct.n }
 %struct.n = type { [2 x %struct.l] }
 %struct.l = type { %struct.e, %struct.d, %struct.e }
@@ -13,20 +13,15 @@ target triple = "hexagon-unknown-linux-gnu"
 
 @y = global { <{ { %struct.e, { i8, i8, i8, [5 x i8] }, %struct.e }, { %struct.e, { i8, i8, i8, [5 x i8] }, %struct.e } }> } { <{ { %struct.e, { i8, i8, i8, [5 x i8] }, %struct.e }, { %struct.e, { i8, i8, i8, [5 x i8] }, %struct.e } }> <{ { %struct.e, { i8, i8, i8, [5 x i8] }, %struct.e } { %struct.e { i8 3, i8 0, [2 x i8] undef }, { i8, i8, i8, [5 x i8] } { i8 -47, i8 2, i8 0, [5 x i8] undef }, %struct.e { i8 3, i8 0, [2 x i8] undef } }, { %struct.e, { i8, i8, i8, [5 x i8] }, %struct.e } { %struct.e { i8 3, i8 0, [2 x i8] undef }, { i8, i8, i8, [5 x i8] } { i8 -47, i8 2, i8 0, [5 x i8] undef }, %struct.e { i8 3, i8 0, [2 x i8] undef } } }> }, align 4
 @t = common global %struct.s zeroinitializer, align 4
- at q = internal global %struct.o* null, align 4
+ at q = internal global ptr null, align 4
 
 define void @foo() nounwind {
 entry:
-  %0 = load i8*, i8** getelementptr inbounds (%struct.s, %struct.s* @t, i32 0, i32 0), align 4
-  %1 = bitcast i8* %0 to %struct.o*
-  store %struct.o* %1, %struct.o** @q, align 4
-  %2 = load %struct.o*, %struct.o** @q, align 4
-  %p = getelementptr inbounds %struct.o, %struct.o* %2, i32 0, i32 0
-  %m = getelementptr inbounds %struct.n, %struct.n* %p, i32 0, i32 0
-  %arraydecay = getelementptr inbounds [2 x %struct.l], [2 x %struct.l]* %m, i32 0, i32 0
-  %3 = bitcast %struct.l* %arraydecay to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %3, i8* align 4 getelementptr inbounds ({ <{ { %struct.e, { i8, i8, i8, [5 x i8] }, %struct.e }, { %struct.e, { i8, i8, i8, [5 x i8] }, %struct.e } }> }, { <{ { %struct.e, { i8, i8, i8, [5 x i8] }, %struct.e }, { %struct.e, { i8, i8, i8, [5 x i8] }, %struct.e } }> }* @y, i32 0, i32 0, i32 0, i32 0, i32 0), i32 32, i1 false)
+  %0 = load ptr, ptr @t, align 4
+  store ptr %0, ptr @q, align 4
+  %1 = load ptr, ptr @q, align 4
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %1, ptr align 4 getelementptr inbounds ({ <{ { %struct.e, { i8, i8, i8, [5 x i8] }, %struct.e }, { %struct.e, { i8, i8, i8, [5 x i8] }, %struct.e } }> }, ptr @y, i32 0, i32 0, i32 0, i32 0, i32 0), i32 32, i1 false)
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind

diff  --git a/llvm/test/CodeGen/Hexagon/memcpy-memmove-inline.ll b/llvm/test/CodeGen/Hexagon/memcpy-memmove-inline.ll
index 4f5b510f42718..e7d9498c3f48a 100644
--- a/llvm/test/CodeGen/Hexagon/memcpy-memmove-inline.ll
+++ b/llvm/test/CodeGen/Hexagon/memcpy-memmove-inline.ll
@@ -13,18 +13,15 @@ target triple = "hexagon-unknown--elf"
 ; CHECK-DAG: memh(r{{[0-9]*}}+#4) = [[REG2]]
 ; CHECK-DAG: memb(r{{[0-9]*}}+#6) = [[REG3]]
 
-define i32 @f0(i32* %a0) #0 {
+define i32 @f0(ptr %a0) #0 {
 b0:
   %v0 = alloca [10 x i32], align 8
-  %v1 = bitcast [10 x i32]* %v0 to i8*
-  %v2 = bitcast i32* %a0 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %v1, i8* align 4 %v2, i32 7, i1 false)
-  %v3 = getelementptr inbounds [10 x i32], [10 x i32]* %v0, i32 0, i32 0
-  call void @f1(i32* %v3, i32* %a0) #0
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %v0, ptr align 4 %a0, i32 7, i1 false)
+  call void @f1(ptr %v0, ptr %a0) #0
   ret i32 0
 }
 
-declare void @f1(i32*, i32*)
+declare void @f1(ptr, ptr)
 
 ; CHECK-LABEL: f2:
 ; CHECK-DAG: [[REG4:r[0-9]*]] = memub(r{{[0-9]*}}+#6)
@@ -34,17 +31,15 @@ declare void @f1(i32*, i32*)
 ; CHECK-DAG: memh(r{{[0-9]*}}+#4) = [[REG5]]
 ; CHECK-DAG: memb(r{{[0-9]*}}+#6) = [[REG4]]
 
-define i32 @f2(i32* %a0, i32* %a1) #0 {
+define i32 @f2(ptr %a0, ptr %a1) #0 {
 b0:
-  %v0 = bitcast i32* %a1 to i8*
-  %v1 = bitcast i32* %a0 to i8*
-  call void @llvm.memmove.p0i8.p0i8.i32(i8* align 4 %v0, i8* align 4 %v1, i32 7, i1 false)
-  tail call void @f1(i32* %a1, i32* %a0) #0
+  call void @llvm.memmove.p0.p0.i32(ptr align 4 %a1, ptr align 4 %a0, i32 7, i1 false)
+  tail call void @f1(ptr %a1, ptr %a0) #0
   ret i32 0
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #1
-declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #1
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1) #1
+declare void @llvm.memmove.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32, i1) #1
 
 attributes #0 = { nounwind }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/memop-bit18.ll b/llvm/test/CodeGen/Hexagon/memop-bit18.ll
index 469aa52da7d74..ecc81d058cafe 100644
--- a/llvm/test/CodeGen/Hexagon/memop-bit18.ll
+++ b/llvm/test/CodeGen/Hexagon/memop-bit18.ll
@@ -4,11 +4,11 @@ target triple = "hexagon"
 
 ; CHECK-LABEL: f0:
 ; CHECK: memw({{.*}}) = clrbit(#18)
-define void @f0(i32* nocapture %a0) #0 {
+define void @f0(ptr nocapture %a0) #0 {
 b0:
-  %v0 = load i32, i32* %a0, align 4, !tbaa !0
+  %v0 = load i32, ptr %a0, align 4, !tbaa !0
   %v1 = and i32 %v0, -262145
-  store i32 %v1, i32* %a0, align 4, !tbaa !0
+  store i32 %v1, ptr %a0, align 4, !tbaa !0
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/memops-stack.ll b/llvm/test/CodeGen/Hexagon/memops-stack.ll
index d4bba476db3c1..6d9fbf73ee650 100644
--- a/llvm/test/CodeGen/Hexagon/memops-stack.ll
+++ b/llvm/test/CodeGen/Hexagon/memops-stack.ll
@@ -8,14 +8,13 @@ target triple = "hexagon"
 define void @test0() #0 {
 entry:
   %x = alloca i32, align 4
-  %0 = bitcast i32* %x to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
-  call void @foo(i32* nonnull %x) #3
-  %1 = load i32, i32* %x, align 4, !tbaa !1
-  %inc = add nsw i32 %1, 1
-  store i32 %inc, i32* %x, align 4, !tbaa !1
-  call void @foo(i32* nonnull %x) #3
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #3
+  call void @llvm.lifetime.start.p0(i64 4, ptr %x) #3
+  call void @foo(ptr nonnull %x) #3
+  %0 = load i32, ptr %x, align 4, !tbaa !1
+  %inc = add nsw i32 %0, 1
+  store i32 %inc, ptr %x, align 4, !tbaa !1
+  call void @foo(ptr nonnull %x) #3
+  call void @llvm.lifetime.end.p0(i64 4, ptr %x) #3
   ret void
 }
 
@@ -24,14 +23,13 @@ entry:
 define void @test1() #0 {
 entry:
   %x = alloca i32, align 4
-  %0 = bitcast i32* %x to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
-  call void @foo(i32* nonnull %x) #3
-  %1 = load i32, i32* %x, align 4, !tbaa !1
-  %inc = sub nsw i32 %1, 1
-  store i32 %inc, i32* %x, align 4, !tbaa !1
-  call void @foo(i32* nonnull %x) #3
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #3
+  call void @llvm.lifetime.start.p0(i64 4, ptr %x) #3
+  call void @foo(ptr nonnull %x) #3
+  %0 = load i32, ptr %x, align 4, !tbaa !1
+  %inc = sub nsw i32 %0, 1
+  store i32 %inc, ptr %x, align 4, !tbaa !1
+  call void @foo(ptr nonnull %x) #3
+  call void @llvm.lifetime.end.p0(i64 4, ptr %x) #3
   ret void
 }
 
@@ -40,14 +38,13 @@ entry:
 define void @test2() #0 {
 entry:
   %x = alloca i32, align 4
-  %0 = bitcast i32* %x to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
-  call void @foo(i32* nonnull %x) #3
-  %1 = load i32, i32* %x, align 4, !tbaa !1
-  %inc = or i32 %1, 1
-  store i32 %inc, i32* %x, align 4, !tbaa !1
-  call void @foo(i32* nonnull %x) #3
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #3
+  call void @llvm.lifetime.start.p0(i64 4, ptr %x) #3
+  call void @foo(ptr nonnull %x) #3
+  %0 = load i32, ptr %x, align 4, !tbaa !1
+  %inc = or i32 %0, 1
+  store i32 %inc, ptr %x, align 4, !tbaa !1
+  call void @foo(ptr nonnull %x) #3
+  call void @llvm.lifetime.end.p0(i64 4, ptr %x) #3
   ret void
 }
 
@@ -56,14 +53,13 @@ entry:
 define void @test3() #0 {
 entry:
   %x = alloca i32, align 4
-  %0 = bitcast i32* %x to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
-  call void @foo(i32* nonnull %x) #3
-  %1 = load i32, i32* %x, align 4, !tbaa !1
-  %inc = and i32 %1, -2
-  store i32 %inc, i32* %x, align 4, !tbaa !1
-  call void @foo(i32* nonnull %x) #3
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #3
+  call void @llvm.lifetime.start.p0(i64 4, ptr %x) #3
+  call void @foo(ptr nonnull %x) #3
+  %0 = load i32, ptr %x, align 4, !tbaa !1
+  %inc = and i32 %0, -2
+  store i32 %inc, ptr %x, align 4, !tbaa !1
+  call void @foo(ptr nonnull %x) #3
+  call void @llvm.lifetime.end.p0(i64 4, ptr %x) #3
   ret void
 }
 
@@ -72,14 +68,13 @@ entry:
 define void @test4(i32 %a) #0 {
 entry:
   %x = alloca i32, align 4
-  %0 = bitcast i32* %x to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
-  call void @foo(i32* nonnull %x) #3
-  %1 = load i32, i32* %x, align 4, !tbaa !1
-  %inc = add nsw i32 %1, %a
-  store i32 %inc, i32* %x, align 4, !tbaa !1
-  call void @foo(i32* nonnull %x) #3
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #3
+  call void @llvm.lifetime.start.p0(i64 4, ptr %x) #3
+  call void @foo(ptr nonnull %x) #3
+  %0 = load i32, ptr %x, align 4, !tbaa !1
+  %inc = add nsw i32 %0, %a
+  store i32 %inc, ptr %x, align 4, !tbaa !1
+  call void @foo(ptr nonnull %x) #3
+  call void @llvm.lifetime.end.p0(i64 4, ptr %x) #3
   ret void
 }
 
@@ -88,14 +83,13 @@ entry:
 define void @test5(i32 %a) #0 {
 entry:
   %x = alloca i32, align 4
-  %0 = bitcast i32* %x to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
-  call void @foo(i32* nonnull %x) #3
-  %1 = load i32, i32* %x, align 4, !tbaa !1
-  %inc = sub nsw i32 %1, %a
-  store i32 %inc, i32* %x, align 4, !tbaa !1
-  call void @foo(i32* nonnull %x) #3
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #3
+  call void @llvm.lifetime.start.p0(i64 4, ptr %x) #3
+  call void @foo(ptr nonnull %x) #3
+  %0 = load i32, ptr %x, align 4, !tbaa !1
+  %inc = sub nsw i32 %0, %a
+  store i32 %inc, ptr %x, align 4, !tbaa !1
+  call void @foo(ptr nonnull %x) #3
+  call void @llvm.lifetime.end.p0(i64 4, ptr %x) #3
   ret void
 }
 
@@ -104,14 +98,13 @@ entry:
 define void @test6(i32 %a) #0 {
 entry:
   %x = alloca i32, align 4
-  %0 = bitcast i32* %x to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
-  call void @foo(i32* nonnull %x) #3
-  %1 = load i32, i32* %x, align 4, !tbaa !1
-  %inc = or i32 %1, %a
-  store i32 %inc, i32* %x, align 4, !tbaa !1
-  call void @foo(i32* nonnull %x) #3
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #3
+  call void @llvm.lifetime.start.p0(i64 4, ptr %x) #3
+  call void @foo(ptr nonnull %x) #3
+  %0 = load i32, ptr %x, align 4, !tbaa !1
+  %inc = or i32 %0, %a
+  store i32 %inc, ptr %x, align 4, !tbaa !1
+  call void @foo(ptr nonnull %x) #3
+  call void @llvm.lifetime.end.p0(i64 4, ptr %x) #3
   ret void
 }
 
@@ -120,21 +113,20 @@ entry:
 define void @test7(i32 %a) #0 {
 entry:
   %x = alloca i32, align 4
-  %0 = bitcast i32* %x to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
-  call void @foo(i32* nonnull %x) #3
-  %1 = load i32, i32* %x, align 4, !tbaa !1
-  %inc = and i32 %1, %a
-  store i32 %inc, i32* %x, align 4, !tbaa !1
-  call void @foo(i32* nonnull %x) #3
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #3
+  call void @llvm.lifetime.start.p0(i64 4, ptr %x) #3
+  call void @foo(ptr nonnull %x) #3
+  %0 = load i32, ptr %x, align 4, !tbaa !1
+  %inc = and i32 %0, %a
+  store i32 %inc, ptr %x, align 4, !tbaa !1
+  call void @foo(ptr nonnull %x) #3
+  call void @llvm.lifetime.end.p0(i64 4, ptr %x) #3
   ret void
 }
 
 
-declare void @foo(i32*) #2
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @foo(ptr) #2
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
 
 attributes #0 = { nounwind "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length64b" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/memops.ll b/llvm/test/CodeGen/Hexagon/memops.ll
index c3b8b7bdf98b3..65ddf5167cbd3 100644
--- a/llvm/test/CodeGen/Hexagon/memops.ll
+++ b/llvm/test/CodeGen/Hexagon/memops.ll
@@ -1,1490 +1,1490 @@
 ; RUN: llc -march=hexagon -mcpu=hexagonv5  < %s | FileCheck %s
 ; Generate MemOps for V4 and above.
 
-define void @memop_unsigned_char_add5(i8* nocapture %p) nounwind {
+define void @memop_unsigned_char_add5(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_char_add5:
 ; CHECK:  memb(r{{[0-9]+}}+#0) += #5
-  %0 = load i8, i8* %p, align 1
+  %0 = load i8, ptr %p, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 5
   %conv1 = trunc i32 %add to i8
-  store i8 %conv1, i8* %p, align 1
+  store i8 %conv1, ptr %p, align 1
   ret void
 }
 
-define void @memop_unsigned_char_add(i8* nocapture %p, i8 zeroext %x) nounwind {
+define void @memop_unsigned_char_add(ptr nocapture %p, i8 zeroext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_char_add:
 ; CHECK:  memb(r{{[0-9]+}}+#0) += r{{[0-9]+}}
   %conv = zext i8 %x to i32
-  %0 = load i8, i8* %p, align 1
+  %0 = load i8, ptr %p, align 1
   %conv1 = zext i8 %0 to i32
   %add = add nsw i32 %conv1, %conv
   %conv2 = trunc i32 %add to i8
-  store i8 %conv2, i8* %p, align 1
+  store i8 %conv2, ptr %p, align 1
   ret void
 }
 
-define void @memop_unsigned_char_sub(i8* nocapture %p, i8 zeroext %x) nounwind {
+define void @memop_unsigned_char_sub(ptr nocapture %p, i8 zeroext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_char_sub:
 ; CHECK:  memb(r{{[0-9]+}}+#0) -= r{{[0-9]+}}
   %conv = zext i8 %x to i32
-  %0 = load i8, i8* %p, align 1
+  %0 = load i8, ptr %p, align 1
   %conv1 = zext i8 %0 to i32
   %sub = sub nsw i32 %conv1, %conv
   %conv2 = trunc i32 %sub to i8
-  store i8 %conv2, i8* %p, align 1
+  store i8 %conv2, ptr %p, align 1
   ret void
 }
 
-define void @memop_unsigned_char_or(i8* nocapture %p, i8 zeroext %x) nounwind {
+define void @memop_unsigned_char_or(ptr nocapture %p, i8 zeroext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_char_or:
 ; CHECK:  memb(r{{[0-9]+}}+#0) |= r{{[0-9]+}}
-  %0 = load i8, i8* %p, align 1
+  %0 = load i8, ptr %p, align 1
   %or3 = or i8 %0, %x
-  store i8 %or3, i8* %p, align 1
+  store i8 %or3, ptr %p, align 1
   ret void
 }
 
-define void @memop_unsigned_char_and(i8* nocapture %p, i8 zeroext %x) nounwind {
+define void @memop_unsigned_char_and(ptr nocapture %p, i8 zeroext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_char_and:
 ; CHECK:  memb(r{{[0-9]+}}+#0) &= r{{[0-9]+}}
-  %0 = load i8, i8* %p, align 1
+  %0 = load i8, ptr %p, align 1
   %and3 = and i8 %0, %x
-  store i8 %and3, i8* %p, align 1
+  store i8 %and3, ptr %p, align 1
   ret void
 }
 
-define void @memop_unsigned_char_clrbit(i8* nocapture %p) nounwind {
+define void @memop_unsigned_char_clrbit(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_char_clrbit:
 ; CHECK:  memb(r{{[0-9]+}}+#0) = clrbit(#5)
-  %0 = load i8, i8* %p, align 1
+  %0 = load i8, ptr %p, align 1
   %conv = zext i8 %0 to i32
   %and = and i32 %conv, 223
   %conv1 = trunc i32 %and to i8
-  store i8 %conv1, i8* %p, align 1
+  store i8 %conv1, ptr %p, align 1
   ret void
 }
 
-define void @memop_unsigned_char_setbit(i8* nocapture %p) nounwind {
+define void @memop_unsigned_char_setbit(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_char_setbit:
 ; CHECK:  memb(r{{[0-9]+}}+#0) = setbit(#7)
-  %0 = load i8, i8* %p, align 1
+  %0 = load i8, ptr %p, align 1
   %conv = zext i8 %0 to i32
   %or = or i32 %conv, 128
   %conv1 = trunc i32 %or to i8
-  store i8 %conv1, i8* %p, align 1
+  store i8 %conv1, ptr %p, align 1
   ret void
 }
 
-define void @memop_unsigned_char_add5_index(i8* nocapture %p, i32 %i) nounwind {
+define void @memop_unsigned_char_add5_index(ptr nocapture %p, i32 %i) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_char_add5_index:
 ; CHECK:  memb(r{{[0-9]+}}+#0) += #5
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 %i
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 5
   %conv1 = trunc i32 %add to i8
-  store i8 %conv1, i8* %add.ptr, align 1
+  store i8 %conv1, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_unsigned_char_add_index(i8* nocapture %p, i32 %i, i8 zeroext %x) nounwind {
+define void @memop_unsigned_char_add_index(ptr nocapture %p, i32 %i, i8 zeroext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_char_add_index:
 ; CHECK:  memb(r{{[0-9]+}}+#0) += r{{[0-9]+}}
   %conv = zext i8 %x to i32
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 %i
+  %0 = load i8, ptr %add.ptr, align 1
   %conv1 = zext i8 %0 to i32
   %add = add nsw i32 %conv1, %conv
   %conv2 = trunc i32 %add to i8
-  store i8 %conv2, i8* %add.ptr, align 1
+  store i8 %conv2, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_unsigned_char_sub_index(i8* nocapture %p, i32 %i, i8 zeroext %x) nounwind {
+define void @memop_unsigned_char_sub_index(ptr nocapture %p, i32 %i, i8 zeroext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_char_sub_index:
 ; CHECK:  memb(r{{[0-9]+}}+#0) -= r{{[0-9]+}}
   %conv = zext i8 %x to i32
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 %i
+  %0 = load i8, ptr %add.ptr, align 1
   %conv1 = zext i8 %0 to i32
   %sub = sub nsw i32 %conv1, %conv
   %conv2 = trunc i32 %sub to i8
-  store i8 %conv2, i8* %add.ptr, align 1
+  store i8 %conv2, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_unsigned_char_or_index(i8* nocapture %p, i32 %i, i8 zeroext %x) nounwind {
+define void @memop_unsigned_char_or_index(ptr nocapture %p, i32 %i, i8 zeroext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_char_or_index:
 ; CHECK:  memb(r{{[0-9]+}}+#0) |= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 %i
+  %0 = load i8, ptr %add.ptr, align 1
   %or3 = or i8 %0, %x
-  store i8 %or3, i8* %add.ptr, align 1
+  store i8 %or3, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_unsigned_char_and_index(i8* nocapture %p, i32 %i, i8 zeroext %x) nounwind {
+define void @memop_unsigned_char_and_index(ptr nocapture %p, i32 %i, i8 zeroext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_char_and_index:
 ; CHECK:  memb(r{{[0-9]+}}+#0) &= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 %i
+  %0 = load i8, ptr %add.ptr, align 1
   %and3 = and i8 %0, %x
-  store i8 %and3, i8* %add.ptr, align 1
+  store i8 %and3, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_unsigned_char_clrbit_index(i8* nocapture %p, i32 %i) nounwind {
+define void @memop_unsigned_char_clrbit_index(ptr nocapture %p, i32 %i) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_char_clrbit_index:
 ; CHECK:  memb(r{{[0-9]+}}+#0) = clrbit(#5)
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 %i
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = zext i8 %0 to i32
   %and = and i32 %conv, 223
   %conv1 = trunc i32 %and to i8
-  store i8 %conv1, i8* %add.ptr, align 1
+  store i8 %conv1, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_unsigned_char_setbit_index(i8* nocapture %p, i32 %i) nounwind {
+define void @memop_unsigned_char_setbit_index(ptr nocapture %p, i32 %i) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_char_setbit_index:
 ; CHECK:  memb(r{{[0-9]+}}+#0) = setbit(#7)
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 %i
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = zext i8 %0 to i32
   %or = or i32 %conv, 128
   %conv1 = trunc i32 %or to i8
-  store i8 %conv1, i8* %add.ptr, align 1
+  store i8 %conv1, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_unsigned_char_add5_index5(i8* nocapture %p) nounwind {
+define void @memop_unsigned_char_add5_index5(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_char_add5_index5:
 ; CHECK:  memb(r{{[0-9]+}}+#5) += #5
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 5
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %conv, 5
   %conv1 = trunc i32 %add to i8
-  store i8 %conv1, i8* %add.ptr, align 1
+  store i8 %conv1, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_unsigned_char_add_index5(i8* nocapture %p, i8 zeroext %x) nounwind {
+define void @memop_unsigned_char_add_index5(ptr nocapture %p, i8 zeroext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_char_add_index5:
 ; CHECK:  memb(r{{[0-9]+}}+#5) += r{{[0-9]+}}
   %conv = zext i8 %x to i32
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 5
+  %0 = load i8, ptr %add.ptr, align 1
   %conv1 = zext i8 %0 to i32
   %add = add nsw i32 %conv1, %conv
   %conv2 = trunc i32 %add to i8
-  store i8 %conv2, i8* %add.ptr, align 1
+  store i8 %conv2, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_unsigned_char_sub_index5(i8* nocapture %p, i8 zeroext %x) nounwind {
+define void @memop_unsigned_char_sub_index5(ptr nocapture %p, i8 zeroext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_char_sub_index5:
 ; CHECK:  memb(r{{[0-9]+}}+#5) -= r{{[0-9]+}}
   %conv = zext i8 %x to i32
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 5
+  %0 = load i8, ptr %add.ptr, align 1
   %conv1 = zext i8 %0 to i32
   %sub = sub nsw i32 %conv1, %conv
   %conv2 = trunc i32 %sub to i8
-  store i8 %conv2, i8* %add.ptr, align 1
+  store i8 %conv2, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_unsigned_char_or_index5(i8* nocapture %p, i8 zeroext %x) nounwind {
+define void @memop_unsigned_char_or_index5(ptr nocapture %p, i8 zeroext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_char_or_index5:
 ; CHECK:  memb(r{{[0-9]+}}+#5) |= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 5
+  %0 = load i8, ptr %add.ptr, align 1
   %or3 = or i8 %0, %x
-  store i8 %or3, i8* %add.ptr, align 1
+  store i8 %or3, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_unsigned_char_and_index5(i8* nocapture %p, i8 zeroext %x) nounwind {
+define void @memop_unsigned_char_and_index5(ptr nocapture %p, i8 zeroext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_char_and_index5:
 ; CHECK:  memb(r{{[0-9]+}}+#5) &= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 5
+  %0 = load i8, ptr %add.ptr, align 1
   %and3 = and i8 %0, %x
-  store i8 %and3, i8* %add.ptr, align 1
+  store i8 %and3, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_unsigned_char_clrbit_index5(i8* nocapture %p) nounwind {
+define void @memop_unsigned_char_clrbit_index5(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_char_clrbit_index5:
 ; CHECK:  memb(r{{[0-9]+}}+#5) = clrbit(#5)
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 5
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = zext i8 %0 to i32
   %and = and i32 %conv, 223
   %conv1 = trunc i32 %and to i8
-  store i8 %conv1, i8* %add.ptr, align 1
+  store i8 %conv1, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_unsigned_char_setbit_index5(i8* nocapture %p) nounwind {
+define void @memop_unsigned_char_setbit_index5(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_char_setbit_index5:
 ; CHECK:  memb(r{{[0-9]+}}+#5) = setbit(#7)
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 5
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = zext i8 %0 to i32
   %or = or i32 %conv, 128
   %conv1 = trunc i32 %or to i8
-  store i8 %conv1, i8* %add.ptr, align 1
+  store i8 %conv1, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_signed_char_add5(i8* nocapture %p) nounwind {
+define void @memop_signed_char_add5(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_char_add5:
 ; CHECK:  memb(r{{[0-9]+}}+#0) += #5
-  %0 = load i8, i8* %p, align 1
+  %0 = load i8, ptr %p, align 1
   %conv2 = zext i8 %0 to i32
   %add = add nsw i32 %conv2, 5
   %conv1 = trunc i32 %add to i8
-  store i8 %conv1, i8* %p, align 1
+  store i8 %conv1, ptr %p, align 1
   ret void
 }
 
-define void @memop_signed_char_add(i8* nocapture %p, i8 signext %x) nounwind {
+define void @memop_signed_char_add(ptr nocapture %p, i8 signext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_char_add:
 ; CHECK:  memb(r{{[0-9]+}}+#0) += r{{[0-9]+}}
   %conv4 = zext i8 %x to i32
-  %0 = load i8, i8* %p, align 1
+  %0 = load i8, ptr %p, align 1
   %conv13 = zext i8 %0 to i32
   %add = add nsw i32 %conv13, %conv4
   %conv2 = trunc i32 %add to i8
-  store i8 %conv2, i8* %p, align 1
+  store i8 %conv2, ptr %p, align 1
   ret void
 }
 
-define void @memop_signed_char_sub(i8* nocapture %p, i8 signext %x) nounwind {
+define void @memop_signed_char_sub(ptr nocapture %p, i8 signext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_char_sub:
 ; CHECK:  memb(r{{[0-9]+}}+#0) -= r{{[0-9]+}}
   %conv4 = zext i8 %x to i32
-  %0 = load i8, i8* %p, align 1
+  %0 = load i8, ptr %p, align 1
   %conv13 = zext i8 %0 to i32
   %sub = sub nsw i32 %conv13, %conv4
   %conv2 = trunc i32 %sub to i8
-  store i8 %conv2, i8* %p, align 1
+  store i8 %conv2, ptr %p, align 1
   ret void
 }
 
-define void @memop_signed_char_or(i8* nocapture %p, i8 signext %x) nounwind {
+define void @memop_signed_char_or(ptr nocapture %p, i8 signext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_char_or:
 ; CHECK:  memb(r{{[0-9]+}}+#0) |= r{{[0-9]+}}
-  %0 = load i8, i8* %p, align 1
+  %0 = load i8, ptr %p, align 1
   %or3 = or i8 %0, %x
-  store i8 %or3, i8* %p, align 1
+  store i8 %or3, ptr %p, align 1
   ret void
 }
 
-define void @memop_signed_char_and(i8* nocapture %p, i8 signext %x) nounwind {
+define void @memop_signed_char_and(ptr nocapture %p, i8 signext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_char_and:
 ; CHECK:  memb(r{{[0-9]+}}+#0) &= r{{[0-9]+}}
-  %0 = load i8, i8* %p, align 1
+  %0 = load i8, ptr %p, align 1
   %and3 = and i8 %0, %x
-  store i8 %and3, i8* %p, align 1
+  store i8 %and3, ptr %p, align 1
   ret void
 }
 
-define void @memop_signed_char_clrbit(i8* nocapture %p) nounwind {
+define void @memop_signed_char_clrbit(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_char_clrbit:
 ; CHECK:  memb(r{{[0-9]+}}+#0) = clrbit(#5)
-  %0 = load i8, i8* %p, align 1
+  %0 = load i8, ptr %p, align 1
   %conv2 = zext i8 %0 to i32
   %and = and i32 %conv2, 223
   %conv1 = trunc i32 %and to i8
-  store i8 %conv1, i8* %p, align 1
+  store i8 %conv1, ptr %p, align 1
   ret void
 }
 
-define void @memop_signed_char_setbit(i8* nocapture %p) nounwind {
+define void @memop_signed_char_setbit(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_char_setbit:
 ; CHECK:  memb(r{{[0-9]+}}+#0) = setbit(#7)
-  %0 = load i8, i8* %p, align 1
+  %0 = load i8, ptr %p, align 1
   %conv2 = zext i8 %0 to i32
   %or = or i32 %conv2, 128
   %conv1 = trunc i32 %or to i8
-  store i8 %conv1, i8* %p, align 1
+  store i8 %conv1, ptr %p, align 1
   ret void
 }
 
-define void @memop_signed_char_add5_index(i8* nocapture %p, i32 %i) nounwind {
+define void @memop_signed_char_add5_index(ptr nocapture %p, i32 %i) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_char_add5_index:
 ; CHECK:  memb(r{{[0-9]+}}+#0) += #5
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 %i
+  %0 = load i8, ptr %add.ptr, align 1
   %conv2 = zext i8 %0 to i32
   %add = add nsw i32 %conv2, 5
   %conv1 = trunc i32 %add to i8
-  store i8 %conv1, i8* %add.ptr, align 1
+  store i8 %conv1, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_signed_char_add_index(i8* nocapture %p, i32 %i, i8 signext %x) nounwind {
+define void @memop_signed_char_add_index(ptr nocapture %p, i32 %i, i8 signext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_char_add_index:
 ; CHECK:  memb(r{{[0-9]+}}+#0) += r{{[0-9]+}}
   %conv4 = zext i8 %x to i32
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 %i
+  %0 = load i8, ptr %add.ptr, align 1
   %conv13 = zext i8 %0 to i32
   %add = add nsw i32 %conv13, %conv4
   %conv2 = trunc i32 %add to i8
-  store i8 %conv2, i8* %add.ptr, align 1
+  store i8 %conv2, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_signed_char_sub_index(i8* nocapture %p, i32 %i, i8 signext %x) nounwind {
+define void @memop_signed_char_sub_index(ptr nocapture %p, i32 %i, i8 signext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_char_sub_index:
 ; CHECK:  memb(r{{[0-9]+}}+#0) -= r{{[0-9]+}}
   %conv4 = zext i8 %x to i32
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 %i
+  %0 = load i8, ptr %add.ptr, align 1
   %conv13 = zext i8 %0 to i32
   %sub = sub nsw i32 %conv13, %conv4
   %conv2 = trunc i32 %sub to i8
-  store i8 %conv2, i8* %add.ptr, align 1
+  store i8 %conv2, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_signed_char_or_index(i8* nocapture %p, i32 %i, i8 signext %x) nounwind {
+define void @memop_signed_char_or_index(ptr nocapture %p, i32 %i, i8 signext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_char_or_index:
 ; CHECK:  memb(r{{[0-9]+}}+#0) |= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 %i
+  %0 = load i8, ptr %add.ptr, align 1
   %or3 = or i8 %0, %x
-  store i8 %or3, i8* %add.ptr, align 1
+  store i8 %or3, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_signed_char_and_index(i8* nocapture %p, i32 %i, i8 signext %x) nounwind {
+define void @memop_signed_char_and_index(ptr nocapture %p, i32 %i, i8 signext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_char_and_index:
 ; CHECK:  memb(r{{[0-9]+}}+#0) &= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 %i
+  %0 = load i8, ptr %add.ptr, align 1
   %and3 = and i8 %0, %x
-  store i8 %and3, i8* %add.ptr, align 1
+  store i8 %and3, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_signed_char_clrbit_index(i8* nocapture %p, i32 %i) nounwind {
+define void @memop_signed_char_clrbit_index(ptr nocapture %p, i32 %i) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_char_clrbit_index:
 ; CHECK:  memb(r{{[0-9]+}}+#0) = clrbit(#5)
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 %i
+  %0 = load i8, ptr %add.ptr, align 1
   %conv2 = zext i8 %0 to i32
   %and = and i32 %conv2, 223
   %conv1 = trunc i32 %and to i8
-  store i8 %conv1, i8* %add.ptr, align 1
+  store i8 %conv1, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_signed_char_setbit_index(i8* nocapture %p, i32 %i) nounwind {
+define void @memop_signed_char_setbit_index(ptr nocapture %p, i32 %i) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_char_setbit_index:
 ; CHECK:  memb(r{{[0-9]+}}+#0) = setbit(#7)
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 %i
+  %0 = load i8, ptr %add.ptr, align 1
   %conv2 = zext i8 %0 to i32
   %or = or i32 %conv2, 128
   %conv1 = trunc i32 %or to i8
-  store i8 %conv1, i8* %add.ptr, align 1
+  store i8 %conv1, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_signed_char_add5_index5(i8* nocapture %p) nounwind {
+define void @memop_signed_char_add5_index5(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_char_add5_index5:
 ; CHECK:  memb(r{{[0-9]+}}+#5) += #5
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 5
+  %0 = load i8, ptr %add.ptr, align 1
   %conv2 = zext i8 %0 to i32
   %add = add nsw i32 %conv2, 5
   %conv1 = trunc i32 %add to i8
-  store i8 %conv1, i8* %add.ptr, align 1
+  store i8 %conv1, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_signed_char_add_index5(i8* nocapture %p, i8 signext %x) nounwind {
+define void @memop_signed_char_add_index5(ptr nocapture %p, i8 signext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_char_add_index5:
 ; CHECK:  memb(r{{[0-9]+}}+#5) += r{{[0-9]+}}
   %conv4 = zext i8 %x to i32
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 5
+  %0 = load i8, ptr %add.ptr, align 1
   %conv13 = zext i8 %0 to i32
   %add = add nsw i32 %conv13, %conv4
   %conv2 = trunc i32 %add to i8
-  store i8 %conv2, i8* %add.ptr, align 1
+  store i8 %conv2, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_signed_char_sub_index5(i8* nocapture %p, i8 signext %x) nounwind {
+define void @memop_signed_char_sub_index5(ptr nocapture %p, i8 signext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_char_sub_index5:
 ; CHECK:  memb(r{{[0-9]+}}+#5) -= r{{[0-9]+}}
   %conv4 = zext i8 %x to i32
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 5
+  %0 = load i8, ptr %add.ptr, align 1
   %conv13 = zext i8 %0 to i32
   %sub = sub nsw i32 %conv13, %conv4
   %conv2 = trunc i32 %sub to i8
-  store i8 %conv2, i8* %add.ptr, align 1
+  store i8 %conv2, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_signed_char_or_index5(i8* nocapture %p, i8 signext %x) nounwind {
+define void @memop_signed_char_or_index5(ptr nocapture %p, i8 signext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_char_or_index5:
 ; CHECK:  memb(r{{[0-9]+}}+#5) |= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 5
+  %0 = load i8, ptr %add.ptr, align 1
   %or3 = or i8 %0, %x
-  store i8 %or3, i8* %add.ptr, align 1
+  store i8 %or3, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_signed_char_and_index5(i8* nocapture %p, i8 signext %x) nounwind {
+define void @memop_signed_char_and_index5(ptr nocapture %p, i8 signext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_char_and_index5:
 ; CHECK:  memb(r{{[0-9]+}}+#5) &= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 5
+  %0 = load i8, ptr %add.ptr, align 1
   %and3 = and i8 %0, %x
-  store i8 %and3, i8* %add.ptr, align 1
+  store i8 %and3, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_signed_char_clrbit_index5(i8* nocapture %p) nounwind {
+define void @memop_signed_char_clrbit_index5(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_char_clrbit_index5:
 ; CHECK:  memb(r{{[0-9]+}}+#5) = clrbit(#5)
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 5
+  %0 = load i8, ptr %add.ptr, align 1
   %conv2 = zext i8 %0 to i32
   %and = and i32 %conv2, 223
   %conv1 = trunc i32 %and to i8
-  store i8 %conv1, i8* %add.ptr, align 1
+  store i8 %conv1, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_signed_char_setbit_index5(i8* nocapture %p) nounwind {
+define void @memop_signed_char_setbit_index5(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_char_setbit_index5:
 ; CHECK:  memb(r{{[0-9]+}}+#5) = setbit(#7)
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 5
+  %0 = load i8, ptr %add.ptr, align 1
   %conv2 = zext i8 %0 to i32
   %or = or i32 %conv2, 128
   %conv1 = trunc i32 %or to i8
-  store i8 %conv1, i8* %add.ptr, align 1
+  store i8 %conv1, ptr %add.ptr, align 1
   ret void
 }
 
-define void @memop_unsigned_short_add5(i16* nocapture %p) nounwind {
+define void @memop_unsigned_short_add5(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_short_add5:
 ; CHECK:  memh(r{{[0-9]+}}+#0) += #5
-  %0 = load i16, i16* %p, align 2
+  %0 = load i16, ptr %p, align 2
   %conv = zext i16 %0 to i32
   %add = add nsw i32 %conv, 5
   %conv1 = trunc i32 %add to i16
-  store i16 %conv1, i16* %p, align 2
+  store i16 %conv1, ptr %p, align 2
   ret void
 }
 
-define void @memop_unsigned_short_add(i16* nocapture %p, i16 zeroext %x) nounwind {
+define void @memop_unsigned_short_add(ptr nocapture %p, i16 zeroext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_short_add:
 ; CHECK:  memh(r{{[0-9]+}}+#0) += r{{[0-9]+}}
   %conv = zext i16 %x to i32
-  %0 = load i16, i16* %p, align 2
+  %0 = load i16, ptr %p, align 2
   %conv1 = zext i16 %0 to i32
   %add = add nsw i32 %conv1, %conv
   %conv2 = trunc i32 %add to i16
-  store i16 %conv2, i16* %p, align 2
+  store i16 %conv2, ptr %p, align 2
   ret void
 }
 
-define void @memop_unsigned_short_sub(i16* nocapture %p, i16 zeroext %x) nounwind {
+define void @memop_unsigned_short_sub(ptr nocapture %p, i16 zeroext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_short_sub:
 ; CHECK:  memh(r{{[0-9]+}}+#0) -= r{{[0-9]+}}
   %conv = zext i16 %x to i32
-  %0 = load i16, i16* %p, align 2
+  %0 = load i16, ptr %p, align 2
   %conv1 = zext i16 %0 to i32
   %sub = sub nsw i32 %conv1, %conv
   %conv2 = trunc i32 %sub to i16
-  store i16 %conv2, i16* %p, align 2
+  store i16 %conv2, ptr %p, align 2
   ret void
 }
 
-define void @memop_unsigned_short_or(i16* nocapture %p, i16 zeroext %x) nounwind {
+define void @memop_unsigned_short_or(ptr nocapture %p, i16 zeroext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_short_or:
 ; CHECK:  memh(r{{[0-9]+}}+#0) |= r{{[0-9]+}}
-  %0 = load i16, i16* %p, align 2
+  %0 = load i16, ptr %p, align 2
   %or3 = or i16 %0, %x
-  store i16 %or3, i16* %p, align 2
+  store i16 %or3, ptr %p, align 2
   ret void
 }
 
-define void @memop_unsigned_short_and(i16* nocapture %p, i16 zeroext %x) nounwind {
+define void @memop_unsigned_short_and(ptr nocapture %p, i16 zeroext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_short_and:
 ; CHECK:  memh(r{{[0-9]+}}+#0) &= r{{[0-9]+}}
-  %0 = load i16, i16* %p, align 2
+  %0 = load i16, ptr %p, align 2
   %and3 = and i16 %0, %x
-  store i16 %and3, i16* %p, align 2
+  store i16 %and3, ptr %p, align 2
   ret void
 }
 
-define void @memop_unsigned_short_clrbit(i16* nocapture %p) nounwind {
+define void @memop_unsigned_short_clrbit(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_short_clrbit:
 ; CHECK:  memh(r{{[0-9]+}}+#0) = clrbit(#5)
-  %0 = load i16, i16* %p, align 2
+  %0 = load i16, ptr %p, align 2
   %conv = zext i16 %0 to i32
   %and = and i32 %conv, 65503
   %conv1 = trunc i32 %and to i16
-  store i16 %conv1, i16* %p, align 2
+  store i16 %conv1, ptr %p, align 2
   ret void
 }
 
-define void @memop_unsigned_short_setbit(i16* nocapture %p) nounwind {
+define void @memop_unsigned_short_setbit(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_short_setbit:
 ; CHECK:  memh(r{{[0-9]+}}+#0) = setbit(#7)
-  %0 = load i16, i16* %p, align 2
+  %0 = load i16, ptr %p, align 2
   %conv = zext i16 %0 to i32
   %or = or i32 %conv, 128
   %conv1 = trunc i32 %or to i16
-  store i16 %conv1, i16* %p, align 2
+  store i16 %conv1, ptr %p, align 2
   ret void
 }
 
-define void @memop_unsigned_short_add5_index(i16* nocapture %p, i32 %i) nounwind {
+define void @memop_unsigned_short_add5_index(ptr nocapture %p, i32 %i) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_short_add5_index:
 ; CHECK:  memh(r{{[0-9]+}}+#0) += #5
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 %i
+  %0 = load i16, ptr %add.ptr, align 2
   %conv = zext i16 %0 to i32
   %add = add nsw i32 %conv, 5
   %conv1 = trunc i32 %add to i16
-  store i16 %conv1, i16* %add.ptr, align 2
+  store i16 %conv1, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_unsigned_short_add_index(i16* nocapture %p, i32 %i, i16 zeroext %x) nounwind {
+define void @memop_unsigned_short_add_index(ptr nocapture %p, i32 %i, i16 zeroext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_short_add_index:
 ; CHECK:  memh(r{{[0-9]+}}+#0) += r{{[0-9]+}}
   %conv = zext i16 %x to i32
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 %i
+  %0 = load i16, ptr %add.ptr, align 2
   %conv1 = zext i16 %0 to i32
   %add = add nsw i32 %conv1, %conv
   %conv2 = trunc i32 %add to i16
-  store i16 %conv2, i16* %add.ptr, align 2
+  store i16 %conv2, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_unsigned_short_sub_index(i16* nocapture %p, i32 %i, i16 zeroext %x) nounwind {
+define void @memop_unsigned_short_sub_index(ptr nocapture %p, i32 %i, i16 zeroext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_short_sub_index:
 ; CHECK:  memh(r{{[0-9]+}}+#0) -= r{{[0-9]+}}
   %conv = zext i16 %x to i32
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 %i
+  %0 = load i16, ptr %add.ptr, align 2
   %conv1 = zext i16 %0 to i32
   %sub = sub nsw i32 %conv1, %conv
   %conv2 = trunc i32 %sub to i16
-  store i16 %conv2, i16* %add.ptr, align 2
+  store i16 %conv2, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_unsigned_short_or_index(i16* nocapture %p, i32 %i, i16 zeroext %x) nounwind {
+define void @memop_unsigned_short_or_index(ptr nocapture %p, i32 %i, i16 zeroext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_short_or_index:
 ; CHECK:  memh(r{{[0-9]+}}+#0) |= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 %i
+  %0 = load i16, ptr %add.ptr, align 2
   %or3 = or i16 %0, %x
-  store i16 %or3, i16* %add.ptr, align 2
+  store i16 %or3, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_unsigned_short_and_index(i16* nocapture %p, i32 %i, i16 zeroext %x) nounwind {
+define void @memop_unsigned_short_and_index(ptr nocapture %p, i32 %i, i16 zeroext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_short_and_index:
 ; CHECK:  memh(r{{[0-9]+}}+#0) &= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 %i
+  %0 = load i16, ptr %add.ptr, align 2
   %and3 = and i16 %0, %x
-  store i16 %and3, i16* %add.ptr, align 2
+  store i16 %and3, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_unsigned_short_clrbit_index(i16* nocapture %p, i32 %i) nounwind {
+define void @memop_unsigned_short_clrbit_index(ptr nocapture %p, i32 %i) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_short_clrbit_index:
 ; CHECK:  memh(r{{[0-9]+}}+#0) = clrbit(#5)
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 %i
+  %0 = load i16, ptr %add.ptr, align 2
   %conv = zext i16 %0 to i32
   %and = and i32 %conv, 65503
   %conv1 = trunc i32 %and to i16
-  store i16 %conv1, i16* %add.ptr, align 2
+  store i16 %conv1, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_unsigned_short_setbit_index(i16* nocapture %p, i32 %i) nounwind {
+define void @memop_unsigned_short_setbit_index(ptr nocapture %p, i32 %i) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_short_setbit_index:
 ; CHECK:  memh(r{{[0-9]+}}+#0) = setbit(#7)
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 %i
+  %0 = load i16, ptr %add.ptr, align 2
   %conv = zext i16 %0 to i32
   %or = or i32 %conv, 128
   %conv1 = trunc i32 %or to i16
-  store i16 %conv1, i16* %add.ptr, align 2
+  store i16 %conv1, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_unsigned_short_add5_index5(i16* nocapture %p) nounwind {
+define void @memop_unsigned_short_add5_index5(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_short_add5_index5:
 ; CHECK:  memh(r{{[0-9]+}}+#10) += #5
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 5
+  %0 = load i16, ptr %add.ptr, align 2
   %conv = zext i16 %0 to i32
   %add = add nsw i32 %conv, 5
   %conv1 = trunc i32 %add to i16
-  store i16 %conv1, i16* %add.ptr, align 2
+  store i16 %conv1, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_unsigned_short_add_index5(i16* nocapture %p, i16 zeroext %x) nounwind {
+define void @memop_unsigned_short_add_index5(ptr nocapture %p, i16 zeroext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_short_add_index5:
 ; CHECK:  memh(r{{[0-9]+}}+#10) += r{{[0-9]+}}
   %conv = zext i16 %x to i32
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 5
+  %0 = load i16, ptr %add.ptr, align 2
   %conv1 = zext i16 %0 to i32
   %add = add nsw i32 %conv1, %conv
   %conv2 = trunc i32 %add to i16
-  store i16 %conv2, i16* %add.ptr, align 2
+  store i16 %conv2, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_unsigned_short_sub_index5(i16* nocapture %p, i16 zeroext %x) nounwind {
+define void @memop_unsigned_short_sub_index5(ptr nocapture %p, i16 zeroext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_short_sub_index5:
 ; CHECK:  memh(r{{[0-9]+}}+#10) -= r{{[0-9]+}}
   %conv = zext i16 %x to i32
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 5
+  %0 = load i16, ptr %add.ptr, align 2
   %conv1 = zext i16 %0 to i32
   %sub = sub nsw i32 %conv1, %conv
   %conv2 = trunc i32 %sub to i16
-  store i16 %conv2, i16* %add.ptr, align 2
+  store i16 %conv2, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_unsigned_short_or_index5(i16* nocapture %p, i16 zeroext %x) nounwind {
+define void @memop_unsigned_short_or_index5(ptr nocapture %p, i16 zeroext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_short_or_index5:
 ; CHECK:  memh(r{{[0-9]+}}+#10) |= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 5
+  %0 = load i16, ptr %add.ptr, align 2
   %or3 = or i16 %0, %x
-  store i16 %or3, i16* %add.ptr, align 2
+  store i16 %or3, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_unsigned_short_and_index5(i16* nocapture %p, i16 zeroext %x) nounwind {
+define void @memop_unsigned_short_and_index5(ptr nocapture %p, i16 zeroext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_short_and_index5:
 ; CHECK:  memh(r{{[0-9]+}}+#10) &= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 5
+  %0 = load i16, ptr %add.ptr, align 2
   %and3 = and i16 %0, %x
-  store i16 %and3, i16* %add.ptr, align 2
+  store i16 %and3, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_unsigned_short_clrbit_index5(i16* nocapture %p) nounwind {
+define void @memop_unsigned_short_clrbit_index5(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_short_clrbit_index5:
 ; CHECK:  memh(r{{[0-9]+}}+#10) = clrbit(#5)
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 5
+  %0 = load i16, ptr %add.ptr, align 2
   %conv = zext i16 %0 to i32
   %and = and i32 %conv, 65503
   %conv1 = trunc i32 %and to i16
-  store i16 %conv1, i16* %add.ptr, align 2
+  store i16 %conv1, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_unsigned_short_setbit_index5(i16* nocapture %p) nounwind {
+define void @memop_unsigned_short_setbit_index5(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_short_setbit_index5:
 ; CHECK:  memh(r{{[0-9]+}}+#10) = setbit(#7)
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 5
+  %0 = load i16, ptr %add.ptr, align 2
   %conv = zext i16 %0 to i32
   %or = or i32 %conv, 128
   %conv1 = trunc i32 %or to i16
-  store i16 %conv1, i16* %add.ptr, align 2
+  store i16 %conv1, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_signed_short_add5(i16* nocapture %p) nounwind {
+define void @memop_signed_short_add5(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_short_add5:
 ; CHECK:  memh(r{{[0-9]+}}+#0) += #5
-  %0 = load i16, i16* %p, align 2
+  %0 = load i16, ptr %p, align 2
   %conv2 = zext i16 %0 to i32
   %add = add nsw i32 %conv2, 5
   %conv1 = trunc i32 %add to i16
-  store i16 %conv1, i16* %p, align 2
+  store i16 %conv1, ptr %p, align 2
   ret void
 }
 
-define void @memop_signed_short_add(i16* nocapture %p, i16 signext %x) nounwind {
+define void @memop_signed_short_add(ptr nocapture %p, i16 signext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_short_add:
 ; CHECK:  memh(r{{[0-9]+}}+#0) += r{{[0-9]+}}
   %conv4 = zext i16 %x to i32
-  %0 = load i16, i16* %p, align 2
+  %0 = load i16, ptr %p, align 2
   %conv13 = zext i16 %0 to i32
   %add = add nsw i32 %conv13, %conv4
   %conv2 = trunc i32 %add to i16
-  store i16 %conv2, i16* %p, align 2
+  store i16 %conv2, ptr %p, align 2
   ret void
 }
 
-define void @memop_signed_short_sub(i16* nocapture %p, i16 signext %x) nounwind {
+define void @memop_signed_short_sub(ptr nocapture %p, i16 signext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_short_sub:
 ; CHECK:  memh(r{{[0-9]+}}+#0) -= r{{[0-9]+}}
   %conv4 = zext i16 %x to i32
-  %0 = load i16, i16* %p, align 2
+  %0 = load i16, ptr %p, align 2
   %conv13 = zext i16 %0 to i32
   %sub = sub nsw i32 %conv13, %conv4
   %conv2 = trunc i32 %sub to i16
-  store i16 %conv2, i16* %p, align 2
+  store i16 %conv2, ptr %p, align 2
   ret void
 }
 
-define void @memop_signed_short_or(i16* nocapture %p, i16 signext %x) nounwind {
+define void @memop_signed_short_or(ptr nocapture %p, i16 signext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_short_or:
 ; CHECK:  memh(r{{[0-9]+}}+#0) |= r{{[0-9]+}}
-  %0 = load i16, i16* %p, align 2
+  %0 = load i16, ptr %p, align 2
   %or3 = or i16 %0, %x
-  store i16 %or3, i16* %p, align 2
+  store i16 %or3, ptr %p, align 2
   ret void
 }
 
-define void @memop_signed_short_and(i16* nocapture %p, i16 signext %x) nounwind {
+define void @memop_signed_short_and(ptr nocapture %p, i16 signext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_short_and:
 ; CHECK:  memh(r{{[0-9]+}}+#0) &= r{{[0-9]+}}
-  %0 = load i16, i16* %p, align 2
+  %0 = load i16, ptr %p, align 2
   %and3 = and i16 %0, %x
-  store i16 %and3, i16* %p, align 2
+  store i16 %and3, ptr %p, align 2
   ret void
 }
 
-define void @memop_signed_short_clrbit(i16* nocapture %p) nounwind {
+define void @memop_signed_short_clrbit(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_short_clrbit:
 ; CHECK:  memh(r{{[0-9]+}}+#0) = clrbit(#5)
-  %0 = load i16, i16* %p, align 2
+  %0 = load i16, ptr %p, align 2
   %conv2 = zext i16 %0 to i32
   %and = and i32 %conv2, 65503
   %conv1 = trunc i32 %and to i16
-  store i16 %conv1, i16* %p, align 2
+  store i16 %conv1, ptr %p, align 2
   ret void
 }
 
-define void @memop_signed_short_setbit(i16* nocapture %p) nounwind {
+define void @memop_signed_short_setbit(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_short_setbit:
 ; CHECK:  memh(r{{[0-9]+}}+#0) = setbit(#7)
-  %0 = load i16, i16* %p, align 2
+  %0 = load i16, ptr %p, align 2
   %conv2 = zext i16 %0 to i32
   %or = or i32 %conv2, 128
   %conv1 = trunc i32 %or to i16
-  store i16 %conv1, i16* %p, align 2
+  store i16 %conv1, ptr %p, align 2
   ret void
 }
 
-define void @memop_signed_short_add5_index(i16* nocapture %p, i32 %i) nounwind {
+define void @memop_signed_short_add5_index(ptr nocapture %p, i32 %i) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_short_add5_index:
 ; CHECK:  memh(r{{[0-9]+}}+#0) += #5
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 %i
+  %0 = load i16, ptr %add.ptr, align 2
   %conv2 = zext i16 %0 to i32
   %add = add nsw i32 %conv2, 5
   %conv1 = trunc i32 %add to i16
-  store i16 %conv1, i16* %add.ptr, align 2
+  store i16 %conv1, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_signed_short_add_index(i16* nocapture %p, i32 %i, i16 signext %x) nounwind {
+define void @memop_signed_short_add_index(ptr nocapture %p, i32 %i, i16 signext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_short_add_index:
 ; CHECK:  memh(r{{[0-9]+}}+#0) += r{{[0-9]+}}
   %conv4 = zext i16 %x to i32
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 %i
+  %0 = load i16, ptr %add.ptr, align 2
   %conv13 = zext i16 %0 to i32
   %add = add nsw i32 %conv13, %conv4
   %conv2 = trunc i32 %add to i16
-  store i16 %conv2, i16* %add.ptr, align 2
+  store i16 %conv2, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_signed_short_sub_index(i16* nocapture %p, i32 %i, i16 signext %x) nounwind {
+define void @memop_signed_short_sub_index(ptr nocapture %p, i32 %i, i16 signext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_short_sub_index:
 ; CHECK:  memh(r{{[0-9]+}}+#0) -= r{{[0-9]+}}
   %conv4 = zext i16 %x to i32
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 %i
+  %0 = load i16, ptr %add.ptr, align 2
   %conv13 = zext i16 %0 to i32
   %sub = sub nsw i32 %conv13, %conv4
   %conv2 = trunc i32 %sub to i16
-  store i16 %conv2, i16* %add.ptr, align 2
+  store i16 %conv2, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_signed_short_or_index(i16* nocapture %p, i32 %i, i16 signext %x) nounwind {
+define void @memop_signed_short_or_index(ptr nocapture %p, i32 %i, i16 signext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_short_or_index:
 ; CHECK:  memh(r{{[0-9]+}}+#0) |= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 %i
+  %0 = load i16, ptr %add.ptr, align 2
   %or3 = or i16 %0, %x
-  store i16 %or3, i16* %add.ptr, align 2
+  store i16 %or3, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_signed_short_and_index(i16* nocapture %p, i32 %i, i16 signext %x) nounwind {
+define void @memop_signed_short_and_index(ptr nocapture %p, i32 %i, i16 signext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_short_and_index:
 ; CHECK:  memh(r{{[0-9]+}}+#0) &= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 %i
+  %0 = load i16, ptr %add.ptr, align 2
   %and3 = and i16 %0, %x
-  store i16 %and3, i16* %add.ptr, align 2
+  store i16 %and3, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_signed_short_clrbit_index(i16* nocapture %p, i32 %i) nounwind {
+define void @memop_signed_short_clrbit_index(ptr nocapture %p, i32 %i) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_short_clrbit_index:
 ; CHECK:  memh(r{{[0-9]+}}+#0) = clrbit(#5)
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 %i
+  %0 = load i16, ptr %add.ptr, align 2
   %conv2 = zext i16 %0 to i32
   %and = and i32 %conv2, 65503
   %conv1 = trunc i32 %and to i16
-  store i16 %conv1, i16* %add.ptr, align 2
+  store i16 %conv1, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_signed_short_setbit_index(i16* nocapture %p, i32 %i) nounwind {
+define void @memop_signed_short_setbit_index(ptr nocapture %p, i32 %i) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_short_setbit_index:
 ; CHECK:  memh(r{{[0-9]+}}+#0) = setbit(#7)
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 %i
+  %0 = load i16, ptr %add.ptr, align 2
   %conv2 = zext i16 %0 to i32
   %or = or i32 %conv2, 128
   %conv1 = trunc i32 %or to i16
-  store i16 %conv1, i16* %add.ptr, align 2
+  store i16 %conv1, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_signed_short_add5_index5(i16* nocapture %p) nounwind {
+define void @memop_signed_short_add5_index5(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_short_add5_index5:
 ; CHECK:  memh(r{{[0-9]+}}+#10) += #5
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 5
+  %0 = load i16, ptr %add.ptr, align 2
   %conv2 = zext i16 %0 to i32
   %add = add nsw i32 %conv2, 5
   %conv1 = trunc i32 %add to i16
-  store i16 %conv1, i16* %add.ptr, align 2
+  store i16 %conv1, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_signed_short_add_index5(i16* nocapture %p, i16 signext %x) nounwind {
+define void @memop_signed_short_add_index5(ptr nocapture %p, i16 signext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_short_add_index5:
 ; CHECK:  memh(r{{[0-9]+}}+#10) += r{{[0-9]+}}
   %conv4 = zext i16 %x to i32
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 5
+  %0 = load i16, ptr %add.ptr, align 2
   %conv13 = zext i16 %0 to i32
   %add = add nsw i32 %conv13, %conv4
   %conv2 = trunc i32 %add to i16
-  store i16 %conv2, i16* %add.ptr, align 2
+  store i16 %conv2, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_signed_short_sub_index5(i16* nocapture %p, i16 signext %x) nounwind {
+define void @memop_signed_short_sub_index5(ptr nocapture %p, i16 signext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_short_sub_index5:
 ; CHECK:  memh(r{{[0-9]+}}+#10) -= r{{[0-9]+}}
   %conv4 = zext i16 %x to i32
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 5
+  %0 = load i16, ptr %add.ptr, align 2
   %conv13 = zext i16 %0 to i32
   %sub = sub nsw i32 %conv13, %conv4
   %conv2 = trunc i32 %sub to i16
-  store i16 %conv2, i16* %add.ptr, align 2
+  store i16 %conv2, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_signed_short_or_index5(i16* nocapture %p, i16 signext %x) nounwind {
+define void @memop_signed_short_or_index5(ptr nocapture %p, i16 signext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_short_or_index5:
 ; CHECK:  memh(r{{[0-9]+}}+#10) |= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 5
+  %0 = load i16, ptr %add.ptr, align 2
   %or3 = or i16 %0, %x
-  store i16 %or3, i16* %add.ptr, align 2
+  store i16 %or3, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_signed_short_and_index5(i16* nocapture %p, i16 signext %x) nounwind {
+define void @memop_signed_short_and_index5(ptr nocapture %p, i16 signext %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_short_and_index5:
 ; CHECK:  memh(r{{[0-9]+}}+#10) &= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 5
+  %0 = load i16, ptr %add.ptr, align 2
   %and3 = and i16 %0, %x
-  store i16 %and3, i16* %add.ptr, align 2
+  store i16 %and3, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_signed_short_clrbit_index5(i16* nocapture %p) nounwind {
+define void @memop_signed_short_clrbit_index5(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_short_clrbit_index5:
 ; CHECK:  memh(r{{[0-9]+}}+#10) = clrbit(#5)
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 5
+  %0 = load i16, ptr %add.ptr, align 2
   %conv2 = zext i16 %0 to i32
   %and = and i32 %conv2, 65503
   %conv1 = trunc i32 %and to i16
-  store i16 %conv1, i16* %add.ptr, align 2
+  store i16 %conv1, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_signed_short_setbit_index5(i16* nocapture %p) nounwind {
+define void @memop_signed_short_setbit_index5(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_short_setbit_index5:
 ; CHECK:  memh(r{{[0-9]+}}+#10) = setbit(#7)
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 5
+  %0 = load i16, ptr %add.ptr, align 2
   %conv2 = zext i16 %0 to i32
   %or = or i32 %conv2, 128
   %conv1 = trunc i32 %or to i16
-  store i16 %conv1, i16* %add.ptr, align 2
+  store i16 %conv1, ptr %add.ptr, align 2
   ret void
 }
 
-define void @memop_signed_int_add5(i32* nocapture %p) nounwind {
+define void @memop_signed_int_add5(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_int_add5:
 ; CHECK:  memw(r{{[0-9]+}}+#0) += #5
-  %0 = load i32, i32* %p, align 4
+  %0 = load i32, ptr %p, align 4
   %add = add i32 %0, 5
-  store i32 %add, i32* %p, align 4
+  store i32 %add, ptr %p, align 4
   ret void
 }
 
-define void @memop_signed_int_add(i32* nocapture %p, i32 %x) nounwind {
+define void @memop_signed_int_add(ptr nocapture %p, i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_int_add:
 ; CHECK:  memw(r{{[0-9]+}}+#0) += r{{[0-9]+}}
-  %0 = load i32, i32* %p, align 4
+  %0 = load i32, ptr %p, align 4
   %add = add i32 %0, %x
-  store i32 %add, i32* %p, align 4
+  store i32 %add, ptr %p, align 4
   ret void
 }
 
-define void @memop_signed_int_sub(i32* nocapture %p, i32 %x) nounwind {
+define void @memop_signed_int_sub(ptr nocapture %p, i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_int_sub:
 ; CHECK:  memw(r{{[0-9]+}}+#0) -= r{{[0-9]+}}
-  %0 = load i32, i32* %p, align 4
+  %0 = load i32, ptr %p, align 4
   %sub = sub i32 %0, %x
-  store i32 %sub, i32* %p, align 4
+  store i32 %sub, ptr %p, align 4
   ret void
 }
 
-define void @memop_signed_int_or(i32* nocapture %p, i32 %x) nounwind {
+define void @memop_signed_int_or(ptr nocapture %p, i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_int_or:
 ; CHECK:  memw(r{{[0-9]+}}+#0) |= r{{[0-9]+}}
-  %0 = load i32, i32* %p, align 4
+  %0 = load i32, ptr %p, align 4
   %or = or i32 %0, %x
-  store i32 %or, i32* %p, align 4
+  store i32 %or, ptr %p, align 4
   ret void
 }
 
-define void @memop_signed_int_and(i32* nocapture %p, i32 %x) nounwind {
+define void @memop_signed_int_and(ptr nocapture %p, i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_int_and:
 ; CHECK:  memw(r{{[0-9]+}}+#0) &= r{{[0-9]+}}
-  %0 = load i32, i32* %p, align 4
+  %0 = load i32, ptr %p, align 4
   %and = and i32 %0, %x
-  store i32 %and, i32* %p, align 4
+  store i32 %and, ptr %p, align 4
   ret void
 }
 
-define void @memop_signed_int_clrbit(i32* nocapture %p) nounwind {
+define void @memop_signed_int_clrbit(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_int_clrbit:
 ; CHECK:  memw(r{{[0-9]+}}+#0) = clrbit(#5)
-  %0 = load i32, i32* %p, align 4
+  %0 = load i32, ptr %p, align 4
   %and = and i32 %0, -33
-  store i32 %and, i32* %p, align 4
+  store i32 %and, ptr %p, align 4
   ret void
 }
 
-define void @memop_signed_int_setbit(i32* nocapture %p) nounwind {
+define void @memop_signed_int_setbit(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_int_setbit:
 ; CHECK:  memw(r{{[0-9]+}}+#0) = setbit(#7)
-  %0 = load i32, i32* %p, align 4
+  %0 = load i32, ptr %p, align 4
   %or = or i32 %0, 128
-  store i32 %or, i32* %p, align 4
+  store i32 %or, ptr %p, align 4
   ret void
 }
 
-define void @memop_signed_int_add5_index(i32* nocapture %p, i32 %i) nounwind {
+define void @memop_signed_int_add5_index(ptr nocapture %p, i32 %i) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_int_add5_index:
 ; CHECK:  memw(r{{[0-9]+}}+#0) += #5
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 %i
+  %0 = load i32, ptr %add.ptr, align 4
   %add = add i32 %0, 5
-  store i32 %add, i32* %add.ptr, align 4
+  store i32 %add, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_signed_int_add_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
+define void @memop_signed_int_add_index(ptr nocapture %p, i32 %i, i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_int_add_index:
 ; CHECK:  memw(r{{[0-9]+}}+#0) += r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 %i
+  %0 = load i32, ptr %add.ptr, align 4
   %add = add i32 %0, %x
-  store i32 %add, i32* %add.ptr, align 4
+  store i32 %add, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_signed_int_sub_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
+define void @memop_signed_int_sub_index(ptr nocapture %p, i32 %i, i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_int_sub_index:
 ; CHECK:  memw(r{{[0-9]+}}+#0) -= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 %i
+  %0 = load i32, ptr %add.ptr, align 4
   %sub = sub i32 %0, %x
-  store i32 %sub, i32* %add.ptr, align 4
+  store i32 %sub, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_signed_int_or_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
+define void @memop_signed_int_or_index(ptr nocapture %p, i32 %i, i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_int_or_index:
 ; CHECK:  memw(r{{[0-9]+}}+#0) |= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 %i
+  %0 = load i32, ptr %add.ptr, align 4
   %or = or i32 %0, %x
-  store i32 %or, i32* %add.ptr, align 4
+  store i32 %or, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_signed_int_and_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
+define void @memop_signed_int_and_index(ptr nocapture %p, i32 %i, i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_int_and_index:
 ; CHECK:  memw(r{{[0-9]+}}+#0) &= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 %i
+  %0 = load i32, ptr %add.ptr, align 4
   %and = and i32 %0, %x
-  store i32 %and, i32* %add.ptr, align 4
+  store i32 %and, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_signed_int_clrbit_index(i32* nocapture %p, i32 %i) nounwind {
+define void @memop_signed_int_clrbit_index(ptr nocapture %p, i32 %i) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_int_clrbit_index:
 ; CHECK:  memw(r{{[0-9]+}}+#0) = clrbit(#5)
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 %i
+  %0 = load i32, ptr %add.ptr, align 4
   %and = and i32 %0, -33
-  store i32 %and, i32* %add.ptr, align 4
+  store i32 %and, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_signed_int_setbit_index(i32* nocapture %p, i32 %i) nounwind {
+define void @memop_signed_int_setbit_index(ptr nocapture %p, i32 %i) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_int_setbit_index:
 ; CHECK:  memw(r{{[0-9]+}}+#0) = setbit(#7)
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 %i
+  %0 = load i32, ptr %add.ptr, align 4
   %or = or i32 %0, 128
-  store i32 %or, i32* %add.ptr, align 4
+  store i32 %or, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_signed_int_add5_index5(i32* nocapture %p) nounwind {
+define void @memop_signed_int_add5_index5(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_int_add5_index5:
 ; CHECK:  memw(r{{[0-9]+}}+#20) += #5
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 5
+  %0 = load i32, ptr %add.ptr, align 4
   %add = add i32 %0, 5
-  store i32 %add, i32* %add.ptr, align 4
+  store i32 %add, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_signed_int_add_index5(i32* nocapture %p, i32 %x) nounwind {
+define void @memop_signed_int_add_index5(ptr nocapture %p, i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_int_add_index5:
 ; CHECK:  memw(r{{[0-9]+}}+#20) += r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 5
+  %0 = load i32, ptr %add.ptr, align 4
   %add = add i32 %0, %x
-  store i32 %add, i32* %add.ptr, align 4
+  store i32 %add, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_signed_int_sub_index5(i32* nocapture %p, i32 %x) nounwind {
+define void @memop_signed_int_sub_index5(ptr nocapture %p, i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_int_sub_index5:
 ; CHECK:  memw(r{{[0-9]+}}+#20) -= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 5
+  %0 = load i32, ptr %add.ptr, align 4
   %sub = sub i32 %0, %x
-  store i32 %sub, i32* %add.ptr, align 4
+  store i32 %sub, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_signed_int_or_index5(i32* nocapture %p, i32 %x) nounwind {
+define void @memop_signed_int_or_index5(ptr nocapture %p, i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_int_or_index5:
 ; CHECK:  memw(r{{[0-9]+}}+#20) |= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 5
+  %0 = load i32, ptr %add.ptr, align 4
   %or = or i32 %0, %x
-  store i32 %or, i32* %add.ptr, align 4
+  store i32 %or, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_signed_int_and_index5(i32* nocapture %p, i32 %x) nounwind {
+define void @memop_signed_int_and_index5(ptr nocapture %p, i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_int_and_index5:
 ; CHECK:  memw(r{{[0-9]+}}+#20) &= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 5
+  %0 = load i32, ptr %add.ptr, align 4
   %and = and i32 %0, %x
-  store i32 %and, i32* %add.ptr, align 4
+  store i32 %and, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_signed_int_clrbit_index5(i32* nocapture %p) nounwind {
+define void @memop_signed_int_clrbit_index5(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_int_clrbit_index5:
 ; CHECK:  memw(r{{[0-9]+}}+#20) = clrbit(#5)
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 5
+  %0 = load i32, ptr %add.ptr, align 4
   %and = and i32 %0, -33
-  store i32 %and, i32* %add.ptr, align 4
+  store i32 %and, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_signed_int_setbit_index5(i32* nocapture %p) nounwind {
+define void @memop_signed_int_setbit_index5(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_signed_int_setbit_index5:
 ; CHECK:  memw(r{{[0-9]+}}+#20) = setbit(#7)
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 5
+  %0 = load i32, ptr %add.ptr, align 4
   %or = or i32 %0, 128
-  store i32 %or, i32* %add.ptr, align 4
+  store i32 %or, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_unsigned_int_add5(i32* nocapture %p) nounwind {
+define void @memop_unsigned_int_add5(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_int_add5:
 ; CHECK:  memw(r{{[0-9]+}}+#0) += #5
-  %0 = load i32, i32* %p, align 4
+  %0 = load i32, ptr %p, align 4
   %add = add nsw i32 %0, 5
-  store i32 %add, i32* %p, align 4
+  store i32 %add, ptr %p, align 4
   ret void
 }
 
-define void @memop_unsigned_int_add(i32* nocapture %p, i32 %x) nounwind {
+define void @memop_unsigned_int_add(ptr nocapture %p, i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_int_add:
 ; CHECK:  memw(r{{[0-9]+}}+#0) += r{{[0-9]+}}
-  %0 = load i32, i32* %p, align 4
+  %0 = load i32, ptr %p, align 4
   %add = add nsw i32 %0, %x
-  store i32 %add, i32* %p, align 4
+  store i32 %add, ptr %p, align 4
   ret void
 }
 
-define void @memop_unsigned_int_sub(i32* nocapture %p, i32 %x) nounwind {
+define void @memop_unsigned_int_sub(ptr nocapture %p, i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_int_sub:
 ; CHECK:  memw(r{{[0-9]+}}+#0) -= r{{[0-9]+}}
-  %0 = load i32, i32* %p, align 4
+  %0 = load i32, ptr %p, align 4
   %sub = sub nsw i32 %0, %x
-  store i32 %sub, i32* %p, align 4
+  store i32 %sub, ptr %p, align 4
   ret void
 }
 
-define void @memop_unsigned_int_or(i32* nocapture %p, i32 %x) nounwind {
+define void @memop_unsigned_int_or(ptr nocapture %p, i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_int_or:
 ; CHECK:  memw(r{{[0-9]+}}+#0) |= r{{[0-9]+}}
-  %0 = load i32, i32* %p, align 4
+  %0 = load i32, ptr %p, align 4
   %or = or i32 %0, %x
-  store i32 %or, i32* %p, align 4
+  store i32 %or, ptr %p, align 4
   ret void
 }
 
-define void @memop_unsigned_int_and(i32* nocapture %p, i32 %x) nounwind {
+define void @memop_unsigned_int_and(ptr nocapture %p, i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_int_and:
 ; CHECK:  memw(r{{[0-9]+}}+#0) &= r{{[0-9]+}}
-  %0 = load i32, i32* %p, align 4
+  %0 = load i32, ptr %p, align 4
   %and = and i32 %0, %x
-  store i32 %and, i32* %p, align 4
+  store i32 %and, ptr %p, align 4
   ret void
 }
 
-define void @memop_unsigned_int_clrbit(i32* nocapture %p) nounwind {
+define void @memop_unsigned_int_clrbit(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_int_clrbit:
 ; CHECK:  memw(r{{[0-9]+}}+#0) = clrbit(#5)
-  %0 = load i32, i32* %p, align 4
+  %0 = load i32, ptr %p, align 4
   %and = and i32 %0, -33
-  store i32 %and, i32* %p, align 4
+  store i32 %and, ptr %p, align 4
   ret void
 }
 
-define void @memop_unsigned_int_setbit(i32* nocapture %p) nounwind {
+define void @memop_unsigned_int_setbit(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_int_setbit:
 ; CHECK:  memw(r{{[0-9]+}}+#0) = setbit(#7)
-  %0 = load i32, i32* %p, align 4
+  %0 = load i32, ptr %p, align 4
   %or = or i32 %0, 128
-  store i32 %or, i32* %p, align 4
+  store i32 %or, ptr %p, align 4
   ret void
 }
 
-define void @memop_unsigned_int_add5_index(i32* nocapture %p, i32 %i) nounwind {
+define void @memop_unsigned_int_add5_index(ptr nocapture %p, i32 %i) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_int_add5_index:
 ; CHECK:  memw(r{{[0-9]+}}+#0) += #5
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 %i
+  %0 = load i32, ptr %add.ptr, align 4
   %add = add nsw i32 %0, 5
-  store i32 %add, i32* %add.ptr, align 4
+  store i32 %add, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_unsigned_int_add_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
+define void @memop_unsigned_int_add_index(ptr nocapture %p, i32 %i, i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_int_add_index:
 ; CHECK:  memw(r{{[0-9]+}}+#0) += r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 %i
+  %0 = load i32, ptr %add.ptr, align 4
   %add = add nsw i32 %0, %x
-  store i32 %add, i32* %add.ptr, align 4
+  store i32 %add, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_unsigned_int_sub_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
+define void @memop_unsigned_int_sub_index(ptr nocapture %p, i32 %i, i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_int_sub_index:
 ; CHECK:  memw(r{{[0-9]+}}+#0) -= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 %i
+  %0 = load i32, ptr %add.ptr, align 4
   %sub = sub nsw i32 %0, %x
-  store i32 %sub, i32* %add.ptr, align 4
+  store i32 %sub, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_unsigned_int_or_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
+define void @memop_unsigned_int_or_index(ptr nocapture %p, i32 %i, i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_int_or_index:
 ; CHECK:  memw(r{{[0-9]+}}+#0) |= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 %i
+  %0 = load i32, ptr %add.ptr, align 4
   %or = or i32 %0, %x
-  store i32 %or, i32* %add.ptr, align 4
+  store i32 %or, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_unsigned_int_and_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
+define void @memop_unsigned_int_and_index(ptr nocapture %p, i32 %i, i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_int_and_index:
 ; CHECK:  memw(r{{[0-9]+}}+#0) &= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 %i
+  %0 = load i32, ptr %add.ptr, align 4
   %and = and i32 %0, %x
-  store i32 %and, i32* %add.ptr, align 4
+  store i32 %and, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_unsigned_int_clrbit_index(i32* nocapture %p, i32 %i) nounwind {
+define void @memop_unsigned_int_clrbit_index(ptr nocapture %p, i32 %i) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_int_clrbit_index:
 ; CHECK:  memw(r{{[0-9]+}}+#0) = clrbit(#5)
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 %i
+  %0 = load i32, ptr %add.ptr, align 4
   %and = and i32 %0, -33
-  store i32 %and, i32* %add.ptr, align 4
+  store i32 %and, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_unsigned_int_setbit_index(i32* nocapture %p, i32 %i) nounwind {
+define void @memop_unsigned_int_setbit_index(ptr nocapture %p, i32 %i) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_int_setbit_index:
 ; CHECK:  memw(r{{[0-9]+}}+#0) = setbit(#7)
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 %i
+  %0 = load i32, ptr %add.ptr, align 4
   %or = or i32 %0, 128
-  store i32 %or, i32* %add.ptr, align 4
+  store i32 %or, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_unsigned_int_add5_index5(i32* nocapture %p) nounwind {
+define void @memop_unsigned_int_add5_index5(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_int_add5_index5:
 ; CHECK:  memw(r{{[0-9]+}}+#20) += #5
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 5
+  %0 = load i32, ptr %add.ptr, align 4
   %add = add nsw i32 %0, 5
-  store i32 %add, i32* %add.ptr, align 4
+  store i32 %add, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_unsigned_int_add_index5(i32* nocapture %p, i32 %x) nounwind {
+define void @memop_unsigned_int_add_index5(ptr nocapture %p, i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_int_add_index5:
 ; CHECK:  memw(r{{[0-9]+}}+#20) += r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 5
+  %0 = load i32, ptr %add.ptr, align 4
   %add = add nsw i32 %0, %x
-  store i32 %add, i32* %add.ptr, align 4
+  store i32 %add, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_unsigned_int_sub_index5(i32* nocapture %p, i32 %x) nounwind {
+define void @memop_unsigned_int_sub_index5(ptr nocapture %p, i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_int_sub_index5:
 ; CHECK:  memw(r{{[0-9]+}}+#20) -= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 5
+  %0 = load i32, ptr %add.ptr, align 4
   %sub = sub nsw i32 %0, %x
-  store i32 %sub, i32* %add.ptr, align 4
+  store i32 %sub, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_unsigned_int_or_index5(i32* nocapture %p, i32 %x) nounwind {
+define void @memop_unsigned_int_or_index5(ptr nocapture %p, i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_int_or_index5:
 ; CHECK:  memw(r{{[0-9]+}}+#20) |= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 5
+  %0 = load i32, ptr %add.ptr, align 4
   %or = or i32 %0, %x
-  store i32 %or, i32* %add.ptr, align 4
+  store i32 %or, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_unsigned_int_and_index5(i32* nocapture %p, i32 %x) nounwind {
+define void @memop_unsigned_int_and_index5(ptr nocapture %p, i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_int_and_index5:
 ; CHECK:  memw(r{{[0-9]+}}+#20) &= r{{[0-9]+}}
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 5
+  %0 = load i32, ptr %add.ptr, align 4
   %and = and i32 %0, %x
-  store i32 %and, i32* %add.ptr, align 4
+  store i32 %and, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_unsigned_int_clrbit_index5(i32* nocapture %p) nounwind {
+define void @memop_unsigned_int_clrbit_index5(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_int_clrbit_index5:
 ; CHECK:  memw(r{{[0-9]+}}+#20) = clrbit(#5)
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 5
+  %0 = load i32, ptr %add.ptr, align 4
   %and = and i32 %0, -33
-  store i32 %and, i32* %add.ptr, align 4
+  store i32 %and, ptr %add.ptr, align 4
   ret void
 }
 
-define void @memop_unsigned_int_setbit_index5(i32* nocapture %p) nounwind {
+define void @memop_unsigned_int_setbit_index5(ptr nocapture %p) nounwind {
 entry:
 ; CHECK-LABEL: memop_unsigned_int_setbit_index5:
 ; CHECK:  memw(r{{[0-9]+}}+#20) = setbit(#7)
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 5
+  %0 = load i32, ptr %add.ptr, align 4
   %or = or i32 %0, 128
-  store i32 %or, i32* %add.ptr, align 4
+  store i32 %or, ptr %add.ptr, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/memops1.ll b/llvm/test/CodeGen/Hexagon/memops1.ll
index 14204ef3d8010..822df680920d8 100644
--- a/llvm/test/CodeGen/Hexagon/memops1.ll
+++ b/llvm/test/CodeGen/Hexagon/memops1.ll
@@ -2,32 +2,32 @@
 ; Generate MemOps for V4 and above.
 
 
-define void @f(i32* %p) nounwind {
+define void @f(ptr %p) nounwind {
 entry:
 ; CHECK:  memw(r{{[0-9]+}}+#40) -= #1
-  %p.addr = alloca i32*, align 4
-  store i32* %p, i32** %p.addr, align 4
-  %0 = load i32*, i32** %p.addr, align 4
-  %add.ptr = getelementptr inbounds i32, i32* %0, i32 10
-  %1 = load i32, i32* %add.ptr, align 4
+  %p.addr = alloca ptr, align 4
+  store ptr %p, ptr %p.addr, align 4
+  %0 = load ptr, ptr %p.addr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %0, i32 10
+  %1 = load i32, ptr %add.ptr, align 4
   %sub = sub nsw i32 %1, 1
-  store i32 %sub, i32* %add.ptr, align 4
+  store i32 %sub, ptr %add.ptr, align 4
   ret void
 }
 
-define void @g(i32* %p, i32 %i) nounwind {
+define void @g(ptr %p, i32 %i) nounwind {
 entry:
 ; CHECK: memw(r{{[0-9]+}}+#40) -= #1
-  %p.addr = alloca i32*, align 4
+  %p.addr = alloca ptr, align 4
   %i.addr = alloca i32, align 4
-  store i32* %p, i32** %p.addr, align 4
-  store i32 %i, i32* %i.addr, align 4
-  %0 = load i32*, i32** %p.addr, align 4
-  %1 = load i32, i32* %i.addr, align 4
-  %add.ptr = getelementptr inbounds i32, i32* %0, i32 %1
-  %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 10
-  %2 = load i32, i32* %add.ptr1, align 4
+  store ptr %p, ptr %p.addr, align 4
+  store i32 %i, ptr %i.addr, align 4
+  %0 = load ptr, ptr %p.addr, align 4
+  %1 = load i32, ptr %i.addr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %0, i32 %1
+  %add.ptr1 = getelementptr inbounds i32, ptr %add.ptr, i32 10
+  %2 = load i32, ptr %add.ptr1, align 4
   %sub = sub nsw i32 %2, 1
-  store i32 %sub, i32* %add.ptr1, align 4
+  store i32 %sub, ptr %add.ptr1, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/memops2.ll b/llvm/test/CodeGen/Hexagon/memops2.ll
index e427dd1af9447..74a4ed546d3ee 100644
--- a/llvm/test/CodeGen/Hexagon/memops2.ll
+++ b/llvm/test/CodeGen/Hexagon/memops2.ll
@@ -2,27 +2,27 @@
 ; Generate MemOps for V4 and above.
 
 
-define void @f(i16* nocapture %p) nounwind {
+define void @f(ptr nocapture %p) nounwind {
 entry:
 ; CHECK:  memh(r{{[0-9]+}}+#20) -= #1
-  %add.ptr = getelementptr inbounds i16, i16* %p, i32 10
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %p, i32 10
+  %0 = load i16, ptr %add.ptr, align 2
   %conv2 = zext i16 %0 to i32
   %sub = add nsw i32 %conv2, 65535
   %conv1 = trunc i32 %sub to i16
-  store i16 %conv1, i16* %add.ptr, align 2
+  store i16 %conv1, ptr %add.ptr, align 2
   ret void
 }
 
-define void @g(i16* nocapture %p, i32 %i) nounwind {
+define void @g(ptr nocapture %p, i32 %i) nounwind {
 entry:
 ; CHECK:  memh(r{{[0-9]+}}+#20) -= #1
   %add.ptr.sum = add i32 %i, 10
-  %add.ptr1 = getelementptr inbounds i16, i16* %p, i32 %add.ptr.sum
-  %0 = load i16, i16* %add.ptr1, align 2
+  %add.ptr1 = getelementptr inbounds i16, ptr %p, i32 %add.ptr.sum
+  %0 = load i16, ptr %add.ptr1, align 2
   %conv3 = zext i16 %0 to i32
   %sub = add nsw i32 %conv3, 65535
   %conv2 = trunc i32 %sub to i16
-  store i16 %conv2, i16* %add.ptr1, align 2
+  store i16 %conv2, ptr %add.ptr1, align 2
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/memops3.ll b/llvm/test/CodeGen/Hexagon/memops3.ll
index 606be7096e27d..6861b9b8e9691 100644
--- a/llvm/test/CodeGen/Hexagon/memops3.ll
+++ b/llvm/test/CodeGen/Hexagon/memops3.ll
@@ -2,27 +2,27 @@
 ; Generate MemOps for V4 and above.
 
 
-define void @f(i8* nocapture %p) nounwind {
+define void @f(ptr nocapture %p) nounwind {
 entry:
 ; CHECK:  memb(r{{[0-9]+}}+#10) -= #1
-  %add.ptr = getelementptr inbounds i8, i8* %p, i32 10
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p, i32 10
+  %0 = load i8, ptr %add.ptr, align 1
   %conv = zext i8 %0 to i32
   %sub = add nsw i32 %conv, 255
   %conv1 = trunc i32 %sub to i8
-  store i8 %conv1, i8* %add.ptr, align 1
+  store i8 %conv1, ptr %add.ptr, align 1
   ret void
 }
 
-define void @g(i8* nocapture %p, i32 %i) nounwind {
+define void @g(ptr nocapture %p, i32 %i) nounwind {
 entry:
 ; CHECK:  memb(r{{[0-9]+}}+#10) -= #1
   %add.ptr.sum = add i32 %i, 10
-  %add.ptr1 = getelementptr inbounds i8, i8* %p, i32 %add.ptr.sum
-  %0 = load i8, i8* %add.ptr1, align 1
+  %add.ptr1 = getelementptr inbounds i8, ptr %p, i32 %add.ptr.sum
+  %0 = load i8, ptr %add.ptr1, align 1
   %conv = zext i8 %0 to i32
   %sub = add nsw i32 %conv, 255
   %conv2 = trunc i32 %sub to i8
-  store i8 %conv2, i8* %add.ptr1, align 1
+  store i8 %conv2, ptr %add.ptr1, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/memops_global.ll b/llvm/test/CodeGen/Hexagon/memops_global.ll
index 586f58f0b0749..66d0531a238f4 100644
--- a/llvm/test/CodeGen/Hexagon/memops_global.ll
+++ b/llvm/test/CodeGen/Hexagon/memops_global.ll
@@ -11,9 +11,9 @@
 ; CHECK: memb(r{{[0-9]+}}+#0) += #1
 define void @f0() #0 {
 b0:
-  %v0 = load i8, i8* @g0, align 1, !tbaa !0
+  %v0 = load i8, ptr @g0, align 1, !tbaa !0
   %v1 = add i8 %v0, 1
-  store i8 %v1, i8* @g0, align 1, !tbaa !0
+  store i8 %v1, ptr @g0, align 1, !tbaa !0
   ret void
 }
 
@@ -21,9 +21,9 @@ b0:
 ; CHECK: memb(r{{[0-9]+}}+#0) -= #1
 define void @f1() #0 {
 b0:
-  %v0 = load i8, i8* @g0, align 1, !tbaa !0
+  %v0 = load i8, ptr @g0, align 1, !tbaa !0
   %v1 = add i8 %v0, -1
-  store i8 %v1, i8* @g0, align 1, !tbaa !0
+  store i8 %v1, ptr @g0, align 1, !tbaa !0
   ret void
 }
 
@@ -31,11 +31,11 @@ b0:
 ; CHECK: memb(r{{[0-9]+}}+#0) += #5
 define void @f2() #0 {
 b0:
-  %v0 = load i8, i8* @g0, align 1, !tbaa !0
+  %v0 = load i8, ptr @g0, align 1, !tbaa !0
   %v1 = zext i8 %v0 to i32
   %v2 = add nsw i32 %v1, 5
   %v3 = trunc i32 %v2 to i8
-  store i8 %v3, i8* @g0, align 1, !tbaa !0
+  store i8 %v3, ptr @g0, align 1, !tbaa !0
   ret void
 }
 
@@ -43,11 +43,11 @@ b0:
 ; CHECK: memb(r{{[0-9]+}}+#0) -= #5
 define void @f3() #0 {
 b0:
-  %v0 = load i8, i8* @g0, align 1, !tbaa !0
+  %v0 = load i8, ptr @g0, align 1, !tbaa !0
   %v1 = zext i8 %v0 to i32
   %v2 = add nsw i32 %v1, 251
   %v3 = trunc i32 %v2 to i8
-  store i8 %v3, i8* @g0, align 1, !tbaa !0
+  store i8 %v3, ptr @g0, align 1, !tbaa !0
   ret void
 }
 
@@ -55,11 +55,11 @@ b0:
 ; CHECK: memb(r{{[0-9]+}}+#0) -= #5
 define void @f4() #0 {
 b0:
-  %v0 = load i8, i8* @g0, align 1, !tbaa !0
+  %v0 = load i8, ptr @g0, align 1, !tbaa !0
   %v1 = zext i8 %v0 to i32
   %v2 = add nsw i32 %v1, 251
   %v3 = trunc i32 %v2 to i8
-  store i8 %v3, i8* @g0, align 1, !tbaa !0
+  store i8 %v3, ptr @g0, align 1, !tbaa !0
   ret void
 }
 
@@ -67,11 +67,11 @@ b0:
 ; CHECK: memb(r{{[0-9]+}}+#0) += #5
 define void @f5() #0 {
 b0:
-  %v0 = load i8, i8* @g0, align 1, !tbaa !0
+  %v0 = load i8, ptr @g0, align 1, !tbaa !0
   %v1 = zext i8 %v0 to i32
   %v2 = add nsw i32 %v1, 5
   %v3 = trunc i32 %v2 to i8
-  store i8 %v3, i8* @g0, align 1, !tbaa !0
+  store i8 %v3, ptr @g0, align 1, !tbaa !0
   ret void
 }
 
@@ -80,11 +80,11 @@ b0:
 define void @f6(i8 zeroext %a0) #0 {
 b0:
   %v0 = zext i8 %a0 to i32
-  %v1 = load i8, i8* @g0, align 1, !tbaa !0
+  %v1 = load i8, ptr @g0, align 1, !tbaa !0
   %v2 = zext i8 %v1 to i32
   %v3 = add nsw i32 %v2, %v0
   %v4 = trunc i32 %v3 to i8
-  store i8 %v4, i8* @g0, align 1, !tbaa !0
+  store i8 %v4, ptr @g0, align 1, !tbaa !0
   ret void
 }
 
@@ -93,11 +93,11 @@ b0:
 define void @f7(i8 zeroext %a0) #0 {
 b0:
   %v0 = zext i8 %a0 to i32
-  %v1 = load i8, i8* @g0, align 1, !tbaa !0
+  %v1 = load i8, ptr @g0, align 1, !tbaa !0
   %v2 = zext i8 %v1 to i32
   %v3 = sub nsw i32 %v2, %v0
   %v4 = trunc i32 %v3 to i8
-  store i8 %v4, i8* @g0, align 1, !tbaa !0
+  store i8 %v4, ptr @g0, align 1, !tbaa !0
   ret void
 }
 
@@ -105,9 +105,9 @@ b0:
 ; CHECK: memb(r{{[0-9]+}}+#0) |= r{{[0-9]+}}
 define void @f8(i8 zeroext %a0) #0 {
 b0:
-  %v0 = load i8, i8* @g0, align 1, !tbaa !0
+  %v0 = load i8, ptr @g0, align 1, !tbaa !0
   %v1 = or i8 %v0, %a0
-  store i8 %v1, i8* @g0, align 1, !tbaa !0
+  store i8 %v1, ptr @g0, align 1, !tbaa !0
   ret void
 }
 
@@ -115,9 +115,9 @@ b0:
 ; CHECK: memb(r{{[0-9]+}}+#0) &= r{{[0-9]+}}
 define void @f9(i8 zeroext %a0) #0 {
 b0:
-  %v0 = load i8, i8* @g0, align 1, !tbaa !0
+  %v0 = load i8, ptr @g0, align 1, !tbaa !0
   %v1 = and i8 %v0, %a0
-  store i8 %v1, i8* @g0, align 1, !tbaa !0
+  store i8 %v1, ptr @g0, align 1, !tbaa !0
   ret void
 }
 
@@ -125,11 +125,11 @@ b0:
 ; CHECK: memb(r{{[0-9]+}}+#0) = clrbit(#5)
 define void @f10() #0 {
 b0:
-  %v0 = load i8, i8* @g0, align 1, !tbaa !0
+  %v0 = load i8, ptr @g0, align 1, !tbaa !0
   %v1 = zext i8 %v0 to i32
   %v2 = and i32 %v1, 223
   %v3 = trunc i32 %v2 to i8
-  store i8 %v3, i8* @g0, align 1, !tbaa !0
+  store i8 %v3, ptr @g0, align 1, !tbaa !0
   ret void
 }
 
@@ -137,11 +137,11 @@ b0:
 ; CHECK: memb(r{{[0-9]+}}+#0) = setbit(#7)
 define void @f11() #0 {
 b0:
-  %v0 = load i8, i8* @g0, align 1, !tbaa !0
+  %v0 = load i8, ptr @g0, align 1, !tbaa !0
   %v1 = zext i8 %v0 to i32
   %v2 = or i32 %v1, 128
   %v3 = trunc i32 %v2 to i8
-  store i8 %v3, i8* @g0, align 1, !tbaa !0
+  store i8 %v3, ptr @g0, align 1, !tbaa !0
   ret void
 }
 
@@ -149,9 +149,9 @@ b0:
 ; CHECK: memb(r{{[0-9]+}}+#0) += #1
 define void @f12() #0 {
 b0:
-  %v0 = load i8, i8* @g1, align 1, !tbaa !0
+  %v0 = load i8, ptr @g1, align 1, !tbaa !0
   %v1 = add i8 %v0, 1
-  store i8 %v1, i8* @g1, align 1, !tbaa !0
+  store i8 %v1, ptr @g1, align 1, !tbaa !0
   ret void
 }
 
@@ -159,9 +159,9 @@ b0:
 ; CHECK: memb(r{{[0-9]+}}+#0) -= #1
 define void @f13() #0 {
 b0:
-  %v0 = load i8, i8* @g1, align 1, !tbaa !0
+  %v0 = load i8, ptr @g1, align 1, !tbaa !0
   %v1 = add i8 %v0, -1
-  store i8 %v1, i8* @g1, align 1, !tbaa !0
+  store i8 %v1, ptr @g1, align 1, !tbaa !0
   ret void
 }
 
@@ -169,11 +169,11 @@ b0:
 ; CHECK: memb(r{{[0-9]+}}+#0) += #5
 define void @f14() #0 {
 b0:
-  %v0 = load i8, i8* @g1, align 1, !tbaa !0
+  %v0 = load i8, ptr @g1, align 1, !tbaa !0
   %v1 = zext i8 %v0 to i32
   %v2 = add nsw i32 %v1, 5
   %v3 = trunc i32 %v2 to i8
-  store i8 %v3, i8* @g1, align 1, !tbaa !0
+  store i8 %v3, ptr @g1, align 1, !tbaa !0
   ret void
 }
 
@@ -181,11 +181,11 @@ b0:
 ; CHECK: memb(r{{[0-9]+}}+#0) -= #5
 define void @f15() #0 {
 b0:
-  %v0 = load i8, i8* @g1, align 1, !tbaa !0
+  %v0 = load i8, ptr @g1, align 1, !tbaa !0
   %v1 = zext i8 %v0 to i32
   %v2 = add nsw i32 %v1, 251
   %v3 = trunc i32 %v2 to i8
-  store i8 %v3, i8* @g1, align 1, !tbaa !0
+  store i8 %v3, ptr @g1, align 1, !tbaa !0
   ret void
 }
 
@@ -193,11 +193,11 @@ b0:
 ; CHECK: memb(r{{[0-9]+}}+#0) -= #5
 define void @f16() #0 {
 b0:
-  %v0 = load i8, i8* @g1, align 1, !tbaa !0
+  %v0 = load i8, ptr @g1, align 1, !tbaa !0
   %v1 = zext i8 %v0 to i32
   %v2 = add nsw i32 %v1, 251
   %v3 = trunc i32 %v2 to i8
-  store i8 %v3, i8* @g1, align 1, !tbaa !0
+  store i8 %v3, ptr @g1, align 1, !tbaa !0
   ret void
 }
 
@@ -205,11 +205,11 @@ b0:
 ; CHECK: memb(r{{[0-9]+}}+#0) += #5
 define void @f17() #0 {
 b0:
-  %v0 = load i8, i8* @g1, align 1, !tbaa !0
+  %v0 = load i8, ptr @g1, align 1, !tbaa !0
   %v1 = zext i8 %v0 to i32
   %v2 = add nsw i32 %v1, 5
   %v3 = trunc i32 %v2 to i8
-  store i8 %v3, i8* @g1, align 1, !tbaa !0
+  store i8 %v3, ptr @g1, align 1, !tbaa !0
   ret void
 }
 
@@ -218,11 +218,11 @@ b0:
 define void @f18(i8 signext %a0) #0 {
 b0:
   %v0 = zext i8 %a0 to i32
-  %v1 = load i8, i8* @g1, align 1, !tbaa !0
+  %v1 = load i8, ptr @g1, align 1, !tbaa !0
   %v2 = zext i8 %v1 to i32
   %v3 = add nsw i32 %v2, %v0
   %v4 = trunc i32 %v3 to i8
-  store i8 %v4, i8* @g1, align 1, !tbaa !0
+  store i8 %v4, ptr @g1, align 1, !tbaa !0
   ret void
 }
 
@@ -231,11 +231,11 @@ b0:
 define void @f19(i8 signext %a0) #0 {
 b0:
   %v0 = zext i8 %a0 to i32
-  %v1 = load i8, i8* @g1, align 1, !tbaa !0
+  %v1 = load i8, ptr @g1, align 1, !tbaa !0
   %v2 = zext i8 %v1 to i32
   %v3 = sub nsw i32 %v2, %v0
   %v4 = trunc i32 %v3 to i8
-  store i8 %v4, i8* @g1, align 1, !tbaa !0
+  store i8 %v4, ptr @g1, align 1, !tbaa !0
   ret void
 }
 
@@ -243,9 +243,9 @@ b0:
 ; CHECK: memb(r{{[0-9]+}}+#0) |= r{{[0-9]+}}
 define void @f20(i8 signext %a0) #0 {
 b0:
-  %v0 = load i8, i8* @g1, align 1, !tbaa !0
+  %v0 = load i8, ptr @g1, align 1, !tbaa !0
   %v1 = or i8 %v0, %a0
-  store i8 %v1, i8* @g1, align 1, !tbaa !0
+  store i8 %v1, ptr @g1, align 1, !tbaa !0
   ret void
 }
 
@@ -253,9 +253,9 @@ b0:
 ; CHECK: memb(r{{[0-9]+}}+#0) &= r{{[0-9]+}}
 define void @f21(i8 signext %a0) #0 {
 b0:
-  %v0 = load i8, i8* @g1, align 1, !tbaa !0
+  %v0 = load i8, ptr @g1, align 1, !tbaa !0
   %v1 = and i8 %v0, %a0
-  store i8 %v1, i8* @g1, align 1, !tbaa !0
+  store i8 %v1, ptr @g1, align 1, !tbaa !0
   ret void
 }
 
@@ -263,11 +263,11 @@ b0:
 ; CHECK: memb(r{{[0-9]+}}+#0) = clrbit(#5)
 define void @f22() #0 {
 b0:
-  %v0 = load i8, i8* @g1, align 1, !tbaa !0
+  %v0 = load i8, ptr @g1, align 1, !tbaa !0
   %v1 = zext i8 %v0 to i32
   %v2 = and i32 %v1, 223
   %v3 = trunc i32 %v2 to i8
-  store i8 %v3, i8* @g1, align 1, !tbaa !0
+  store i8 %v3, ptr @g1, align 1, !tbaa !0
   ret void
 }
 
@@ -275,11 +275,11 @@ b0:
 ; CHECK: memb(r{{[0-9]+}}+#0) = setbit(#7)
 define void @f23() #0 {
 b0:
-  %v0 = load i8, i8* @g1, align 1, !tbaa !0
+  %v0 = load i8, ptr @g1, align 1, !tbaa !0
   %v1 = zext i8 %v0 to i32
   %v2 = or i32 %v1, 128
   %v3 = trunc i32 %v2 to i8
-  store i8 %v3, i8* @g1, align 1, !tbaa !0
+  store i8 %v3, ptr @g1, align 1, !tbaa !0
   ret void
 }
 
@@ -287,9 +287,9 @@ b0:
 ; CHECK: memh(r{{[0-9]+}}+#0) += #1
 define void @f24() #0 {
 b0:
-  %v0 = load i16, i16* @g2, align 2, !tbaa !3
+  %v0 = load i16, ptr @g2, align 2, !tbaa !3
   %v1 = add i16 %v0, 1
-  store i16 %v1, i16* @g2, align 2, !tbaa !3
+  store i16 %v1, ptr @g2, align 2, !tbaa !3
   ret void
 }
 
@@ -297,9 +297,9 @@ b0:
 ; CHECK: memh(r{{[0-9]+}}+#0) -= #1
 define void @f25() #0 {
 b0:
-  %v0 = load i16, i16* @g2, align 2, !tbaa !3
+  %v0 = load i16, ptr @g2, align 2, !tbaa !3
   %v1 = add i16 %v0, -1
-  store i16 %v1, i16* @g2, align 2, !tbaa !3
+  store i16 %v1, ptr @g2, align 2, !tbaa !3
   ret void
 }
 
@@ -307,11 +307,11 @@ b0:
 ; CHECK: memh(r{{[0-9]+}}+#0) += #5
 define void @f26() #0 {
 b0:
-  %v0 = load i16, i16* @g2, align 2, !tbaa !3
+  %v0 = load i16, ptr @g2, align 2, !tbaa !3
   %v1 = zext i16 %v0 to i32
   %v2 = add nsw i32 %v1, 5
   %v3 = trunc i32 %v2 to i16
-  store i16 %v3, i16* @g2, align 2, !tbaa !3
+  store i16 %v3, ptr @g2, align 2, !tbaa !3
   ret void
 }
 
@@ -319,11 +319,11 @@ b0:
 ; CHECK: memh(r{{[0-9]+}}+#0) -= #5
 define void @f27() #0 {
 b0:
-  %v0 = load i16, i16* @g2, align 2, !tbaa !3
+  %v0 = load i16, ptr @g2, align 2, !tbaa !3
   %v1 = zext i16 %v0 to i32
   %v2 = add nsw i32 %v1, 65531
   %v3 = trunc i32 %v2 to i16
-  store i16 %v3, i16* @g2, align 2, !tbaa !3
+  store i16 %v3, ptr @g2, align 2, !tbaa !3
   ret void
 }
 
@@ -331,11 +331,11 @@ b0:
 ; CHECK: memh(r{{[0-9]+}}+#0) -= #5
 define void @f28() #0 {
 b0:
-  %v0 = load i16, i16* @g2, align 2, !tbaa !3
+  %v0 = load i16, ptr @g2, align 2, !tbaa !3
   %v1 = zext i16 %v0 to i32
   %v2 = add nsw i32 %v1, 65531
   %v3 = trunc i32 %v2 to i16
-  store i16 %v3, i16* @g2, align 2, !tbaa !3
+  store i16 %v3, ptr @g2, align 2, !tbaa !3
   ret void
 }
 
@@ -343,11 +343,11 @@ b0:
 ; CHECK: memh(r{{[0-9]+}}+#0) += #5
 define void @f29() #0 {
 b0:
-  %v0 = load i16, i16* @g2, align 2, !tbaa !3
+  %v0 = load i16, ptr @g2, align 2, !tbaa !3
   %v1 = zext i16 %v0 to i32
   %v2 = add nsw i32 %v1, 5
   %v3 = trunc i32 %v2 to i16
-  store i16 %v3, i16* @g2, align 2, !tbaa !3
+  store i16 %v3, ptr @g2, align 2, !tbaa !3
   ret void
 }
 
@@ -356,11 +356,11 @@ b0:
 define void @f30(i16 zeroext %a0) #0 {
 b0:
   %v0 = zext i16 %a0 to i32
-  %v1 = load i16, i16* @g2, align 2, !tbaa !3
+  %v1 = load i16, ptr @g2, align 2, !tbaa !3
   %v2 = zext i16 %v1 to i32
   %v3 = add nsw i32 %v2, %v0
   %v4 = trunc i32 %v3 to i16
-  store i16 %v4, i16* @g2, align 2, !tbaa !3
+  store i16 %v4, ptr @g2, align 2, !tbaa !3
   ret void
 }
 
@@ -369,11 +369,11 @@ b0:
 define void @f31(i16 zeroext %a0) #0 {
 b0:
   %v0 = zext i16 %a0 to i32
-  %v1 = load i16, i16* @g2, align 2, !tbaa !3
+  %v1 = load i16, ptr @g2, align 2, !tbaa !3
   %v2 = zext i16 %v1 to i32
   %v3 = sub nsw i32 %v2, %v0
   %v4 = trunc i32 %v3 to i16
-  store i16 %v4, i16* @g2, align 2, !tbaa !3
+  store i16 %v4, ptr @g2, align 2, !tbaa !3
   ret void
 }
 
@@ -381,9 +381,9 @@ b0:
 ; CHECK: memh(r{{[0-9]+}}+#0) |= r{{[0-9]+}}
 define void @f32(i16 zeroext %a0) #0 {
 b0:
-  %v0 = load i16, i16* @g2, align 2, !tbaa !3
+  %v0 = load i16, ptr @g2, align 2, !tbaa !3
   %v1 = or i16 %v0, %a0
-  store i16 %v1, i16* @g2, align 2, !tbaa !3
+  store i16 %v1, ptr @g2, align 2, !tbaa !3
   ret void
 }
 
@@ -391,9 +391,9 @@ b0:
 ; CHECK: memh(r{{[0-9]+}}+#0) &= r{{[0-9]+}}
 define void @f33(i16 zeroext %a0) #0 {
 b0:
-  %v0 = load i16, i16* @g2, align 2, !tbaa !3
+  %v0 = load i16, ptr @g2, align 2, !tbaa !3
   %v1 = and i16 %v0, %a0
-  store i16 %v1, i16* @g2, align 2, !tbaa !3
+  store i16 %v1, ptr @g2, align 2, !tbaa !3
   ret void
 }
 
@@ -401,11 +401,11 @@ b0:
 ; CHECK: memh(r{{[0-9]+}}+#0) = clrbit(#5)
 define void @f34() #0 {
 b0:
-  %v0 = load i16, i16* @g2, align 2, !tbaa !3
+  %v0 = load i16, ptr @g2, align 2, !tbaa !3
   %v1 = zext i16 %v0 to i32
   %v2 = and i32 %v1, 65503
   %v3 = trunc i32 %v2 to i16
-  store i16 %v3, i16* @g2, align 2, !tbaa !3
+  store i16 %v3, ptr @g2, align 2, !tbaa !3
   ret void
 }
 
@@ -413,11 +413,11 @@ b0:
 ; CHECK: memh(r{{[0-9]+}}+#0) = setbit(#7)
 define void @f35() #0 {
 b0:
-  %v0 = load i16, i16* @g2, align 2, !tbaa !3
+  %v0 = load i16, ptr @g2, align 2, !tbaa !3
   %v1 = zext i16 %v0 to i32
   %v2 = or i32 %v1, 128
   %v3 = trunc i32 %v2 to i16
-  store i16 %v3, i16* @g2, align 2, !tbaa !3
+  store i16 %v3, ptr @g2, align 2, !tbaa !3
   ret void
 }
 
@@ -425,9 +425,9 @@ b0:
 ; CHECK: memh(r{{[0-9]+}}+#0) += #1
 define void @f36() #0 {
 b0:
-  %v0 = load i16, i16* @g3, align 2, !tbaa !3
+  %v0 = load i16, ptr @g3, align 2, !tbaa !3
   %v1 = add i16 %v0, 1
-  store i16 %v1, i16* @g3, align 2, !tbaa !3
+  store i16 %v1, ptr @g3, align 2, !tbaa !3
   ret void
 }
 
@@ -435,9 +435,9 @@ b0:
 ; CHECK: memh(r{{[0-9]+}}+#0) -= #1
 define void @f37() #0 {
 b0:
-  %v0 = load i16, i16* @g3, align 2, !tbaa !3
+  %v0 = load i16, ptr @g3, align 2, !tbaa !3
   %v1 = add i16 %v0, -1
-  store i16 %v1, i16* @g3, align 2, !tbaa !3
+  store i16 %v1, ptr @g3, align 2, !tbaa !3
   ret void
 }
 
@@ -445,11 +445,11 @@ b0:
 ; CHECK: memh(r{{[0-9]+}}+#0) += #5
 define void @f38() #0 {
 b0:
-  %v0 = load i16, i16* @g3, align 2, !tbaa !3
+  %v0 = load i16, ptr @g3, align 2, !tbaa !3
   %v1 = zext i16 %v0 to i32
   %v2 = add nsw i32 %v1, 5
   %v3 = trunc i32 %v2 to i16
-  store i16 %v3, i16* @g3, align 2, !tbaa !3
+  store i16 %v3, ptr @g3, align 2, !tbaa !3
   ret void
 }
 
@@ -457,11 +457,11 @@ b0:
 ; CHECK: memh(r{{[0-9]+}}+#0) -= #5
 define void @f39() #0 {
 b0:
-  %v0 = load i16, i16* @g3, align 2, !tbaa !3
+  %v0 = load i16, ptr @g3, align 2, !tbaa !3
   %v1 = zext i16 %v0 to i32
   %v2 = add nsw i32 %v1, 65531
   %v3 = trunc i32 %v2 to i16
-  store i16 %v3, i16* @g3, align 2, !tbaa !3
+  store i16 %v3, ptr @g3, align 2, !tbaa !3
   ret void
 }
 
@@ -469,11 +469,11 @@ b0:
 ; CHECK: memh(r{{[0-9]+}}+#0) -= #5
 define void @f40() #0 {
 b0:
-  %v0 = load i16, i16* @g3, align 2, !tbaa !3
+  %v0 = load i16, ptr @g3, align 2, !tbaa !3
   %v1 = zext i16 %v0 to i32
   %v2 = add nsw i32 %v1, 65531
   %v3 = trunc i32 %v2 to i16
-  store i16 %v3, i16* @g3, align 2, !tbaa !3
+  store i16 %v3, ptr @g3, align 2, !tbaa !3
   ret void
 }
 
@@ -481,11 +481,11 @@ b0:
 ; CHECK: memh(r{{[0-9]+}}+#0) += #5
 define void @f41() #0 {
 b0:
-  %v0 = load i16, i16* @g3, align 2, !tbaa !3
+  %v0 = load i16, ptr @g3, align 2, !tbaa !3
   %v1 = zext i16 %v0 to i32
   %v2 = add nsw i32 %v1, 5
   %v3 = trunc i32 %v2 to i16
-  store i16 %v3, i16* @g3, align 2, !tbaa !3
+  store i16 %v3, ptr @g3, align 2, !tbaa !3
   ret void
 }
 
@@ -494,11 +494,11 @@ b0:
 define void @f42(i16 signext %a0) #0 {
 b0:
   %v0 = zext i16 %a0 to i32
-  %v1 = load i16, i16* @g3, align 2, !tbaa !3
+  %v1 = load i16, ptr @g3, align 2, !tbaa !3
   %v2 = zext i16 %v1 to i32
   %v3 = add nsw i32 %v2, %v0
   %v4 = trunc i32 %v3 to i16
-  store i16 %v4, i16* @g3, align 2, !tbaa !3
+  store i16 %v4, ptr @g3, align 2, !tbaa !3
   ret void
 }
 
@@ -507,11 +507,11 @@ b0:
 define void @f43(i16 signext %a0) #0 {
 b0:
   %v0 = zext i16 %a0 to i32
-  %v1 = load i16, i16* @g3, align 2, !tbaa !3
+  %v1 = load i16, ptr @g3, align 2, !tbaa !3
   %v2 = zext i16 %v1 to i32
   %v3 = sub nsw i32 %v2, %v0
   %v4 = trunc i32 %v3 to i16
-  store i16 %v4, i16* @g3, align 2, !tbaa !3
+  store i16 %v4, ptr @g3, align 2, !tbaa !3
   ret void
 }
 
@@ -519,9 +519,9 @@ b0:
 ; CHECK: memh(r{{[0-9]+}}+#0) |= r{{[0-9]+}}
 define void @f44(i16 signext %a0) #0 {
 b0:
-  %v0 = load i16, i16* @g3, align 2, !tbaa !3
+  %v0 = load i16, ptr @g3, align 2, !tbaa !3
   %v1 = or i16 %v0, %a0
-  store i16 %v1, i16* @g3, align 2, !tbaa !3
+  store i16 %v1, ptr @g3, align 2, !tbaa !3
   ret void
 }
 
@@ -529,9 +529,9 @@ b0:
 ; CHECK: memh(r{{[0-9]+}}+#0) &= r{{[0-9]+}}
 define void @f45(i16 signext %a0) #0 {
 b0:
-  %v0 = load i16, i16* @g3, align 2, !tbaa !3
+  %v0 = load i16, ptr @g3, align 2, !tbaa !3
   %v1 = and i16 %v0, %a0
-  store i16 %v1, i16* @g3, align 2, !tbaa !3
+  store i16 %v1, ptr @g3, align 2, !tbaa !3
   ret void
 }
 
@@ -539,11 +539,11 @@ b0:
 ; CHECK: memh(r{{[0-9]+}}+#0) = clrbit(#5)
 define void @f46() #0 {
 b0:
-  %v0 = load i16, i16* @g3, align 2, !tbaa !3
+  %v0 = load i16, ptr @g3, align 2, !tbaa !3
   %v1 = zext i16 %v0 to i32
   %v2 = and i32 %v1, 65503
   %v3 = trunc i32 %v2 to i16
-  store i16 %v3, i16* @g3, align 2, !tbaa !3
+  store i16 %v3, ptr @g3, align 2, !tbaa !3
   ret void
 }
 
@@ -551,11 +551,11 @@ b0:
 ; CHECK: memh(r{{[0-9]+}}+#0) = setbit(#7)
 define void @f47() #0 {
 b0:
-  %v0 = load i16, i16* @g3, align 2, !tbaa !3
+  %v0 = load i16, ptr @g3, align 2, !tbaa !3
   %v1 = zext i16 %v0 to i32
   %v2 = or i32 %v1, 128
   %v3 = trunc i32 %v2 to i16
-  store i16 %v3, i16* @g3, align 2, !tbaa !3
+  store i16 %v3, ptr @g3, align 2, !tbaa !3
   ret void
 }
 
@@ -563,9 +563,9 @@ b0:
 ; CHECK: memw(r{{[0-9]+}}+#0) += #1
 define void @f48() #0 {
 b0:
-  %v0 = load i32, i32* @g4, align 4, !tbaa !5
+  %v0 = load i32, ptr @g4, align 4, !tbaa !5
   %v1 = add nsw i32 %v0, 1
-  store i32 %v1, i32* @g4, align 4, !tbaa !5
+  store i32 %v1, ptr @g4, align 4, !tbaa !5
   ret void
 }
 
@@ -573,9 +573,9 @@ b0:
 ; CHECK: memw(r{{[0-9]+}}+#0) -= #1
 define void @f49() #0 {
 b0:
-  %v0 = load i32, i32* @g4, align 4, !tbaa !5
+  %v0 = load i32, ptr @g4, align 4, !tbaa !5
   %v1 = add nsw i32 %v0, -1
-  store i32 %v1, i32* @g4, align 4, !tbaa !5
+  store i32 %v1, ptr @g4, align 4, !tbaa !5
   ret void
 }
 
@@ -583,9 +583,9 @@ b0:
 ; CHECK: memw(r{{[0-9]+}}+#0) += #5
 define void @f50() #0 {
 b0:
-  %v0 = load i32, i32* @g4, align 4, !tbaa !5
+  %v0 = load i32, ptr @g4, align 4, !tbaa !5
   %v1 = add nsw i32 %v0, 5
-  store i32 %v1, i32* @g4, align 4, !tbaa !5
+  store i32 %v1, ptr @g4, align 4, !tbaa !5
   ret void
 }
 
@@ -593,9 +593,9 @@ b0:
 ; CHECK: memw(r{{[0-9]+}}+#0) -= #5
 define void @f51() #0 {
 b0:
-  %v0 = load i32, i32* @g4, align 4, !tbaa !5
+  %v0 = load i32, ptr @g4, align 4, !tbaa !5
   %v1 = add nsw i32 %v0, -5
-  store i32 %v1, i32* @g4, align 4, !tbaa !5
+  store i32 %v1, ptr @g4, align 4, !tbaa !5
   ret void
 }
 
@@ -603,9 +603,9 @@ b0:
 ; CHECK: memw(r{{[0-9]+}}+#0) -= #5
 define void @f52() #0 {
 b0:
-  %v0 = load i32, i32* @g4, align 4, !tbaa !5
+  %v0 = load i32, ptr @g4, align 4, !tbaa !5
   %v1 = add nsw i32 %v0, -5
-  store i32 %v1, i32* @g4, align 4, !tbaa !5
+  store i32 %v1, ptr @g4, align 4, !tbaa !5
   ret void
 }
 
@@ -613,9 +613,9 @@ b0:
 ; CHECK: memw(r{{[0-9]+}}+#0) += #5
 define void @f53() #0 {
 b0:
-  %v0 = load i32, i32* @g4, align 4, !tbaa !5
+  %v0 = load i32, ptr @g4, align 4, !tbaa !5
   %v1 = add nsw i32 %v0, 5
-  store i32 %v1, i32* @g4, align 4, !tbaa !5
+  store i32 %v1, ptr @g4, align 4, !tbaa !5
   ret void
 }
 
@@ -623,9 +623,9 @@ b0:
 ; CHECK: memw(r{{[0-9]+}}+#0) += r{{[0-9]+}}
 define void @f54(i32 %a0) #0 {
 b0:
-  %v0 = load i32, i32* @g4, align 4, !tbaa !5
+  %v0 = load i32, ptr @g4, align 4, !tbaa !5
   %v1 = add i32 %v0, %a0
-  store i32 %v1, i32* @g4, align 4, !tbaa !5
+  store i32 %v1, ptr @g4, align 4, !tbaa !5
   ret void
 }
 
@@ -633,9 +633,9 @@ b0:
 ; CHECK: memw(r{{[0-9]+}}+#0) -= r{{[0-9]+}}
 define void @f55(i32 %a0) #0 {
 b0:
-  %v0 = load i32, i32* @g4, align 4, !tbaa !5
+  %v0 = load i32, ptr @g4, align 4, !tbaa !5
   %v1 = sub i32 %v0, %a0
-  store i32 %v1, i32* @g4, align 4, !tbaa !5
+  store i32 %v1, ptr @g4, align 4, !tbaa !5
   ret void
 }
 
@@ -643,9 +643,9 @@ b0:
 ; CHECK: memw(r{{[0-9]+}}+#0) |= r{{[0-9]+}}
 define void @f56(i32 %a0) #0 {
 b0:
-  %v0 = load i32, i32* @g4, align 4, !tbaa !5
+  %v0 = load i32, ptr @g4, align 4, !tbaa !5
   %v1 = or i32 %v0, %a0
-  store i32 %v1, i32* @g4, align 4, !tbaa !5
+  store i32 %v1, ptr @g4, align 4, !tbaa !5
   ret void
 }
 
@@ -653,9 +653,9 @@ b0:
 ; CHECK: memw(r{{[0-9]+}}+#0) &= r{{[0-9]+}}
 define void @f57(i32 %a0) #0 {
 b0:
-  %v0 = load i32, i32* @g4, align 4, !tbaa !5
+  %v0 = load i32, ptr @g4, align 4, !tbaa !5
   %v1 = and i32 %v0, %a0
-  store i32 %v1, i32* @g4, align 4, !tbaa !5
+  store i32 %v1, ptr @g4, align 4, !tbaa !5
   ret void
 }
 
@@ -663,9 +663,9 @@ b0:
 ; CHECK: memw(r{{[0-9]+}}+#0) = clrbit(#5)
 define void @f58() #0 {
 b0:
-  %v0 = load i32, i32* @g4, align 4, !tbaa !5
+  %v0 = load i32, ptr @g4, align 4, !tbaa !5
   %v1 = and i32 %v0, -33
-  store i32 %v1, i32* @g4, align 4, !tbaa !5
+  store i32 %v1, ptr @g4, align 4, !tbaa !5
   ret void
 }
 
@@ -673,9 +673,9 @@ b0:
 ; CHECK: memw(r{{[0-9]+}}+#0) = setbit(#7)
 define void @f59() #0 {
 b0:
-  %v0 = load i32, i32* @g4, align 4, !tbaa !5
+  %v0 = load i32, ptr @g4, align 4, !tbaa !5
   %v1 = or i32 %v0, 128
-  store i32 %v1, i32* @g4, align 4, !tbaa !5
+  store i32 %v1, ptr @g4, align 4, !tbaa !5
   ret void
 }
 
@@ -683,9 +683,9 @@ b0:
 ; CHECK: memw(r{{[0-9]+}}+#0) += #1
 define void @f60() #0 {
 b0:
-  %v0 = load i32, i32* @g5, align 4, !tbaa !5
+  %v0 = load i32, ptr @g5, align 4, !tbaa !5
   %v1 = add i32 %v0, 1
-  store i32 %v1, i32* @g5, align 4, !tbaa !5
+  store i32 %v1, ptr @g5, align 4, !tbaa !5
   ret void
 }
 
@@ -693,9 +693,9 @@ b0:
 ; CHECK: memw(r{{[0-9]+}}+#0) -= #1
 define void @f61() #0 {
 b0:
-  %v0 = load i32, i32* @g5, align 4, !tbaa !5
+  %v0 = load i32, ptr @g5, align 4, !tbaa !5
   %v1 = add i32 %v0, -1
-  store i32 %v1, i32* @g5, align 4, !tbaa !5
+  store i32 %v1, ptr @g5, align 4, !tbaa !5
   ret void
 }
 
@@ -703,9 +703,9 @@ b0:
 ; CHECK: memw(r{{[0-9]+}}+#0) += #5
 define void @f62() #0 {
 b0:
-  %v0 = load i32, i32* @g5, align 4, !tbaa !5
+  %v0 = load i32, ptr @g5, align 4, !tbaa !5
   %v1 = add i32 %v0, 5
-  store i32 %v1, i32* @g5, align 4, !tbaa !5
+  store i32 %v1, ptr @g5, align 4, !tbaa !5
   ret void
 }
 
@@ -713,9 +713,9 @@ b0:
 ; CHECK: memw(r{{[0-9]+}}+#0) -= #5
 define void @f63() #0 {
 b0:
-  %v0 = load i32, i32* @g5, align 4, !tbaa !5
+  %v0 = load i32, ptr @g5, align 4, !tbaa !5
   %v1 = add i32 %v0, -5
-  store i32 %v1, i32* @g5, align 4, !tbaa !5
+  store i32 %v1, ptr @g5, align 4, !tbaa !5
   ret void
 }
 
@@ -723,9 +723,9 @@ b0:
 ; CHECK: memw(r{{[0-9]+}}+#0) -= #5
 define void @f64() #0 {
 b0:
-  %v0 = load i32, i32* @g5, align 4, !tbaa !5
+  %v0 = load i32, ptr @g5, align 4, !tbaa !5
   %v1 = add i32 %v0, -5
-  store i32 %v1, i32* @g5, align 4, !tbaa !5
+  store i32 %v1, ptr @g5, align 4, !tbaa !5
   ret void
 }
 
@@ -733,9 +733,9 @@ b0:
 ; CHECK: memw(r{{[0-9]+}}+#0) += #5
 define void @f65() #0 {
 b0:
-  %v0 = load i32, i32* @g5, align 4, !tbaa !5
+  %v0 = load i32, ptr @g5, align 4, !tbaa !5
   %v1 = add i32 %v0, 5
-  store i32 %v1, i32* @g5, align 4, !tbaa !5
+  store i32 %v1, ptr @g5, align 4, !tbaa !5
   ret void
 }
 
@@ -743,9 +743,9 @@ b0:
 ; CHECK: memw(r{{[0-9]+}}+#0) += r{{[0-9]+}}
 define void @f66(i32 %a0) #0 {
 b0:
-  %v0 = load i32, i32* @g5, align 4, !tbaa !5
+  %v0 = load i32, ptr @g5, align 4, !tbaa !5
   %v1 = add i32 %v0, %a0
-  store i32 %v1, i32* @g5, align 4, !tbaa !5
+  store i32 %v1, ptr @g5, align 4, !tbaa !5
   ret void
 }
 
@@ -753,9 +753,9 @@ b0:
 ; CHECK: memw(r{{[0-9]+}}+#0) -= r{{[0-9]+}}
 define void @f67(i32 %a0) #0 {
 b0:
-  %v0 = load i32, i32* @g5, align 4, !tbaa !5
+  %v0 = load i32, ptr @g5, align 4, !tbaa !5
   %v1 = sub i32 %v0, %a0
-  store i32 %v1, i32* @g5, align 4, !tbaa !5
+  store i32 %v1, ptr @g5, align 4, !tbaa !5
   ret void
 }
 
@@ -763,9 +763,9 @@ b0:
 ; CHECK: memw(r{{[0-9]+}}+#0) |= r{{[0-9]+}}
 define void @f68(i32 %a0) #0 {
 b0:
-  %v0 = load i32, i32* @g5, align 4, !tbaa !5
+  %v0 = load i32, ptr @g5, align 4, !tbaa !5
   %v1 = or i32 %v0, %a0
-  store i32 %v1, i32* @g5, align 4, !tbaa !5
+  store i32 %v1, ptr @g5, align 4, !tbaa !5
   ret void
 }
 
@@ -773,9 +773,9 @@ b0:
 ; CHECK: memw(r{{[0-9]+}}+#0) &= r{{[0-9]+}}
 define void @f69(i32 %a0) #0 {
 b0:
-  %v0 = load i32, i32* @g5, align 4, !tbaa !5
+  %v0 = load i32, ptr @g5, align 4, !tbaa !5
   %v1 = and i32 %v0, %a0
-  store i32 %v1, i32* @g5, align 4, !tbaa !5
+  store i32 %v1, ptr @g5, align 4, !tbaa !5
   ret void
 }
 
@@ -783,9 +783,9 @@ b0:
 ; CHECK: memw(r{{[0-9]+}}+#0) = clrbit(#5)
 define void @f70() #0 {
 b0:
-  %v0 = load i32, i32* @g5, align 4, !tbaa !5
+  %v0 = load i32, ptr @g5, align 4, !tbaa !5
   %v1 = and i32 %v0, -33
-  store i32 %v1, i32* @g5, align 4, !tbaa !5
+  store i32 %v1, ptr @g5, align 4, !tbaa !5
   ret void
 }
 
@@ -793,9 +793,9 @@ b0:
 ; CHECK: memw(r{{[0-9]+}}+#0) = setbit(#7)
 define void @f71() #0 {
 b0:
-  %v0 = load i32, i32* @g5, align 4, !tbaa !5
+  %v0 = load i32, ptr @g5, align 4, !tbaa !5
   %v1 = or i32 %v0, 128
-  store i32 %v1, i32* @g5, align 4, !tbaa !5
+  store i32 %v1, ptr @g5, align 4, !tbaa !5
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/memset-inline.ll b/llvm/test/CodeGen/Hexagon/memset-inline.ll
index 4a77e24f39c64..5118b01226199 100644
--- a/llvm/test/CodeGen/Hexagon/memset-inline.ll
+++ b/llvm/test/CodeGen/Hexagon/memset-inline.ll
@@ -10,18 +10,16 @@ target triple = "hexagon-unknown--elf"
 define i32 @f0() #0 {
 b0:
   %v0 = alloca [10 x i32], align 8
-  %v1 = bitcast [10 x i32]* %v0 to i8*
-  call void @llvm.memset.p0i8.i32(i8* align 8 %v1, i8 0, i32 7, i1 false)
-  %v2 = getelementptr inbounds [10 x i32], [10 x i32]* %v0, i32 0, i32 0
-  call void @f1(i32* %v2) #0
+  call void @llvm.memset.p0.i32(ptr align 8 %v0, i8 0, i32 7, i1 false)
+  call void @f1(ptr %v0) #0
   ret i32 0
 }
 
 ; Function Attrs: nounwind
-declare void @f1(i32*) #0
+declare void @f1(ptr) #0
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1) #1
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1) #1
 
 attributes #0 = { nounwind }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/minu-zext-16.ll b/llvm/test/CodeGen/Hexagon/minu-zext-16.ll
index e27507da3d445..ea9329a657c5d 100644
--- a/llvm/test/CodeGen/Hexagon/minu-zext-16.ll
+++ b/llvm/test/CodeGen/Hexagon/minu-zext-16.ll
@@ -1,10 +1,10 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s
 ; CHECK: minu
 
-define zeroext i16 @f(i16* noalias nocapture %src) nounwind readonly {
+define zeroext i16 @f(ptr noalias nocapture %src) nounwind readonly {
 entry:
-  %arrayidx = getelementptr inbounds i16, i16* %src, i32 1
-  %0 = load i16, i16* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i16, ptr %src, i32 1
+  %0 = load i16, ptr %arrayidx, align 1
   %cmp = icmp ult i16 %0, 32767
   %. = select i1 %cmp, i16 %0, i16 32767
   ret i16 %.

diff  --git a/llvm/test/CodeGen/Hexagon/minu-zext-8.ll b/llvm/test/CodeGen/Hexagon/minu-zext-8.ll
index 15dc1a164912d..7ebdbd8961cca 100644
--- a/llvm/test/CodeGen/Hexagon/minu-zext-8.ll
+++ b/llvm/test/CodeGen/Hexagon/minu-zext-8.ll
@@ -1,10 +1,10 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s
 ; CHECK: minu
 
-define zeroext i8 @f(i8* noalias nocapture %src) nounwind readonly {
+define zeroext i8 @f(ptr noalias nocapture %src) nounwind readonly {
 entry:
-  %arrayidx = getelementptr inbounds i8, i8* %src, i32 1
-  %0 = load i8, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %src, i32 1
+  %0 = load i8, ptr %arrayidx, align 1
   %cmp = icmp ult i8 %0, 127
   %. = select i1 %cmp, i8 %0, i8 127
   ret i8 %.

diff  --git a/llvm/test/CodeGen/Hexagon/mipi-double-small.ll b/llvm/test/CodeGen/Hexagon/mipi-double-small.ll
index a1dc3ab76e013..8c67d46c47cd0 100644
--- a/llvm/test/CodeGen/Hexagon/mipi-double-small.ll
+++ b/llvm/test/CodeGen/Hexagon/mipi-double-small.ll
@@ -13,7 +13,7 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b1
   %v0 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> undef)
-  store <32 x i32> %v0, <32 x i32>* undef, align 128
+  store <32 x i32> %v0, ptr undef, align 128
   unreachable
 
 b3:                                               ; preds = %b1

diff  --git a/llvm/test/CodeGen/Hexagon/misaligned-access.ll b/llvm/test/CodeGen/Hexagon/misaligned-access.ll
index 7eb85ffcc2296..b2e6b2713600e 100644
--- a/llvm/test/CodeGen/Hexagon/misaligned-access.ll
+++ b/llvm/test/CodeGen/Hexagon/misaligned-access.ll
@@ -8,10 +8,9 @@ declare i32 @f0(i64) #0
 define i32 @f1() #0 {
 b0:
   %v0 = alloca i32, align 4
-  %v1 = load i32, i32* @g0, align 4
-  store i32 %v1, i32* %v0, align 4
-  %v2 = bitcast i32* %v0 to i64*
-  %v3 = load i64, i64* %v2, align 8
+  %v1 = load i32, ptr @g0, align 4
+  store i32 %v1, ptr %v0, align 4
+  %v3 = load i64, ptr %v0, align 8
   %v4 = call i32 @f0(i64 %v3)
   ret i32 %v4
 }

diff  --git a/llvm/test/CodeGen/Hexagon/misaligned-const-load.ll b/llvm/test/CodeGen/Hexagon/misaligned-const-load.ll
index f209d80eed3b5..25a9f02e4f1d1 100644
--- a/llvm/test/CodeGen/Hexagon/misaligned-const-load.ll
+++ b/llvm/test/CodeGen/Hexagon/misaligned-const-load.ll
@@ -7,7 +7,7 @@ target triple = "hexagon"
 
 define i32 @bad_load() #0 !dbg !10 {
 entry:
-  %0 = load i32, i32* inttoptr (i32 74565 to i32*), align 4, !dbg !13, !tbaa !14
+  %0 = load i32, ptr inttoptr (i32 74565 to ptr), align 4, !dbg !13, !tbaa !14
   ret i32 %0, !dbg !18
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/misaligned-const-store.ll b/llvm/test/CodeGen/Hexagon/misaligned-const-store.ll
index 27f472ef0c909..75d3a4ec69357 100644
--- a/llvm/test/CodeGen/Hexagon/misaligned-const-store.ll
+++ b/llvm/test/CodeGen/Hexagon/misaligned-const-store.ll
@@ -7,7 +7,7 @@ target triple = "hexagon"
 
 define void @bad_store(i32 %a0) #0 !dbg !10 {
 entry:
-  store i32 %a0, i32* inttoptr (i32 74565 to i32*), align 4, !dbg !13, !tbaa !14
+  store i32 %a0, ptr inttoptr (i32 74565 to ptr), align 4, !dbg !13, !tbaa !14
   ret void, !dbg !18
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/misaligned_double_vector_store_not_fast.ll b/llvm/test/CodeGen/Hexagon/misaligned_double_vector_store_not_fast.ll
index 0945e17531be7..a2dde20eb4ab0 100644
--- a/llvm/test/CodeGen/Hexagon/misaligned_double_vector_store_not_fast.ll
+++ b/llvm/test/CodeGen/Hexagon/misaligned_double_vector_store_not_fast.ll
@@ -23,14 +23,12 @@ entry:
   %3 = bitcast <64 x i32> %2 to <128 x i16>
   %4 = shufflevector <128 x i16> %3, <128 x i16> undef, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
   %5 = add nuw nsw i32 %1, 32896
-  %6 = getelementptr inbounds i16, i16* undef, i32 %5
-  %7 = bitcast i16* %6 to <64 x i16>*
-  store <64 x i16> %4, <64 x i16>* %7, align 128
-  %8 = shufflevector <128 x i16> %3, <128 x i16> undef, <64 x i32> <i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
-  %9 = add nuw nsw i32 %1, 32960
-  %10 = getelementptr inbounds i16, i16* undef, i32 %9
-  %11 = bitcast i16* %10 to <64 x i16>*
-  store <64 x i16> %8, <64 x i16>* %11, align 128
+  %6 = getelementptr inbounds i16, ptr undef, i32 %5
+  store <64 x i16> %4, ptr %6, align 128
+  %7 = shufflevector <128 x i16> %3, <128 x i16> undef, <64 x i32> <i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
+  %8 = add nuw nsw i32 %1, 32960
+  %9 = getelementptr inbounds i16, ptr undef, i32 %8
+  store <64 x i16> %7, ptr %9, align 128
   br i1 false, label %"consume demosaiced", label %"for demosaiced.s0.y.y"
 
 "consume demosaiced":                             ; preds = %"for demosaiced.s0.y.y"

diff  --git a/llvm/test/CodeGen/Hexagon/misched-top-rptracker-sync.ll b/llvm/test/CodeGen/Hexagon/misched-top-rptracker-sync.ll
index c83389ee21089..379ee485296b1 100644
--- a/llvm/test/CodeGen/Hexagon/misched-top-rptracker-sync.ll
+++ b/llvm/test/CodeGen/Hexagon/misched-top-rptracker-sync.ll
@@ -7,62 +7,60 @@
 
 target triple = "hexagon"
 
-%struct.A = type { %struct.B*, %struct.B* }
-%struct.B = type { i8*, %struct.B*, %struct.B* }
+%struct.A = type { ptr, ptr }
+%struct.B = type { ptr, ptr, ptr }
 
 @.str.4 = external hidden unnamed_addr constant [41 x i8], align 1
 @__func__.fred = external hidden unnamed_addr constant [16 x i8], align 1
 @.str.5 = external hidden unnamed_addr constant [43 x i8], align 1
 
 ; Function Attrs: nounwind
-declare void @_Assert(i8*, i8*) #0
+declare void @_Assert(ptr, ptr) #0
 
 ; Function Attrs: nounwind
-define void @fred(%struct.A* %pA, %struct.B* %p) #0 !dbg !6 {
+define void @fred(ptr %pA, ptr %p) #0 !dbg !6 {
 entry:
-  tail call void @llvm.dbg.value(metadata %struct.A* %pA, i64 0, metadata !26, metadata !28), !dbg !29
-  tail call void @llvm.dbg.value(metadata %struct.B* %p, i64 0, metadata !27, metadata !28), !dbg !30
-  %cmp = icmp eq %struct.B* %p, null, !dbg !31
+  tail call void @llvm.dbg.value(metadata ptr %pA, i64 0, metadata !26, metadata !28), !dbg !29
+  tail call void @llvm.dbg.value(metadata ptr %p, i64 0, metadata !27, metadata !28), !dbg !30
+  %cmp = icmp eq ptr %p, null, !dbg !31
   br i1 %cmp, label %cond.false, label %cond.end, !dbg !31
 
 cond.false:                                       ; preds = %entry
-  tail call void @_Assert(i8* getelementptr inbounds ([41 x i8], [41 x i8]* @.str.4, i32 0, i32 0), i8* getelementptr inbounds ([16 x i8], [16 x i8]* @__func__.fred, i32 0, i32 0)) #0, !dbg !32
+  tail call void @_Assert(ptr @.str.4, ptr @__func__.fred) #0, !dbg !32
   br label %cond.end, !dbg !32
 
 cond.end:                                         ; preds = %cond.false, %entry
-  %cmp1 = icmp eq %struct.A* %pA, null, !dbg !34
+  %cmp1 = icmp eq ptr %pA, null, !dbg !34
   br i1 %cmp1, label %cond.false3, label %cond.end4, !dbg !34
 
 cond.false3:                                      ; preds = %cond.end
-  tail call void @_Assert(i8* getelementptr inbounds ([43 x i8], [43 x i8]* @.str.5, i32 0, i32 0), i8* getelementptr inbounds ([16 x i8], [16 x i8]* @__func__.fred, i32 0, i32 0)) #0, !dbg !35
+  tail call void @_Assert(ptr @.str.5, ptr @__func__.fred) #0, !dbg !35
   br label %cond.end4, !dbg !35
 
 cond.end4:                                        ; preds = %cond.false3, %cond.end
-  %p2 = getelementptr inbounds %struct.A, %struct.A* %pA, i32 0, i32 0, !dbg !36
-  %0 = load %struct.B*, %struct.B** %p2, align 4, !dbg !38, !tbaa !39
-  %cmp5 = icmp eq %struct.B* %0, null, !dbg !44
+  %0 = load ptr, ptr %pA, align 4, !dbg !38, !tbaa !39
+  %cmp5 = icmp eq ptr %0, null, !dbg !44
   br i1 %cmp5, label %if.then, label %if.end, !dbg !45
 
 if.then:                                          ; preds = %cond.end4
-  %p1 = getelementptr inbounds %struct.A, %struct.A* %pA, i32 0, i32 1, !dbg !46
-  store %struct.B* %p, %struct.B** %p1, align 4, !dbg !48, !tbaa !49
-  store %struct.B* %p, %struct.B** %p2, align 4, !dbg !50, !tbaa !39
-  %p4 = getelementptr inbounds %struct.B, %struct.B* %p, i32 0, i32 1, !dbg !51
-  store %struct.B* null, %struct.B** %p4, align 4, !dbg !52, !tbaa !53
-  %p5 = getelementptr inbounds %struct.B, %struct.B* %p, i32 0, i32 2, !dbg !55
-  store %struct.B* null, %struct.B** %p5, align 4, !dbg !56, !tbaa !57
+  %p1 = getelementptr inbounds %struct.A, ptr %pA, i32 0, i32 1, !dbg !46
+  store ptr %p, ptr %p1, align 4, !dbg !48, !tbaa !49
+  store ptr %p, ptr %pA, align 4, !dbg !50, !tbaa !39
+  %p4 = getelementptr inbounds %struct.B, ptr %p, i32 0, i32 1, !dbg !51
+  store ptr null, ptr %p4, align 4, !dbg !52, !tbaa !53
+  %p5 = getelementptr inbounds %struct.B, ptr %p, i32 0, i32 2, !dbg !55
+  store ptr null, ptr %p5, align 4, !dbg !56, !tbaa !57
   br label %return, !dbg !58
 
 if.end:                                           ; preds = %cond.end4
-  %1 = ptrtoint %struct.B* %0 to i32, !dbg !59
-  %p57 = getelementptr inbounds %struct.B, %struct.B* %p, i32 0, i32 2, !dbg !60
-  store %struct.B* null, %struct.B** %p57, align 4, !dbg !61, !tbaa !57
-  %p49 = getelementptr inbounds %struct.B, %struct.B* %p, i32 0, i32 1, !dbg !62
-  %2 = bitcast %struct.B** %p49 to i32*, !dbg !63
-  store i32 %1, i32* %2, align 4, !dbg !63, !tbaa !53
-  %p511 = getelementptr inbounds %struct.B, %struct.B* %0, i32 0, i32 2, !dbg !64
-  store %struct.B* %p, %struct.B** %p511, align 4, !dbg !65, !tbaa !57
-  store %struct.B* %p, %struct.B** %p2, align 4, !dbg !66, !tbaa !39
+  %1 = ptrtoint ptr %0 to i32, !dbg !59
+  %p57 = getelementptr inbounds %struct.B, ptr %p, i32 0, i32 2, !dbg !60
+  store ptr null, ptr %p57, align 4, !dbg !61, !tbaa !57
+  %p49 = getelementptr inbounds %struct.B, ptr %p, i32 0, i32 1, !dbg !62
+  store i32 %1, ptr %p49, align 4, !dbg !63, !tbaa !53
+  %p511 = getelementptr inbounds %struct.B, ptr %0, i32 0, i32 2, !dbg !64
+  store ptr %p, ptr %p511, align 4, !dbg !65, !tbaa !57
+  store ptr %p, ptr %pA, align 4, !dbg !66, !tbaa !39
   br label %return, !dbg !67
 
 return:                                           ; preds = %if.end, %if.then

diff  --git a/llvm/test/CodeGen/Hexagon/mpy.ll b/llvm/test/CodeGen/Hexagon/mpy.ll
index 7c1e8c8d3f07f..68d412fa1d0a7 100644
--- a/llvm/test/CodeGen/Hexagon/mpy.ll
+++ b/llvm/test/CodeGen/Hexagon/mpy.ll
@@ -6,15 +6,15 @@ b0:
   %v0 = alloca i32, align 4
   %v1 = alloca i32, align 4
   %v2 = alloca i32, align 4
-  store i32 %a0, i32* %v0, align 4
-  store i32 %a1, i32* %v1, align 4
-  store i32 %a2, i32* %v2, align 4
-  %v3 = load i32, i32* %v1, align 4
-  %v4 = load i32, i32* %v0, align 4
+  store i32 %a0, ptr %v0, align 4
+  store i32 %a1, ptr %v1, align 4
+  store i32 %a2, ptr %v2, align 4
+  %v3 = load i32, ptr %v1, align 4
+  %v4 = load i32, ptr %v0, align 4
   %v5 = mul nsw i32 %v3, %v4
-  %v6 = load i32, i32* %v2, align 4
+  %v6 = load i32, ptr %v2, align 4
   %v7 = add nsw i32 %v5, %v6
-  store i32 %v7, i32* %v1, align 4
+  store i32 %v7, ptr %v1, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/mpysin-imm.ll b/llvm/test/CodeGen/Hexagon/mpysin-imm.ll
index 2bf715a0a0adb..acb7e3fbe4275 100644
--- a/llvm/test/CodeGen/Hexagon/mpysin-imm.ll
+++ b/llvm/test/CodeGen/Hexagon/mpysin-imm.ll
@@ -10,7 +10,7 @@ target triple = "hexagon-unknown--elf"
 define i32 @f0(i32 %a0) #0 {
 b0:
   %v0 = mul nsw i32 %a0, 1536
-  store i32 %v0, i32* @g0, align 4
+  store i32 %v0, ptr @g0, align 4
   %v1 = sub nsw i32 0, %v0
   ret i32 %v1
 }

diff  --git a/llvm/test/CodeGen/Hexagon/mul64-sext.ll b/llvm/test/CodeGen/Hexagon/mul64-sext.ll
index d409e4de4277d..b615c0a596294 100644
--- a/llvm/test/CodeGen/Hexagon/mul64-sext.ll
+++ b/llvm/test/CodeGen/Hexagon/mul64-sext.ll
@@ -18,9 +18,9 @@ b2:
 ; CHECK: r0 = memb(r0+#0)
 ; CHECK: r1:0 = mpy(r2,r0)
 ; CHECK: jumpr r31
-define i64 @mul_2(i8* %a0, i64 %a1) #0 {
+define i64 @mul_2(ptr %a0, i64 %a1) #0 {
 b2:
-  %v3 = load i8, i8* %a0
+  %v3 = load i8, ptr %a0
   %v4 = sext i8 %v3 to i64
   %v5 = shl i64 %a1, 32
   %v6 = ashr exact i64 %v5, 32
@@ -76,11 +76,11 @@ b3:
 ; CHECK: r5:4 += mpy(r2,r0)
 ; CHECK: r1:0 = combine(r5,r4)
 ; CHECK: jumpr r31
-define i64 @mul_acc_2(i64 %a0, i32* %a1, i64 %a2) #0 {
+define i64 @mul_acc_2(i64 %a0, ptr %a1, i64 %a2) #0 {
 b3:
   %v4 = shl i64 %a0, 32
   %v5 = ashr exact i64 %v4, 32
-  %v6 = load i32, i32* %a1
+  %v6 = load i32, ptr %a1
   %v7 = sext i32 %v6 to i64
   %v8 = mul nsw i64 %v7, %v5
   %v9 = add i64 %a2, %v8
@@ -107,9 +107,9 @@ b3:
 ; CHECK: r6 = memw(r0+#0)
 ; CHECK: r1:0 -= mpy(r2,r6)
 ; CHECK: jumpr r31
-define i64 @mul_nac_2(i32* %a0, i64 %a1, i64 %a2) #0 {
+define i64 @mul_nac_2(ptr %a0, i64 %a1, i64 %a2) #0 {
 b3:
-  %v4 = load i32, i32* %a0
+  %v4 = load i32, ptr %a0
   %v5 = sext i32 %v4 to i64
   %v6 = shl i64 %a1, 32
   %v7 = ashr exact i64 %v6, 32

diff  --git a/llvm/test/CodeGen/Hexagon/mul64.ll b/llvm/test/CodeGen/Hexagon/mul64.ll
index e598b8f2cd174..1df6d3beac2aa 100644
--- a/llvm/test/CodeGen/Hexagon/mul64.ll
+++ b/llvm/test/CodeGen/Hexagon/mul64.ll
@@ -28,7 +28,7 @@ b0:
 
 ; Given int w[2], short h[4], signed char c[8], the below tests check for the
 ; generation of dpmpyuu_s0.
-; w[0] * h[0]
+; w[0] * h[0]
 ; CHECK-LABEL: f2:
 ; CHECK: = sxth
 ; CHECK: r1:0 = mpyu(
@@ -43,7 +43,7 @@ b0:
   ret i64 %v5
 }
 
-; w[0] * h[1]
+; w[0] * h[1]
 ; CHECK-LABEL: f3:
 ; CHECK: = asrh
 ; CHECK: r1:0 = mpyu(
@@ -57,7 +57,7 @@ b0:
   ret i64 %v4
 }
 
-; w[0] * h[2]
+; w[0] * h[2]
 ; CHECK-LABEL: f4:
 ; CHECK: = extract(
 ; CHECK: r1:0 = mpyu(
@@ -73,7 +73,7 @@ b0:
   ret i64 %v6
 }
 
-; w[0] * h[3]
+; w[0] * h[3]
 ; CHECK-LABEL: f5:
 ; CHECK: = extractu(
 ; CHECK: r1:0 = mpyu(
@@ -89,7 +89,7 @@ b0:
   ret i64 %v6
 }
 
-; w[1] * h[0]
+; w[1] * h[0]
 ; CHECK-LABEL: f6:
 ; CHECK: = sxth(
 ; CHECK: r1:0 = mpyu(
@@ -104,7 +104,7 @@ b0:
   ret i64 %v5
 }
 
-; w[0] * c[0]
+; w[0] * c[0]
 ; CHECK-LABEL: f7:
 ; CHECK: = and({{.*}}#255)
 ; CHECK: r1:0 = mpyu(
@@ -116,7 +116,7 @@ b0:
   ret i64 %v2
 }
 
-; w[0] * c[2]
+; w[0] * c[2]
 ; CHECK-LABEL: f8:
 ; CHECK: = extractu(
 ; CHECK: r1:0 = mpyu(
@@ -129,7 +129,7 @@ b0:
   ret i64 %v3
 }
 
-; w[0] * c[7]
+; w[0] * c[7]
 ; CHECK-LABEL: f9:
 ; CHECK: = lsr(
 ; CHECK: r1:0 = mpyu(
@@ -168,7 +168,7 @@ b0:
 
 ; Given unsigned int w[2], unsigned short h[4], unsigned char c[8], the below
 ; tests check for the generation of dpmpyss_s0.
-; w[0] * h[0]
+; w[0] * h[0]
 ; CHECK-LABEL: f12:
 ; CHECK: = sxth
 ; CHECK: r1:0 = mpy(
@@ -182,7 +182,7 @@ b0:
   ret i64 %v4
 }
 
-; w[0] * h[1]
+; w[0] * h[1]
 ; CHECK-LABEL: f13:
 ; CHECK: = asrh
 ; CHECK: r1:0 = mpy(
@@ -197,7 +197,7 @@ b0:
   ret i64 %v5
 }
 
-; w[0] * h[2]
+; w[0] * h[2]
 ; CHECK-LABEL: f14:
 ; CHECK: = extract(
 ; CHECK: r1:0 = mpy(
@@ -214,7 +214,7 @@ b0:
   ret i64 %v7
 }
 
-; w[0] * h[3]
+; w[0] * h[3]
 ; CHECK-LABEL: f15:
 ; CHECK: = sxth(
 ; CHECK: r1:0 = mpy(
@@ -227,7 +227,7 @@ b0:
   ret i64 %v3
 }
 
-; w[1] * h[0]
+; w[1] * h[0]
 ; CHECK-LABEL: f16:
 ; CHECK: = asrh(
 ; CHECK: r1:0 = mpy(
@@ -241,7 +241,7 @@ b0:
   ret i64 %v4
 }
 
-; w[0] * c[0]
+; w[0] * c[0]
 ; CHECK-LABEL: f17:
 ; CHECK: = sxtb(
 ; CHECK: r1:0 = mpy(
@@ -255,7 +255,7 @@ b0:
   ret i64 %v4
 }
 
-; w[0] * c[2]
+; w[0] * c[2]
 ; CHECK-LABEL: f18:
 ; CHECK: = extract(
 ; CHECK: r1:0 = mpy(
@@ -272,7 +272,7 @@ b0:
   ret i64 %v7
 }
 
-; w[0] * c[7]
+; w[0] * c[7]
 ; CHECK-LABEL: f19:
 ; CHECK: = sxtb(
 ; CHECK: r1:0 = mpy(

diff  --git a/llvm/test/CodeGen/Hexagon/mulhs.ll b/llvm/test/CodeGen/Hexagon/mulhs.ll
index 4c0840b57962e..73ac4c788ad3c 100644
--- a/llvm/test/CodeGen/Hexagon/mulhs.ll
+++ b/llvm/test/CodeGen/Hexagon/mulhs.ll
@@ -7,12 +7,12 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define i32 @fred(i64 %x, i64 %y, i64* nocapture %z) #0 {
+define i32 @fred(i64 %x, i64 %y, ptr nocapture %z) #0 {
 entry:
   %0 = tail call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %x, i64 %y)
   %1 = extractvalue { i64, i1 } %0, 1
   %2 = extractvalue { i64, i1 } %0, 0
-  store i64 %2, i64* %z, align 8
+  store i64 %2, ptr %z, align 8
   %conv = zext i1 %1 to i32
   ret i32 %conv
 }

diff  --git a/llvm/test/CodeGen/Hexagon/multi-cycle.ll b/llvm/test/CodeGen/Hexagon/multi-cycle.ll
index 1e2ea26a7931c..a3fc6f0c575e0 100644
--- a/llvm/test/CodeGen/Hexagon/multi-cycle.ll
+++ b/llvm/test/CodeGen/Hexagon/multi-cycle.ll
@@ -12,18 +12,15 @@ target triple = "hexagon"
 
 @ZERO = global <16 x i32> zeroinitializer, align 64
 
-define void @fred(i16* nocapture readonly %a0, i32 %a1, i32 %a2, i16* nocapture %a3) #0 {
+define void @fred(ptr nocapture readonly %a0, i32 %a1, i32 %a2, ptr nocapture %a3) #0 {
 b4:
-  %v5 = bitcast i16* %a0 to <16 x i32>*
-  %v6 = getelementptr inbounds i16, i16* %a0, i32 %a1
-  %v7 = bitcast i16* %v6 to <16 x i32>*
+  %v6 = getelementptr inbounds i16, ptr %a0, i32 %a1
   %v8 = mul nsw i32 %a1, 2
-  %v9 = getelementptr inbounds i16, i16* %a0, i32 %v8
-  %v10 = bitcast i16* %v9 to <16 x i32>*
-  %v11 = load <16 x i32>, <16 x i32>* %v5, align 64, !tbaa !1
-  %v12 = load <16 x i32>, <16 x i32>* %v7, align 64, !tbaa !1
-  %v13 = load <16 x i32>, <16 x i32>* %v10, align 64, !tbaa !1
-  %v14 = load <16 x i32>, <16 x i32>* @ZERO, align 64, !tbaa !1
+  %v9 = getelementptr inbounds i16, ptr %a0, i32 %v8
+  %v11 = load <16 x i32>, ptr %a0, align 64, !tbaa !1
+  %v12 = load <16 x i32>, ptr %v6, align 64, !tbaa !1
+  %v13 = load <16 x i32>, ptr %v9, align 64, !tbaa !1
+  %v14 = load <16 x i32>, ptr @ZERO, align 64, !tbaa !1
   %v15 = tail call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> %v14, <16 x i32> %v14)
   %v16 = sdiv i32 %a2, 32
   %v17 = icmp sgt i32 %a2, 31
@@ -34,41 +31,37 @@ b18:                                              ; preds = %b4
   %v20 = add i32 %a1, 32
   %v21 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v12, <16 x i32> %v12)
   %v22 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v11, <16 x i32> %v13)
-  %v23 = getelementptr inbounds i16, i16* %a0, i32 %v19
-  %v24 = getelementptr inbounds i16, i16* %a0, i32 %v20
-  %v25 = getelementptr inbounds i16, i16* %a0, i32 32
+  %v23 = getelementptr inbounds i16, ptr %a0, i32 %v19
+  %v24 = getelementptr inbounds i16, ptr %a0, i32 %v20
+  %v25 = getelementptr inbounds i16, ptr %a0, i32 32
   %v26 = tail call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> %v11, <16 x i32> %v13)
   %v27 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v22, <16 x i32> %v21)
-  %v28 = bitcast i16* %v23 to <16 x i32>*
-  %v29 = bitcast i16* %v24 to <16 x i32>*
-  %v30 = bitcast i16* %v25 to <16 x i32>*
-  %v31 = bitcast i16* %a3 to <16 x i32>*
   br label %b32
 
 b32:                                              ; preds = %b32, %b18
   %v33 = phi i32 [ 0, %b18 ], [ %v63, %b32 ]
-  %v34 = phi <16 x i32>* [ %v31, %b18 ], [ %v62, %b32 ]
-  %v35 = phi <16 x i32>* [ %v28, %b18 ], [ %v46, %b32 ]
-  %v36 = phi <16 x i32>* [ %v29, %b18 ], [ %v44, %b32 ]
-  %v37 = phi <16 x i32>* [ %v30, %b18 ], [ %v42, %b32 ]
+  %v34 = phi ptr [ %a3, %b18 ], [ %v62, %b32 ]
+  %v35 = phi ptr [ %v23, %b18 ], [ %v46, %b32 ]
+  %v36 = phi ptr [ %v24, %b18 ], [ %v44, %b32 ]
+  %v37 = phi ptr [ %v25, %b18 ], [ %v42, %b32 ]
   %v38 = phi <16 x i32> [ %v15, %b18 ], [ %v39, %b32 ]
   %v39 = phi <16 x i32> [ %v26, %b18 ], [ %v56, %b32 ]
   %v40 = phi <16 x i32> [ %v27, %b18 ], [ %v51, %b32 ]
   %v41 = phi <16 x i32> [ %v15, %b18 ], [ %v40, %b32 ]
-  %v42 = getelementptr inbounds <16 x i32>, <16 x i32>* %v37, i32 1
-  %v43 = load <16 x i32>, <16 x i32>* %v37, align 64, !tbaa !1
-  %v44 = getelementptr inbounds <16 x i32>, <16 x i32>* %v36, i32 1
-  %v45 = load <16 x i32>, <16 x i32>* %v36, align 64, !tbaa !1
-  %v46 = getelementptr inbounds <16 x i32>, <16 x i32>* %v35, i32 1
-  %v47 = load <16 x i32>, <16 x i32>* %v35, align 64, !tbaa !1
+  %v42 = getelementptr inbounds <16 x i32>, ptr %v37, i32 1
+  %v43 = load <16 x i32>, ptr %v37, align 64, !tbaa !1
+  %v44 = getelementptr inbounds <16 x i32>, ptr %v36, i32 1
+  %v45 = load <16 x i32>, ptr %v36, align 64, !tbaa !1
+  %v46 = getelementptr inbounds <16 x i32>, ptr %v35, i32 1
+  %v47 = load <16 x i32>, ptr %v35, align 64, !tbaa !1
   %v48 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v43, <16 x i32> %v47)
   %v49 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v45, <16 x i32> %v45)
   %v50 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v40, <16 x i32> %v41, i32 62)
   %v51 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v48, <16 x i32> %v49)
   %v52 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v51, <16 x i32> %v40, i32 2)
   %v53 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffh(<16 x i32> %v50, <16 x i32> %v52)
-  %v54 = getelementptr inbounds <16 x i32>, <16 x i32>* %v34, i32 1
-  store <16 x i32> %v53, <16 x i32>* %v34, align 64, !tbaa !1
+  %v54 = getelementptr inbounds <16 x i32>, ptr %v34, i32 1
+  store <16 x i32> %v53, ptr %v34, align 64, !tbaa !1
   %v55 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v39, <16 x i32> %v38, i32 62)
   %v56 = tail call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> %v43, <16 x i32> %v47)
   %v57 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v56, <16 x i32> %v39, i32 2)
@@ -76,8 +69,8 @@ b32:                                              ; preds = %b32, %b18
   %v59 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v58, <16 x i32> %v55)
   %v60 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v59, <16 x i32> %v57)
   %v61 = tail call <16 x i32> @llvm.hexagon.V6.vabsh(<16 x i32> %v60)
-  %v62 = getelementptr inbounds <16 x i32>, <16 x i32>* %v34, i32 2
-  store <16 x i32> %v61, <16 x i32>* %v54, align 64, !tbaa !1
+  %v62 = getelementptr inbounds <16 x i32>, ptr %v34, i32 2
+  store <16 x i32> %v61, ptr %v54, align 64, !tbaa !1
   %v63 = add nsw i32 %v33, 1
   %v64 = icmp slt i32 %v63, %v16
   br i1 %v64, label %b32, label %b65

diff  --git a/llvm/test/CodeGen/Hexagon/mux-basic.ll b/llvm/test/CodeGen/Hexagon/mux-basic.ll
index ef1f7cb60e175..0707b744696a7 100644
--- a/llvm/test/CodeGen/Hexagon/mux-basic.ll
+++ b/llvm/test/CodeGen/Hexagon/mux-basic.ll
@@ -6,7 +6,7 @@ target triple = "hexagon"
 
 %struct.struct_t = type { i32, i32, i32 }
 
-define void @foo(%struct.struct_t* nocapture %p, i32 %x, i32 %y, i32 %z) nounwind {
+define void @foo(ptr nocapture %p, i32 %x, i32 %y, i32 %z) nounwind {
 entry:
   %cmp = icmp slt i32 %x, 4660
   %add = add nsw i32 %x, 1
@@ -14,12 +14,11 @@ entry:
   %x.add.y = select i1 %cmp, i32 %x, i32 %y
   %. = zext i1 %cmp to i32
   %b.0 = add nsw i32 %x.add.y, %z
-  %a3 = getelementptr inbounds %struct.struct_t, %struct.struct_t* %p, i32 0, i32 0
-  store i32 %add.y, i32* %a3, align 4, !tbaa !0
-  %b4 = getelementptr inbounds %struct.struct_t, %struct.struct_t* %p, i32 0, i32 1
-  store i32 %b.0, i32* %b4, align 4, !tbaa !0
-  %c5 = getelementptr inbounds %struct.struct_t, %struct.struct_t* %p, i32 0, i32 2
-  store i32 %., i32* %c5, align 4, !tbaa !0
+  store i32 %add.y, ptr %p, align 4, !tbaa !0
+  %b4 = getelementptr inbounds %struct.struct_t, ptr %p, i32 0, i32 1
+  store i32 %b.0, ptr %b4, align 4, !tbaa !0
+  %c5 = getelementptr inbounds %struct.struct_t, ptr %p, i32 0, i32 2
+  store i32 %., ptr %c5, align 4, !tbaa !0
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/muxii-crash.ll b/llvm/test/CodeGen/Hexagon/muxii-crash.ll
index 05165ffce0d34..32c71dc7c89ec 100644
--- a/llvm/test/CodeGen/Hexagon/muxii-crash.ll
+++ b/llvm/test/CodeGen/Hexagon/muxii-crash.ll
@@ -13,8 +13,8 @@ declare void @f0() #0
 define i32 @f1(i32 %a0) #0 {
 b0:
   %v0 = icmp slt i32 %a0, 3
-  %v1 = select i1 %v0, void ()* @f0, void ()* null
-  %v2 = ptrtoint void ()* %v1 to i32
+  %v1 = select i1 %v0, ptr @f0, ptr null
+  %v2 = ptrtoint ptr %v1 to i32
   ret i32 %v2
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/nbench1.ll b/llvm/test/CodeGen/Hexagon/nbench1.ll
index 8300a9ab89ca4..66c046a68614b 100644
--- a/llvm/test/CodeGen/Hexagon/nbench1.ll
+++ b/llvm/test/CodeGen/Hexagon/nbench1.ll
@@ -7,7 +7,7 @@
 ; CHECK: p{{[0-3]+}} = cmp.gtu(r{{[0-9]+}},r{{[0-9]+}})
 ; CHECK-NEXT: }
 
- at array = external dso_local local_unnamed_addr global i32*, align 4
+ at array = external dso_local local_unnamed_addr global ptr, align 4
 
 ; Function Attrs: nofree norecurse nounwind
 define dso_local void @NumSift(i32 %i, i32 %j) local_unnamed_addr #0 {
@@ -17,7 +17,7 @@ entry:
   br i1 %cmp.not37, label %while.end, label %while.body.lr.ph
 
 while.body.lr.ph:                                 ; preds = %entry
-  %0 = load i32*, i32** @array, align 4
+  %0 = load ptr, ptr @array, align 4
   %add16 = add i32 %j, 1
   br label %while.body
 
@@ -28,27 +28,27 @@ while.body:                                       ; preds = %while.body.lr.ph, %
   br i1 %cmp2, label %if.then, label %if.end7
 
 if.then:                                          ; preds = %while.body
-  %arrayidx = getelementptr inbounds i32, i32* %0, i32 %add39
-  %1 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %0, i32 %add39
+  %1 = load i32, ptr %arrayidx, align 4
   %add3 = or i32 %add39, 1
-  %arrayidx4 = getelementptr inbounds i32, i32* %0, i32 %add3
-  %2 = load i32, i32* %arrayidx4, align 4
+  %arrayidx4 = getelementptr inbounds i32, ptr %0, i32 %add3
+  %2 = load i32, ptr %arrayidx4, align 4
   %cmp5 = icmp ult i32 %1, %2
   %spec.select = select i1 %cmp5, i32 %add3, i32 %add39
   br label %if.end7
 
 if.end7:                                          ; preds = %if.then, %while.body
   %k.0 = phi i32 [ %add39, %while.body ], [ %spec.select, %if.then ]
-  %arrayidx8 = getelementptr inbounds i32, i32* %0, i32 %i.addr.038
-  %3 = load i32, i32* %arrayidx8, align 4
-  %arrayidx9 = getelementptr inbounds i32, i32* %0, i32 %k.0
-  %4 = load i32, i32* %arrayidx9, align 4
+  %arrayidx8 = getelementptr inbounds i32, ptr %0, i32 %i.addr.038
+  %3 = load i32, ptr %arrayidx8, align 4
+  %arrayidx9 = getelementptr inbounds i32, ptr %0, i32 %k.0
+  %4 = load i32, ptr %arrayidx9, align 4
   %cmp10 = icmp ult i32 %3, %4
   br i1 %cmp10, label %if.then11, label %if.end17
 
 if.then11:                                        ; preds = %if.end7
-  store i32 %3, i32* %arrayidx9, align 4
-  store i32 %4, i32* %arrayidx8, align 4
+  store i32 %3, ptr %arrayidx9, align 4
+  store i32 %4, ptr %arrayidx8, align 4
   br label %if.end17
 
 if.end17:                                         ; preds = %if.end7, %if.then11

diff  --git a/llvm/test/CodeGen/Hexagon/newvalueSameReg.ll b/llvm/test/CodeGen/Hexagon/newvalueSameReg.ll
index e9a633a494c40..c8537c578c470 100644
--- a/llvm/test/CodeGen/Hexagon/newvalueSameReg.ll
+++ b/llvm/test/CodeGen/Hexagon/newvalueSameReg.ll
@@ -9,19 +9,19 @@
 ; CHECK-NOT: cmp.eq([[REG0:(r[0-9]+)]].new,[[REG0]])
 ; CHECK: cmp.eq([[REG1:(r[0-9]+)]],[[REG1]])
 
-%s.0 = type { i16, i8, i32, i8*, i8*, i8*, i8*, i8*, i8*, i32*, [2 x i32], i8*, i8*, i8*, %s.1, i8*, [8 x i8], i8 }
+%s.0 = type { i16, i8, i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, [2 x i32], ptr, ptr, ptr, %s.1, ptr, [8 x i8], i8 }
 %s.1 = type { i32, i16, i16 }
 
 @g0 = external global %s.0
 @g1 = external unnamed_addr constant [23 x i8], align 8
 
 ; Function Attrs: nounwind
-declare void @f0(%s.0* nocapture, i8* nocapture readonly, ...) #0
+declare void @f0(ptr nocapture, ptr nocapture readonly, ...) #0
 
 define void @f1() #1 {
 b0:
-  %v0 = load i32*, i32** undef, align 4
-  %v1 = load i32, i32* undef, align 4
+  %v0 = load ptr, ptr undef, align 4
+  %v1 = load i32, ptr undef, align 4
   br i1 undef, label %b4, label %b1
 
 b1:                                               ; preds = %b0
@@ -29,8 +29,8 @@ b1:                                               ; preds = %b0
   %v3 = lshr i32 %v1, 5
   %v4 = add i32 %v3, -134217728
   %v5 = select i1 %v2, i32 %v4, i32 %v3
-  %v6 = getelementptr inbounds i32, i32* %v0, i32 %v5
-  %v7 = icmp ult i32* %v6, %v0
+  %v6 = getelementptr inbounds i32, ptr %v0, i32 %v5
+  %v7 = icmp ult ptr %v6, %v0
   %v8 = select i1 %v7, i32 0, i32 1
   br i1 undef, label %b2, label %b4
 
@@ -39,14 +39,14 @@ b2:                                               ; preds = %b1
   %v10 = lshr i32 %v1, 5
   %v11 = add i32 %v10, -134217728
   %v12 = select i1 %v9, i32 %v11, i32 %v10
-  %v13 = getelementptr inbounds i32, i32* %v0, i32 %v12
-  %v14 = icmp ult i32* %v13, %v0
+  %v13 = getelementptr inbounds i32, ptr %v0, i32 %v12
+  %v14 = icmp ult ptr %v13, %v0
   %v15 = select i1 %v14, i32 0, i32 1
   %v16 = icmp eq i32 %v8, %v15
   br i1 %v16, label %b4, label %b3
 
 b3:                                               ; preds = %b2
-  call void (%s.0*, i8*, ...) @f0(%s.0* @g0, i8* getelementptr inbounds ([23 x i8], [23 x i8]* @g1, i32 0, i32 0), i32 %v8, i32 %v15) #0
+  call void (ptr, ptr, ...) @f0(ptr @g0, ptr @g1, i32 %v8, i32 %v15) #0
   unreachable
 
 b4:                                               ; preds = %b2, %b1, %b0

diff  --git a/llvm/test/CodeGen/Hexagon/newvaluejump-kill.ll b/llvm/test/CodeGen/Hexagon/newvaluejump-kill.ll
index 9f4ab9daefde3..14ebdfde16eb4 100644
--- a/llvm/test/CodeGen/Hexagon/newvaluejump-kill.ll
+++ b/llvm/test/CodeGen/Hexagon/newvaluejump-kill.ll
@@ -11,7 +11,7 @@ target triple = "hexagon"
 define void @fred(i16 signext %a0, i16 signext %a1) #0 {
 b1:
   %v1 = sext i16 %a0 to i32
-  %v2 = getelementptr inbounds [182 x i16], [182 x i16]* @g0, i32 0, i32 %v1
+  %v2 = getelementptr inbounds [182 x i16], ptr @g0, i32 0, i32 %v1
   %v3 = sext i16 %a1 to i32
   %v4 = call i32 @llvm.hexagon.A2.asrh(i32 undef)
   %v5 = trunc i32 %v4 to i16
@@ -24,8 +24,8 @@ b6:                                               ; preds = %b1
 b8:                                               ; preds = %b8, %b6
   %v9 = phi i32 [ 128, %b6 ], [ %v13, %b8 ]
   %v10 = sub nsw i32 %v9, %v7
-  %v11 = getelementptr inbounds [182 x i16], [182 x i16]* @g0, i32 0, i32 %v10
-  %v12 = load i16, i16* %v11, align 2
+  %v11 = getelementptr inbounds [182 x i16], ptr @g0, i32 0, i32 %v10
+  %v12 = load i16, ptr %v11, align 2
   %v13 = add nuw nsw i32 %v9, 1
   br label %b8
 
@@ -36,8 +36,8 @@ b15:                                              ; preds = %b14
   unreachable
 
 b16:                                              ; preds = %b14
-  %v17 = getelementptr [182 x i16], [182 x i16]* @g0, i32 0, i32 %v3
-  %v18 = icmp ugt i16* %v17, %v2
+  %v17 = getelementptr [182 x i16], ptr @g0, i32 0, i32 %v3
+  %v18 = icmp ugt ptr %v17, %v2
   %v19 = or i1 %v18, undef
   br i1 %v19, label %b20, label %b21
 

diff  --git a/llvm/test/CodeGen/Hexagon/newvaluejump-postinc.ll b/llvm/test/CodeGen/Hexagon/newvaluejump-postinc.ll
index 44600b8fa8f77..5eefac8b84dc6 100644
--- a/llvm/test/CodeGen/Hexagon/newvaluejump-postinc.ll
+++ b/llvm/test/CodeGen/Hexagon/newvaluejump-postinc.ll
@@ -2,26 +2,26 @@
 ; CHECK-NOT: if {{.*}} cmp{{.*}}jump
 
 %s.0 = type opaque
-%s.1 = type { i8*, i8*, %s.2*, i32, [0 x i8] }
+%s.1 = type { ptr, ptr, ptr, i32, [0 x i8] }
 %s.2 = type opaque
 
 @g0 = private unnamed_addr constant [29 x i8] c"BUG: failure at %s:%d/%s()!\0A\00", align 1
 @g1 = private unnamed_addr constant [11 x i8] c"fs/namei.c\00", align 1
 @g2 = private unnamed_addr constant [8 x i8] c"putname\00", align 1
 @g3 = private unnamed_addr constant [5 x i8] c"BUG!\00", align 1
- at g4 = external global %s.0*, align 4
+ at g4 = external global ptr, align 4
 
 ; Function Attrs: nounwind
-define void @f0(%s.1* %a0) #0 {
+define void @f0(ptr %a0) #0 {
 b0:
-  %v0 = alloca %s.1*, align 4
-  store %s.1* %a0, %s.1** %v0, align 4
+  %v0 = alloca ptr, align 4
+  store ptr %a0, ptr %v0, align 4
   br label %b1, !llvm.loop !0
 
 b1:                                               ; preds = %b0
-  %v1 = load %s.1*, %s.1** %v0, align 4
-  %v2 = getelementptr inbounds %s.1, %s.1* %v1, i32 0, i32 3
-  %v3 = load i32, i32* %v2, align 4
+  %v1 = load ptr, ptr %v0, align 4
+  %v2 = getelementptr inbounds %s.1, ptr %v1, i32 0, i32 3
+  %v3 = load i32, ptr %v2, align 4
   %v4 = icmp sle i32 %v3, 0
   %v5 = xor i1 %v4, true
   %v6 = xor i1 %v5, true
@@ -34,8 +34,8 @@ b2:                                               ; preds = %b1
   br label %b3
 
 b3:                                               ; preds = %b2
-  %v10 = call i32 (i8*, ...) @f1(i8* getelementptr inbounds ([29 x i8], [29 x i8]* @g0, i32 0, i32 0), i8* getelementptr inbounds ([11 x i8], [11 x i8]* @g1, i32 0, i32 0), i32 246, i8* getelementptr inbounds ([8 x i8], [8 x i8]* @g2, i32 0, i32 0))
-  call void (i8*, ...) @f2(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @g3, i32 0, i32 0))
+  %v10 = call i32 (ptr, ...) @f1(ptr @g0, ptr @g1, i32 246, ptr @g2)
+  call void (ptr, ...) @f2(ptr @g3)
   unreachable
 
 b4:                                               ; No predecessors!
@@ -45,11 +45,11 @@ b5:                                               ; preds = %b4, %b1
   br label %b6
 
 b6:                                               ; preds = %b5
-  %v11 = load %s.1*, %s.1** %v0, align 4
-  %v12 = getelementptr inbounds %s.1, %s.1* %v11, i32 0, i32 3
-  %v13 = load i32, i32* %v12, align 4
+  %v11 = load ptr, ptr %v0, align 4
+  %v12 = getelementptr inbounds %s.1, ptr %v11, i32 0, i32 3
+  %v13 = load i32, ptr %v12, align 4
   %v14 = add i32 %v13, -1
-  store i32 %v14, i32* %v12, align 4
+  store i32 %v14, ptr %v12, align 4
   %v15 = icmp sgt i32 %v14, 0
   br i1 %v15, label %b7, label %b8
 
@@ -57,31 +57,26 @@ b7:                                               ; preds = %b6
   br label %b11
 
 b8:                                               ; preds = %b6
-  %v16 = load %s.1*, %s.1** %v0, align 4
-  %v17 = getelementptr inbounds %s.1, %s.1* %v16, i32 0, i32 0
-  %v18 = load i8*, i8** %v17, align 4
-  %v19 = load %s.1*, %s.1** %v0, align 4
-  %v20 = getelementptr inbounds %s.1, %s.1* %v19, i32 0, i32 4
-  %v21 = getelementptr inbounds [0 x i8], [0 x i8]* %v20, i32 0, i32 0
-  %v22 = icmp ne i8* %v18, %v21
+  %v16 = load ptr, ptr %v0, align 4
+  %v18 = load ptr, ptr %v16, align 4
+  %v19 = load ptr, ptr %v0, align 4
+  %v20 = getelementptr inbounds %s.1, ptr %v19, i32 0, i32 4
+  %v22 = icmp ne ptr %v18, %v20
   br i1 %v22, label %b9, label %b10
 
 b9:                                               ; preds = %b8
-  %v23 = load %s.0*, %s.0** @g4, align 4
-  %v24 = load %s.1*, %s.1** %v0, align 4
-  %v25 = getelementptr inbounds %s.1, %s.1* %v24, i32 0, i32 0
-  %v26 = load i8*, i8** %v25, align 4
-  call void @f3(%s.0* %v23, i8* %v26)
-  %v27 = load %s.1*, %s.1** %v0, align 4
-  %v28 = bitcast %s.1* %v27 to i8*
-  call void @f4(i8* %v28)
+  %v23 = load ptr, ptr @g4, align 4
+  %v24 = load ptr, ptr %v0, align 4
+  %v26 = load ptr, ptr %v24, align 4
+  call void @f3(ptr %v23, ptr %v26)
+  %v27 = load ptr, ptr %v0, align 4
+  call void @f4(ptr %v27)
   br label %b11
 
 b10:                                              ; preds = %b8
-  %v29 = load %s.0*, %s.0** @g4, align 4
-  %v30 = load %s.1*, %s.1** %v0, align 4
-  %v31 = bitcast %s.1* %v30 to i8*
-  call void @f3(%s.0* %v29, i8* %v31)
+  %v29 = load ptr, ptr @g4, align 4
+  %v30 = load ptr, ptr %v0, align 4
+  call void @f3(ptr %v29, ptr %v30)
   br label %b11
 
 b11:                                              ; preds = %b10, %b9, %b7
@@ -92,16 +87,16 @@ b11:                                              ; preds = %b10, %b9, %b7
 declare i32 @llvm.expect.i32(i32, i32) #1
 
 ; Function Attrs: nounwind
-declare i32 @f1(i8*, ...) #0
+declare i32 @f1(ptr, ...) #0
 
 ; Function Attrs: noreturn
-declare void @f2(i8*, ...) #2
+declare void @f2(ptr, ...) #2
 
 ; Function Attrs: nounwind
-declare void @f3(%s.0*, i8*) #0
+declare void @f3(ptr, ptr) #0
 
 ; Function Attrs: nounwind
-declare void @f4(i8*) #0
+declare void @f4(ptr) #0
 
 attributes #0 = { nounwind }
 attributes #1 = { nounwind readnone }

diff  --git a/llvm/test/CodeGen/Hexagon/newvaluejump.ll b/llvm/test/CodeGen/Hexagon/newvaluejump.ll
index 0697d297d7167..8e96a4afdbabb 100644
--- a/llvm/test/CodeGen/Hexagon/newvaluejump.ll
+++ b/llvm/test/CodeGen/Hexagon/newvaluejump.ll
@@ -10,10 +10,10 @@ define i32 @f0(i32 %a0) #0 {
 b0:
   %v0 = alloca i32, align 4
   %v1 = alloca i32, align 4
-  %v2 = load i32, i32* @g0, align 4
-  store i32 %v2, i32* %v0, align 4
+  %v2 = load i32, ptr @g0, align 4
+  store i32 %v2, ptr %v0, align 4
   call void @f2(i32 1, i32 2)
-  %v3 = load i32, i32* @g1, align 4
+  %v3 = load i32, ptr @g1, align 4
   %v4 = icmp ne i32 %v3, 0
   br i1 %v4, label %b1, label %b2
 

diff  --git a/llvm/test/CodeGen/Hexagon/newvaluejump2.ll b/llvm/test/CodeGen/Hexagon/newvaluejump2.ll
index 99c9d1a60af7c..9dceea6917d1c 100644
--- a/llvm/test/CodeGen/Hexagon/newvaluejump2.ll
+++ b/llvm/test/CodeGen/Hexagon/newvaluejump2.ll
@@ -8,8 +8,8 @@ define i32 @main() nounwind {
 entry:
 ; CHECK: if (cmp.gt(r{{[0-9]+}}.new,r{{[0-9]+}})) jump:{{[t|nt]}} .LBB{{[0-9]+}}_{{[0-9]+}}
   %Reg2 = alloca i32, align 4
-  %0 = load i32, i32* %Reg2, align 4
-  %1 = load i32, i32* @Reg, align 4
+  %0 = load i32, ptr %Reg2, align 4
+  %1 = load i32, ptr @Reg, align 4
   %tobool = icmp sle i32 %0, %1
   br i1 %tobool, label %if.then, label %if.else
 

diff  --git a/llvm/test/CodeGen/Hexagon/newvaluejump3.ll b/llvm/test/CodeGen/Hexagon/newvaluejump3.ll
index 93479666ad53d..59a9b2461f0ce 100644
--- a/llvm/test/CodeGen/Hexagon/newvaluejump3.ll
+++ b/llvm/test/CodeGen/Hexagon/newvaluejump3.ll
@@ -7,14 +7,14 @@
 target triple = "hexagon"
 
 %type.0 = type { %type.1, [64 x i8] }
-%type.1 = type { [12 x i8], %type.2*, double }
+%type.1 = type { [12 x i8], ptr, double }
 %type.2 = type { i16, i16, [1 x %type.3] }
 %type.3 = type { i32 }
-%type.4 = type { %type.2*, i32 }
+%type.4 = type { ptr, i32 }
 
-define hidden fastcc i8* @fred(%type.0* nocapture readonly %a0, i8* readonly %a1) unnamed_addr #2 {
+define hidden fastcc ptr @fred(ptr nocapture readonly %a0, ptr readonly %a1) unnamed_addr #2 {
 b2:
-  %v3 = load i8, i8* %a1, align 1
+  %v3 = load i8, ptr %a1, align 1
   br i1 undef, label %b4, label %b24
 
 b4:                                               ; preds = %b2
@@ -31,14 +31,14 @@ b5:                                               ; preds = %b4
   unreachable
 
 b6:                                               ; preds = %b4
-  %v7 = getelementptr inbounds i8, i8* %a1, i32 2
+  %v7 = getelementptr inbounds i8, ptr %a1, i32 2
   br label %b16
 
 b8:                                               ; preds = %b4
   br label %b16
 
 b9:                                               ; preds = %b4
-  %v10 = tail call fastcc i8* @fred(%type.0* undef, i8* undef)
+  %v10 = tail call fastcc ptr @fred(ptr undef, ptr undef)
   br label %b24
 
 b11:                                              ; preds = %b4
@@ -57,20 +57,20 @@ b15:                                              ; preds = %b14
   unreachable
 
 b16:                                              ; preds = %b20, %b14, %b8, %b6
-  %v17 = phi i8* [ %v21, %b20 ], [ undef, %b14 ], [ undef, %b8 ], [ %v7, %b6 ]
+  %v17 = phi ptr [ %v21, %b20 ], [ undef, %b14 ], [ undef, %b8 ], [ %v7, %b6 ]
   %v18 = phi i32 [ 0, %b20 ], [ undef, %b14 ], [ 0, %b8 ], [ 8, %b6 ]
   %v19 = icmp sgt i32 %v18, 0
   br i1 %v19, label %b20, label %b24
 
 b20:                                              ; preds = %b16
-  %v21 = getelementptr inbounds i8, i8* %v17, i32 1
-  %v22 = load i8, i8* %v17, align 1
+  %v21 = getelementptr inbounds i8, ptr %v17, i32 1
+  %v22 = load i8, ptr %v17, align 1
   %v23 = icmp eq i8 %v22, undef
   br i1 %v23, label %b16, label %b24
 
 b24:                                              ; preds = %b20, %b16, %b9, %b2
-  %v25 = phi i8* [ null, %b2 ], [ null, %b9 ], [ %v17, %b16 ], [ null, %b20 ]
-  ret i8* %v25
+  %v25 = phi ptr [ null, %b2 ], [ null, %b9 ], [ %v17, %b16 ], [ null, %b20 ]
+  ret ptr %v25
 }
 
 attributes #0 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/newvaluestore.ll b/llvm/test/CodeGen/Hexagon/newvaluestore.ll
index cc1ff00ecdcd0..b3be23362ba00 100644
--- a/llvm/test/CodeGen/Hexagon/newvaluestore.ll
+++ b/llvm/test/CodeGen/Hexagon/newvaluestore.ll
@@ -3,11 +3,11 @@
 
 @i = global i32 0, align 4
 
-define i32 @main(i32 %x, i32* %p) nounwind {
+define i32 @main(i32 %x, ptr %p) nounwind {
 entry:
 ; CHECK: memw(r{{[0-9]+}}+#{{[0-9]+}}) = r{{[0-9]+}}.new
-  %t0 = load i32, i32* @i, align 4
-  store i32 %t0, i32* %p, align 4
+  %t0 = load i32, ptr @i, align 4
+  store i32 %t0, ptr %p, align 4
   ret i32 %x
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/newvaluestore2.ll b/llvm/test/CodeGen/Hexagon/newvaluestore2.ll
index b6b5cee42f4e0..8e6c0530a4b00 100644
--- a/llvm/test/CodeGen/Hexagon/newvaluestore2.ll
+++ b/llvm/test/CodeGen/Hexagon/newvaluestore2.ll
@@ -8,15 +8,15 @@ define void @f0(float %a0, float %a1) #0 {
 b0:
   %v0 = alloca float, align 4
   %v1 = alloca float, align 4
-  %v2 = alloca float*, align 4
+  %v2 = alloca ptr, align 4
   %v3 = alloca i32, align 4
-  %v4 = load float, float* %v0, align 4
-  %v5 = load float, float* %v1, align 4
+  %v4 = load float, ptr %v0, align 4
+  %v5 = load float, ptr %v1, align 4
   %v6 = fadd float %v5, %v4
-  %v7 = load i32, i32* %v3, align 4
-  %v8 = load float*, float** %v2, align 4
-  %v9 = getelementptr inbounds float, float* %v8, i32 %v7
-  store float %v6, float* %v9, align 4
+  %v7 = load i32, ptr %v3, align 4
+  %v8 = load ptr, ptr %v2, align 4
+  %v9 = getelementptr inbounds float, ptr %v8, i32 %v7
+  store float %v6, ptr %v9, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/no-packets-gather.ll b/llvm/test/CodeGen/Hexagon/no-packets-gather.ll
index e40f0a7ed853a..5def4ed40d7b7 100644
--- a/llvm/test/CodeGen/Hexagon/no-packets-gather.ll
+++ b/llvm/test/CodeGen/Hexagon/no-packets-gather.ll
@@ -28,21 +28,20 @@ target triple = "hexagon"
 ; CHECK-NEXT:   r31:30 = dealloc_return(r30):raw
 ; CHECK-NEXT: }
 
-define void @fred(i8* %p, i32 %x, i32 %y) local_unnamed_addr #0 {
+define void @fred(ptr %p, i32 %x, i32 %y) local_unnamed_addr #0 {
 entry:
   %v = alloca <16 x i32>, align 64
-  %0 = bitcast <16 x i32>* %v to i8*
-  call void @llvm.lifetime.start(i64 64, i8* nonnull %0) #3
-  tail call void @llvm.hexagon.V6.vgathermw(i8* %p, i32 %x, i32 %y, <16 x i32> undef)
-  call void @foo(i8* nonnull %0) #0
-  call void @llvm.lifetime.end(i64 64, i8* nonnull %0) #3
+  call void @llvm.lifetime.start(i64 64, ptr nonnull %v) #3
+  tail call void @llvm.hexagon.V6.vgathermw(ptr %p, i32 %x, i32 %y, <16 x i32> undef)
+  call void @foo(ptr nonnull %v) #0
+  call void @llvm.lifetime.end(i64 64, ptr nonnull %v) #3
   ret void
 }
 
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
-declare void @llvm.hexagon.V6.vgathermw(i8*, i32, i32, <16 x i32>) #1
-declare void @foo(i8*) local_unnamed_addr #0
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start(i64, ptr nocapture) #1
+declare void @llvm.hexagon.V6.vgathermw(ptr, i32, i32, <16 x i32>) #1
+declare void @foo(ptr) local_unnamed_addr #0
+declare void @llvm.lifetime.end(i64, ptr nocapture) #1
 
 attributes #0 = { nounwind "target-cpu"="hexagonv65" }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/no-packets.ll b/llvm/test/CodeGen/Hexagon/no-packets.ll
index de58a204f406b..07198e4c7e621 100644
--- a/llvm/test/CodeGen/Hexagon/no-packets.ll
+++ b/llvm/test/CodeGen/Hexagon/no-packets.ll
@@ -47,7 +47,7 @@
 target triple = "hexagon"
 
 
-define void @fred(i32* nocapture %a0, i32* nocapture readonly %a1, i32* nocapture readonly %a2, i32 %a3) local_unnamed_addr #0 {
+define void @fred(ptr nocapture %a0, ptr nocapture readonly %a1, ptr nocapture readonly %a2, i32 %a3) local_unnamed_addr #0 {
 b4:
   %v5 = icmp sgt i32 %a3, 0
   br i1 %v5, label %b6, label %b21
@@ -57,16 +57,16 @@ b6:                                               ; preds = %b4
 
 b7:                                               ; preds = %b7, %b6
   %v8 = phi i32 [ %v18, %b7 ], [ 0, %b6 ]
-  %v9 = phi i32* [ %v17, %b7 ], [ %a0, %b6 ]
-  %v10 = phi i32* [ %v14, %b7 ], [ %a2, %b6 ]
-  %v11 = phi i32* [ %v12, %b7 ], [ %a1, %b6 ]
-  %v12 = getelementptr inbounds i32, i32* %v11, i32 1
-  %v13 = load i32, i32* %v11, align 4
-  %v14 = getelementptr inbounds i32, i32* %v10, i32 1
-  %v15 = load i32, i32* %v10, align 4
+  %v9 = phi ptr [ %v17, %b7 ], [ %a0, %b6 ]
+  %v10 = phi ptr [ %v14, %b7 ], [ %a2, %b6 ]
+  %v11 = phi ptr [ %v12, %b7 ], [ %a1, %b6 ]
+  %v12 = getelementptr inbounds i32, ptr %v11, i32 1
+  %v13 = load i32, ptr %v11, align 4
+  %v14 = getelementptr inbounds i32, ptr %v10, i32 1
+  %v15 = load i32, ptr %v10, align 4
   %v16 = add nsw i32 %v15, %v13
-  %v17 = getelementptr inbounds i32, i32* %v9, i32 1
-  store i32 %v16, i32* %v9, align 4
+  %v17 = getelementptr inbounds i32, ptr %v9, i32 1
+  store i32 %v16, ptr %v9, align 4
   %v18 = add nuw nsw i32 %v8, 1
   %v19 = icmp eq i32 %v18, %a3
   br i1 %v19, label %b20, label %b7

diff  --git a/llvm/test/CodeGen/Hexagon/no_struct_element.ll b/llvm/test/CodeGen/Hexagon/no_struct_element.ll
index 2ca2af3491180..add80f9252424 100644
--- a/llvm/test/CodeGen/Hexagon/no_struct_element.ll
+++ b/llvm/test/CodeGen/Hexagon/no_struct_element.ll
@@ -10,27 +10,25 @@ target triple = "hexagon"
 %s.3 = type { %s.4 }
 %s.4 = type { %s.5 }
 %s.5 = type { i32 }
-%s.6 = type { %s.6*, %s.6* }
+%s.6 = type { ptr, ptr }
 
 @g0 = internal global %s.0 zeroinitializer, align 1
 @g1 = private unnamed_addr constant [23 x i8] c"......................\00", align 1
 
 ; Function Attrs: nounwind
-define void @f0(i8* %a0) #0 {
+define void @f0(ptr %a0) #0 {
 b0:
   %v0 = alloca i32, align 4
-  %v1 = getelementptr inbounds i8, i8* %a0, i32 1028
-  store volatile i32 0, i32* %v0, align 4
-  %v2 = bitcast i8* %v1 to i32*
-  %v3 = load volatile i32, i32* %v0, align 4
-  store volatile i32 %v3, i32* %v2, align 4
-  %v4 = getelementptr inbounds i8, i8* %a0, i32 1032
-  %v5 = bitcast i8* %v4 to %s.1*
-  call void @f1(%s.1* %v5, i8* getelementptr inbounds ([23 x i8], [23 x i8]* @g1, i32 0, i32 0), %s.0* @g0) #0
+  %v1 = getelementptr inbounds i8, ptr %a0, i32 1028
+  store volatile i32 0, ptr %v0, align 4
+  %v3 = load volatile i32, ptr %v0, align 4
+  store volatile i32 %v3, ptr %v1, align 4
+  %v4 = getelementptr inbounds i8, ptr %a0, i32 1032
+  call void @f1(ptr %v4, ptr @g1, ptr @g0) #0
   ret void
 }
 
 ; Function Attrs: nounwind
-declare void @f1(%s.1*, i8*, %s.0*) #0
+declare void @f1(ptr, ptr, ptr) #0
 
 attributes #0 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/noreturn-noepilog.ll b/llvm/test/CodeGen/Hexagon/noreturn-noepilog.ll
index 243c0e1dcc3dd..dd877c821bae0 100644
--- a/llvm/test/CodeGen/Hexagon/noreturn-noepilog.ll
+++ b/llvm/test/CodeGen/Hexagon/noreturn-noepilog.ll
@@ -16,7 +16,7 @@ target triple = "hexagon"
 @g0 = internal constant %s.0 <{ i16 1, i8 2, i8 3, i8 4 }>, align 4
 
 ; Function Attrs: noreturn
-declare void @f0(%s.0*, i32) #0
+declare void @f0(ptr, i32) #0
 
 define i64 @f1(i32 %a0, i32 %a1) {
 b0:
@@ -24,7 +24,7 @@ b0:
   br i1 %v0, label %b1, label %b2
 
 b1:                                               ; preds = %b0
-  call void @f0(%s.0* nonnull @g0, i32 %a0) #0
+  call void @f0(ptr nonnull @g0, i32 %a0) #0
   unreachable
 
 b2:                                               ; preds = %b0

diff  --git a/llvm/test/CodeGen/Hexagon/noreturn-notail.ll b/llvm/test/CodeGen/Hexagon/noreturn-notail.ll
index e5eb366a6ea80..4a684ad28c7b9 100644
--- a/llvm/test/CodeGen/Hexagon/noreturn-notail.ll
+++ b/llvm/test/CodeGen/Hexagon/noreturn-notail.ll
@@ -9,19 +9,19 @@
 target triple = "hexagon"
 
 ; Function Attrs: noreturn
-declare void @f0(i32, i32*) #0
+declare void @f0(i32, ptr) #0
 
-declare void @f1(i32*)
+declare void @f1(ptr)
 
 define i64 @f2(i32 %a0, i32 %a1) {
 b0:
   %v0 = alloca i32
-  call void @f1(i32* %v0)
+  call void @f1(ptr %v0)
   %v1 = icmp ugt i32 %a0, 3
   br i1 %v1, label %b1, label %b2
 
 b1:                                               ; preds = %b0
-  tail call void @f0(i32 %a0, i32* %v0) #0
+  tail call void @f0(i32 %a0, ptr %v0) #0
   unreachable
 
 b2:                                               ; preds = %b0

diff  --git a/llvm/test/CodeGen/Hexagon/noreturn-stack-elim.ll b/llvm/test/CodeGen/Hexagon/noreturn-stack-elim.ll
index e58215e62e03c..264c54b1632e7 100644
--- a/llvm/test/CodeGen/Hexagon/noreturn-stack-elim.ll
+++ b/llvm/test/CodeGen/Hexagon/noreturn-stack-elim.ll
@@ -19,12 +19,11 @@
 ; CHECK-FLAG-NOT: memd(r29+#-16) = r17:16
 ; CHECK-FLAG-NOT: allocframe
 
-define dso_local void @test1(i32 %a, %struct.A* %b) local_unnamed_addr #0 {
+define dso_local void @test1(i32 %a, ptr %b) local_unnamed_addr #0 {
 entry:
-  %n = getelementptr inbounds %struct.A, %struct.A* %b, i32 0, i32 0
-  store i32 %a, i32* %n, align 4
+  store i32 %a, ptr %b, align 4
   tail call void @f1() #3
-  tail call void @nrf1(%struct.A* %b) #4
+  tail call void @nrf1(ptr %b) #4
   unreachable
 }
 
@@ -40,9 +39,8 @@ entry:
 define dso_local void @test2() local_unnamed_addr #0 {
 entry:
   %a = alloca i32, align 4
-  %0 = bitcast i32* %a to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0) #4
-  call void @f3(i32* nonnull %a) #4
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %a) #4
+  call void @f3(ptr nonnull %a) #4
   unreachable
 }
 
@@ -73,18 +71,18 @@ entry:
 define dso_local void @test4(i32 %n) local_unnamed_addr #0 {
 entry:
   %vla = alloca i32, i32 %n, align 8
-  call void @f3(i32* nonnull %vla) #4
+  call void @f3(ptr nonnull %vla) #4
   unreachable
 }
 
 
 declare dso_local void @f1() local_unnamed_addr
 declare dso_local void @f2(i32) local_unnamed_addr
-declare dso_local void @f3(i32*) local_unnamed_addr
+declare dso_local void @f3(ptr) local_unnamed_addr
 
-declare dso_local void @nrf1(%struct.A*) local_unnamed_addr #2
+declare dso_local void @nrf1(ptr) local_unnamed_addr #2
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #5
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #5
 
 attributes #0 = { noreturn nounwind }
 attributes #2 = { noreturn }

diff  --git a/llvm/test/CodeGen/Hexagon/not-op.ll b/llvm/test/CodeGen/Hexagon/not-op.ll
index 9d067e944dcaf..cd1d438e7dad9 100644
--- a/llvm/test/CodeGen/Hexagon/not-op.ll
+++ b/llvm/test/CodeGen/Hexagon/not-op.ll
@@ -4,8 +4,8 @@
 define i32 @f0(i32 %a0) #0 {
 b0:
   %v0 = alloca i32, align 4
-  store i32 %a0, i32* %v0, align 4
-  %v1 = load i32, i32* %v0, align 4
+  store i32 %a0, ptr %v0, align 4
+  %v1 = load i32, ptr %v0, align 4
   %v2 = xor i32 %v1, -1
   ret i32 %v2
 }

diff  --git a/llvm/test/CodeGen/Hexagon/notcheap.ll b/llvm/test/CodeGen/Hexagon/notcheap.ll
index 1731666dfdd67..8a9f1df572688 100644
--- a/llvm/test/CodeGen/Hexagon/notcheap.ll
+++ b/llvm/test/CodeGen/Hexagon/notcheap.ll
@@ -14,32 +14,32 @@
 @_ZGVZ3foovE1x = internal global i64 0, section ".bss._ZGVZ3foovE1x", align 8
 @__dso_handle = external dso_local global i8
 
-define dso_local i32* @_Z3foov() local_unnamed_addr optsize {
+define dso_local ptr @_Z3foov() local_unnamed_addr optsize {
 entry:
-  %0 = load atomic i8, i8* bitcast (i64* @_ZGVZ3foovE1x to i8*) acquire, align 8
+  %0 = load atomic i8, ptr @_ZGVZ3foovE1x acquire, align 8
   %guard.uninitialized = icmp eq i8 %0, 0
   br i1 %guard.uninitialized, label %init.check, label %init.end
 
 init.check:                                       ; preds = %entry
-  %1 = tail call i32 @__cxa_guard_acquire(i64* nonnull @_ZGVZ3foovE1x)
+  %1 = tail call i32 @__cxa_guard_acquire(ptr nonnull @_ZGVZ3foovE1x)
   %tobool = icmp eq i32 %1, 0
   br i1 %tobool, label %init.end, label %init
 
 init:                                             ; preds = %init.check
-  tail call void @_ZN6FooBazC1Ev(%struct.FooBaz* nonnull @_ZZ3foovE1x)
-  %2 = tail call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.FooBaz*)* @_ZN6FooBazD1Ev to void (i8*)*), i8* bitcast (%struct.FooBaz* @_ZZ3foovE1x to i8*), i8* nonnull @__dso_handle)
-  tail call void @__cxa_guard_release(i64* nonnull @_ZGVZ3foovE1x)
+  tail call void @_ZN6FooBazC1Ev(ptr nonnull @_ZZ3foovE1x)
+  %2 = tail call i32 @__cxa_atexit(ptr @_ZN6FooBazD1Ev, ptr @_ZZ3foovE1x, ptr nonnull @__dso_handle)
+  tail call void @__cxa_guard_release(ptr nonnull @_ZGVZ3foovE1x)
   br label %init.end
 
 init.end:                                         ; preds = %init, %init.check, %entry
-  %3 = load i32, i32* getelementptr inbounds (%struct.FooBaz, %struct.FooBaz* @_ZZ3foovE1x, i32 0, i32 0), align 4
+  %3 = load i32, ptr @_ZZ3foovE1x, align 4
   %inc = add nsw i32 %3, 1
-  store i32 %inc, i32* getelementptr inbounds (%struct.FooBaz, %struct.FooBaz* @_ZZ3foovE1x, i32 0, i32 0), align 4
-  ret i32* getelementptr inbounds (%struct.FooBaz, %struct.FooBaz* @_ZZ3foovE1x, i32 0, i32 0)
+  store i32 %inc, ptr @_ZZ3foovE1x, align 4
+  ret ptr @_ZZ3foovE1x
 }
 
-declare dso_local i32 @__cxa_guard_acquire(i64*) local_unnamed_addr
-declare dso_local void @_ZN6FooBazC1Ev(%struct.FooBaz*) unnamed_addr
-declare dso_local void @_ZN6FooBazD1Ev(%struct.FooBaz*) unnamed_addr
-declare dso_local i32 @__cxa_atexit(void (i8*)*, i8*, i8*) local_unnamed_addr
-declare dso_local void @__cxa_guard_release(i64*) local_unnamed_addr
+declare dso_local i32 @__cxa_guard_acquire(ptr) local_unnamed_addr
+declare dso_local void @_ZN6FooBazC1Ev(ptr) unnamed_addr
+declare dso_local void @_ZN6FooBazD1Ev(ptr) unnamed_addr
+declare dso_local i32 @__cxa_atexit(ptr, ptr, ptr) local_unnamed_addr
+declare dso_local void @__cxa_guard_release(ptr) local_unnamed_addr

diff  --git a/llvm/test/CodeGen/Hexagon/ntstbit.ll b/llvm/test/CodeGen/Hexagon/ntstbit.ll
index 2b8526d8f4055..afd71c217cefb 100644
--- a/llvm/test/CodeGen/Hexagon/ntstbit.ll
+++ b/llvm/test/CodeGen/Hexagon/ntstbit.ll
@@ -43,16 +43,16 @@ b0:
   br i1 %v2, label %b2, label %b1
 
 b1:                                               ; preds = %b0
-  tail call void bitcast (void (...)* @f1 to void ()*)() #0
+  tail call void @f1() #0
   br label %b3
 
 b2:                                               ; preds = %b0
-  %v3 = tail call i32 bitcast (i32 (...)* @f2 to i32 ()*)() #0
+  %v3 = tail call i32 @f2() #0
   br label %b3
 
 b3:                                               ; preds = %b2, %b1
   %v4 = add nsw i32 %a1, 2
-  %v5 = tail call i32 bitcast (i32 (...)* @f3 to i32 (i32, i32)*)(i32 %a0, i32 %v4) #0
+  %v5 = tail call i32 @f3(i32 %a0, i32 %v4) #0
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/nv_store_vec.ll b/llvm/test/CodeGen/Hexagon/nv_store_vec.ll
index 628c10e25b111..53a8bc09dd69b 100644
--- a/llvm/test/CodeGen/Hexagon/nv_store_vec.ll
+++ b/llvm/test/CodeGen/Hexagon/nv_store_vec.ll
@@ -5,13 +5,11 @@
 ; CHECK: v{{[0-9]+}} = valign(v{{[0-9]+}},v{{[0-9]+}},r{{[0-9]+}})
 ; CHECK: vmem(r{{[0-9]+}}+#{{[0-9]+}}) = v{{[0-9]+}}.new
 
-define void @f0(i16* nocapture readonly %a0, i32 %a1, i16* nocapture %a2) #0 {
+define void @f0(ptr nocapture readonly %a0, i32 %a1, ptr nocapture %a2) #0 {
 b0:
-  %v0 = bitcast i16* %a0 to <16 x i32>*
-  %v1 = bitcast i16* %a2 to <16 x i32>*
-  %v2 = load <16 x i32>, <16 x i32>* %v0, align 64
+  %v2 = load <16 x i32>, ptr %a0, align 64
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v2, <16 x i32> undef, i32 %a1)
-  store <16 x i32> %v3, <16 x i32>* %v1, align 64
+  store <16 x i32> %v3, ptr %a2, align 64
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/opt-addr-mode-subreg-use.ll b/llvm/test/CodeGen/Hexagon/opt-addr-mode-subreg-use.ll
index 667b284ca0955..9b098d08f5362 100644
--- a/llvm/test/CodeGen/Hexagon/opt-addr-mode-subreg-use.ll
+++ b/llvm/test/CodeGen/Hexagon/opt-addr-mode-subreg-use.ll
@@ -3,45 +3,45 @@
 
 target triple = "hexagon"
 
-%s.0 = type { i32*, i32*, i32* }
-%s.1 = type { i32*, i32*, i32* }
-%s.2 = type { i32*, i32**, i32**, i32**, i32***, i32* }
-%s.3 = type { i32*, i32*, i32* }
-%s.4 = type { i32*, i32*, i32* }
-%s.5 = type { i32*, i32*, i32 }
+%s.0 = type { ptr, ptr, ptr }
+%s.1 = type { ptr, ptr, ptr }
+%s.2 = type { ptr, ptr, ptr, ptr, ptr, ptr }
+%s.3 = type { ptr, ptr, ptr }
+%s.4 = type { ptr, ptr, ptr }
+%s.5 = type { ptr, ptr, i32 }
 
 ; Function Attrs: nounwind optsize
-declare zeroext i1 @f0(i32*) #0 align 2
+declare zeroext i1 @f0(ptr) #0 align 2
 
 ; Function Attrs: nounwind optsize
-declare zeroext i1 @f1(i32*) #0 align 2
+declare zeroext i1 @f1(ptr) #0 align 2
 
 ; Function Attrs: optsize
-declare hidden void @f2(i32* noalias nocapture sret(i32), i32) #1 align 2
+declare hidden void @f2(ptr noalias nocapture sret(i32), i32) #1 align 2
 
 ; Function Attrs: optsize
-declare hidden void @f3(i32* noalias nocapture sret(i32), i32) #1 align 2
+declare hidden void @f3(ptr noalias nocapture sret(i32), i32) #1 align 2
 
 ; Function Attrs: optsize
-declare hidden void @f4(i32* noalias nocapture sret(i32), i32) #1 align 2
+declare hidden void @f4(ptr noalias nocapture sret(i32), i32) #1 align 2
 
 ; Function Attrs: optsize
-declare hidden void @f5(i32* noalias nocapture sret(i32), i32) #1 align 2
+declare hidden void @f5(ptr noalias nocapture sret(i32), i32) #1 align 2
 
 ; Function Attrs: optsize
-declare hidden void @f6(i32* noalias nocapture sret(i32), i32) #1 align 2
+declare hidden void @f6(ptr noalias nocapture sret(i32), i32) #1 align 2
 
 ; Function Attrs: optsize
-declare hidden void @f7(i32* noalias nocapture sret(i32), i32) #1 align 2
+declare hidden void @f7(ptr noalias nocapture sret(i32), i32) #1 align 2
 
 ; Function Attrs: optsize
-declare zeroext i1 @f8(i32*, i32*, i64) #1 align 2
+declare zeroext i1 @f8(ptr, ptr, i64) #1 align 2
 
 ; Function Attrs: nounwind optsize
-declare i32* @f9(i32* nocapture readonly) #0 align 2
+declare ptr @f9(ptr nocapture readonly) #0 align 2
 
 ; Function Attrs: optsize
-define void @f10(i32* %a0, i32* dereferenceable(64) %a1) #1 align 2 {
+define void @f10(ptr %a0, ptr dereferenceable(64) %a1) #1 align 2 {
 b0:
   %v0 = alloca %s.0, align 4
   %v1 = alloca %s.1, align 4
@@ -55,39 +55,39 @@ b1:                                               ; preds = %b0
   br i1 undef, label %b3, label %b2
 
 b2:                                               ; preds = %b1
-  %v6 = ptrtoint %s.0* %v0 to i32
+  %v6 = ptrtoint ptr %v0 to i32
   %v7 = zext i32 %v6 to i64
   %v8 = shl nuw i64 %v7, 32
-  %v9 = or i64 %v8, zext (i32 ptrtoint (void (i32*, i32)* @f2 to i32) to i64)
-  %v10 = ptrtoint %s.4* %v4 to i32
+  %v9 = or i64 %v8, zext (i32 ptrtoint (ptr @f2 to i32) to i64)
+  %v10 = ptrtoint ptr %v4 to i32
   %v11 = zext i32 %v10 to i64
   %v12 = shl nuw i64 %v11, 32
-  %v13 = or i64 %v12, zext (i32 ptrtoint (void (i32*, i32)* @f5 to i32) to i64)
-  %v14 = ptrtoint %s.5* %v5 to i32
+  %v13 = or i64 %v12, zext (i32 ptrtoint (ptr @f5 to i32) to i64)
+  %v14 = ptrtoint ptr %v5 to i32
   %v15 = zext i32 %v14 to i64
   %v16 = shl nuw i64 %v15, 32
-  %v17 = or i64 %v16, zext (i32 ptrtoint (void (i32*, i32)* @f6 to i32) to i64)
-  %v18 = ptrtoint %s.1* %v1 to i32
+  %v17 = or i64 %v16, zext (i32 ptrtoint (ptr @f6 to i32) to i64)
+  %v18 = ptrtoint ptr %v1 to i32
   %v19 = zext i32 %v18 to i64
   %v20 = shl nuw i64 %v19, 32
-  %v21 = or i64 %v20, zext (i32 ptrtoint (void (i32*, i32)* @f3 to i32) to i64)
-  %v22 = ptrtoint %s.2* %v2 to i32
+  %v21 = or i64 %v20, zext (i32 ptrtoint (ptr @f3 to i32) to i64)
+  %v22 = ptrtoint ptr %v2 to i32
   %v23 = zext i32 %v22 to i64
   %v24 = shl nuw i64 %v23, 32
-  %v25 = or i64 %v24, zext (i32 ptrtoint (void (i32*, i32)* @f4 to i32) to i64)
-  %v26 = ptrtoint %s.3* %v3 to i32
+  %v25 = or i64 %v24, zext (i32 ptrtoint (ptr @f4 to i32) to i64)
+  %v26 = ptrtoint ptr %v3 to i32
   %v27 = zext i32 %v26 to i64
   %v28 = shl nuw i64 %v27, 32
-  %v29 = or i64 %v28, zext (i32 ptrtoint (void (i32*, i32)* @f7 to i32) to i64)
-  %v30 = call i32* @f9(i32* nonnull null) #1
+  %v29 = or i64 %v28, zext (i32 ptrtoint (ptr @f7 to i32) to i64)
+  %v30 = call ptr @f9(ptr nonnull null) #1
   br i1 undef, label %b5, label %b4
 
 b3:                                               ; preds = %b1
   unreachable
 
 b4:                                               ; preds = %b2
-  store i32* null, i32** null, align 4
-  %v31 = call zeroext i1 @f0(i32* null) #0
+  store ptr null, ptr null, align 4
+  %v31 = call zeroext i1 @f0(ptr null) #0
   br i1 %v31, label %b6, label %b32
 
 b5:                                               ; preds = %b2
@@ -106,7 +106,7 @@ b9:                                               ; preds = %b8
   br i1 undef, label %b10, label %b32
 
 b10:                                              ; preds = %b9
-  %v32 = call zeroext i1 @f1(i32* null) #0
+  %v32 = call zeroext i1 @f1(ptr null) #0
   br i1 %v32, label %b11, label %b32
 
 b11:                                              ; preds = %b10
@@ -116,7 +116,7 @@ b12:                                              ; preds = %b11
   unreachable
 
 b13:                                              ; preds = %b11
-  %v33 = call zeroext i1 @f0(i32* undef) #0
+  %v33 = call zeroext i1 @f0(ptr undef) #0
   br i1 %v33, label %b14, label %b32
 
 b14:                                              ; preds = %b13
@@ -126,7 +126,7 @@ b15:                                              ; preds = %b14
   unreachable
 
 b16:                                              ; preds = %b14
-  %v34 = call zeroext i1 @f1(i32* null) #0
+  %v34 = call zeroext i1 @f1(ptr null) #0
   br i1 %v34, label %b18, label %b17
 
 b17:                                              ; preds = %b16
@@ -151,11 +151,11 @@ b23:                                              ; preds = %b21
   br i1 undef, label %b24, label %b32
 
 b24:                                              ; preds = %b23
-  %v35 = call zeroext i1 @f8(i32* nonnull %a1, i32* undef, i64 undef) #1
+  %v35 = call zeroext i1 @f8(ptr nonnull %a1, ptr undef, i64 undef) #1
   br i1 %v35, label %b25, label %b32
 
 b25:                                              ; preds = %b24
-  %v36 = call zeroext i1 @f8(i32* nonnull %a1, i32* undef, i64 %v9) #1
+  %v36 = call zeroext i1 @f8(ptr nonnull %a1, ptr undef, i64 %v9) #1
   unreachable
 
 b26:                                              ; preds = %b19
@@ -168,24 +168,24 @@ b28:                                              ; preds = %b27
   br i1 undef, label %b31, label %b29
 
 b29:                                              ; preds = %b28
-  %v37 = call zeroext i1 @f8(i32* nonnull %a1, i32* null, i64 %v21) #1
-  %v38 = call zeroext i1 @f8(i32* nonnull %a1, i32* undef, i64 %v25) #1
+  %v37 = call zeroext i1 @f8(ptr nonnull %a1, ptr null, i64 %v21) #1
+  %v38 = call zeroext i1 @f8(ptr nonnull %a1, ptr undef, i64 %v25) #1
   br i1 %v38, label %b30, label %b32
 
 b30:                                              ; preds = %b29
-  %v39 = call zeroext i1 @f8(i32* nonnull %a1, i32* undef, i64 %v29) #1
+  %v39 = call zeroext i1 @f8(ptr nonnull %a1, ptr undef, i64 %v29) #1
   unreachable
 
 b31:                                              ; preds = %b28
-  %v40 = call zeroext i1 @f8(i32* nonnull %a1, i32* null, i64 %v13) #1
-  %v41 = call zeroext i1 @f8(i32* nonnull %a1, i32* undef, i64 %v17) #1
+  %v40 = call zeroext i1 @f8(ptr nonnull %a1, ptr null, i64 %v13) #1
+  %v41 = call zeroext i1 @f8(ptr nonnull %a1, ptr undef, i64 %v17) #1
   br i1 %v41, label %b33, label %b32
 
 b32:                                              ; preds = %b31, %b29, %b27, %b26, %b24, %b23, %b21, %b18, %b13, %b10, %b9, %b8, %b7, %b6, %b4
   unreachable
 
 b33:                                              ; preds = %b31
-  store i32* %a0, i32** undef, align 4
+  store ptr %a0, ptr undef, align 4
   unreachable
 
 b34:                                              ; preds = %b0

diff  --git a/llvm/test/CodeGen/Hexagon/opt-addr-mode.ll b/llvm/test/CodeGen/Hexagon/opt-addr-mode.ll
index 705cd045ea307..2a88d88fa8db9 100644
--- a/llvm/test/CodeGen/Hexagon/opt-addr-mode.ll
+++ b/llvm/test/CodeGen/Hexagon/opt-addr-mode.ll
@@ -12,7 +12,7 @@
 
 declare i32 @foo(i32, i32) #0
 
-define i32 @fred(i32 %a0, i32 %a1, i32* %p) #0 {
+define i32 @fred(i32 %a0, i32 %a1, ptr %p) #0 {
 entry:
   %call24 = tail call i32 @foo(i32 %a0, i32 1) #0
   %tobool26 = icmp eq i32 %call24, 0
@@ -32,7 +32,7 @@ while.body.us.preheader:                          ; preds = %while.body.lr.ph
 
 while.body.us:                                    ; preds = %while.body.us.preheader, %while.cond.backedge.us
   %call27.us = phi i32 [ %call.us, %while.cond.backedge.us ], [ %call24, %while.body.us.preheader ]
-  %x0 = load i32, i32* %p, align 4, !tbaa !4
+  %x0 = load i32, ptr %p, align 4, !tbaa !4
   %cmp.us = icmp sgt i32 %x0, 0
   br i1 %cmp.us, label %if.then.us, label %if.end.us
 
@@ -43,17 +43,17 @@ if.then.us:                                       ; preds = %while.body.us
   br label %if.end.us
 
 if.end.us:                                        ; preds = %if.then.us, %while.body.us
-  %x1 = load i32, i32* %p, align 4, !tbaa !4
+  %x1 = load i32, ptr %p, align 4, !tbaa !4
   %call8.us = tail call i32 @foo(i32 %sub, i32 %a1) #0
   %tobool11.us = icmp eq i32 %call8.us, 0
   br i1 %tobool11.us, label %while.cond.backedge.us, label %if.then12.us
 
 if.then12.us:                                     ; preds = %if.end.us
-  %x3 = load i32, i32* %p, align 4, !tbaa !4
+  %x3 = load i32, ptr %p, align 4, !tbaa !4
   %sub13.us = sub i32 %x3, %x1
-  %x4 = load i32, i32* @global_1, align 4, !tbaa !4
-  %arrayidx.us = getelementptr inbounds [128 x i32], [128 x i32]* @global_2, i32 0, i32 %x4
-  store i32 %sub13.us, i32* %arrayidx.us, align 4, !tbaa !4
+  %x4 = load i32, ptr @global_1, align 4, !tbaa !4
+  %arrayidx.us = getelementptr inbounds [128 x i32], ptr @global_2, i32 0, i32 %x4
+  store i32 %sub13.us, ptr %arrayidx.us, align 4, !tbaa !4
   br label %while.cond.backedge.us
 
 while.cond.backedge.us:                           ; preds = %if.then12.us, %if.end.us
@@ -63,7 +63,7 @@ while.cond.backedge.us:                           ; preds = %if.then12.us, %if.e
 
 while.body:                                       ; preds = %while.body.preheader, %while.cond.backedge
   %call27 = phi i32 [ %call, %while.cond.backedge ], [ %call24, %while.body.preheader ]
-  %x5 = load i32, i32* %p, align 4, !tbaa !4
+  %x5 = load i32, ptr %p, align 4, !tbaa !4
   %cmp = icmp sgt i32 %x5, 0
   br i1 %cmp, label %if.then, label %if.end
 
@@ -78,9 +78,9 @@ if.end:                                           ; preds = %if.then, %while.bod
   br i1 %tobool11, label %while.cond.backedge, label %if.then12
 
 if.then12:                                        ; preds = %if.end
-  %x7 = load i32, i32* @global_1, align 4, !tbaa !4
-  %arrayidx = getelementptr inbounds [128 x i32], [128 x i32]* @global_2, i32 0, i32 %x7
-  store i32 0, i32* %arrayidx, align 4, !tbaa !4
+  %x7 = load i32, ptr @global_1, align 4, !tbaa !4
+  %arrayidx = getelementptr inbounds [128 x i32], ptr @global_2, i32 0, i32 %x7
+  store i32 0, ptr %arrayidx, align 4, !tbaa !4
   br label %while.cond.backedge
 
 while.cond.backedge:                              ; preds = %if.then12, %if.end

diff  --git a/llvm/test/CodeGen/Hexagon/opt-fabs.ll b/llvm/test/CodeGen/Hexagon/opt-fabs.ll
index 9c94f853ba50f..86974bf84de28 100644
--- a/llvm/test/CodeGen/Hexagon/opt-fabs.ll
+++ b/llvm/test/CodeGen/Hexagon/opt-fabs.ll
@@ -6,8 +6,8 @@
 define float @my_fabsf(float %x) nounwind {
 entry:
   %x.addr = alloca float, align 4
-  store float %x, float* %x.addr, align 4
-  %0 = load float, float* %x.addr, align 4
+  store float %x, ptr %x.addr, align 4
+  %0 = load float, ptr %x.addr, align 4
   %call = call float @fabsf(float %0) readnone
   ret float %call
 }

diff  --git a/llvm/test/CodeGen/Hexagon/opt-fneg.ll b/llvm/test/CodeGen/Hexagon/opt-fneg.ll
index 5c122c90010a2..d874e71f44bf3 100644
--- a/llvm/test/CodeGen/Hexagon/opt-fneg.ll
+++ b/llvm/test/CodeGen/Hexagon/opt-fneg.ll
@@ -6,8 +6,8 @@ entry:
 ; CHECK-LABEL: foo:
 ; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#31)
   %x.addr = alloca float, align 4
-  store float %x, float* %x.addr, align 4
-  %0 = load float, float* %x.addr, align 4
+  store float %x, ptr %x.addr, align 4
+  %0 = load float, ptr %x.addr, align 4
   %sub = fsub float -0.000000e+00, %0
   ret float %sub
 }

diff  --git a/llvm/test/CodeGen/Hexagon/opt-glob-addrs-000.ll b/llvm/test/CodeGen/Hexagon/opt-glob-addrs-000.ll
index 7e2061052a7e0..b8baa651e88f9 100644
--- a/llvm/test/CodeGen/Hexagon/opt-glob-addrs-000.ll
+++ b/llvm/test/CodeGen/Hexagon/opt-glob-addrs-000.ll
@@ -79,24 +79,24 @@ target triple = "hexagon-unknown--elf"
 @g0 = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
 @g1 = internal constant [10 x [10 x i8]] [[10 x i8] c"[0000]\00\00\00\00", [10 x i8] c"[0001]\00\00\00\00", [10 x i8] c"[0002]\00\00\00\00", [10 x i8] c"[0003]\00\00\00\00", [10 x i8] c"[0004]\00\00\00\00", [10 x i8] c"[0005]\00\00\00\00", [10 x i8] c"[0006]\00\00\00\00", [10 x i8] c"[0007]\00\00\00\00", [10 x i8] c"[0008]\00\00\00\00", [10 x i8] c"[0009]\00\00\00\00"], align 16
 
-declare i32 @f0(i8*, i8*)
+declare i32 @f0(ptr, ptr)
 
 ; Function Attrs: nounwind
-define i32 @f1(i32 %a0, i8** %a1) #0 {
+define i32 @f1(i32 %a0, ptr %a1) #0 {
 b0:
   %v01 = alloca i32, align 4
   %v12 = alloca i32, align 4
-  %v23 = alloca i8**, align 4
+  %v23 = alloca ptr, align 4
   %v34 = alloca i32, align 4
-  store i32 0, i32* %v01
-  store i32 %a0, i32* %v12, align 4
-  store i8** %a1, i8*** %v23, align 4
-  %v45 = load i8**, i8*** %v23, align 4
-  %v56 = getelementptr inbounds i8*, i8** %v45, i32 1
-  %v67 = load i8*, i8** %v56, align 4
-  %v78 = call i32 @f2(i8* %v67)
-  store i32 %v78, i32* %v34, align 4
-  %v89 = load i32, i32* %v34, align 4
+  store i32 0, ptr %v01
+  store i32 %a0, ptr %v12, align 4
+  store ptr %a1, ptr %v23, align 4
+  %v45 = load ptr, ptr %v23, align 4
+  %v56 = getelementptr inbounds ptr, ptr %v45, i32 1
+  %v67 = load ptr, ptr %v56, align 4
+  %v78 = call i32 @f2(ptr %v67)
+  store i32 %v78, ptr %v34, align 4
+  %v89 = load i32, ptr %v34, align 4
   switch i32 %v89, label %b11 [
     i32 0, label %b1
     i32 1, label %b2
@@ -111,63 +111,52 @@ b0:
   ]
 
 b1:                                               ; preds = %b0
-  %v910 = getelementptr inbounds [10 x [10 x i8]], [10 x [10 x i8]]* @g1, i32 0, i32 0
-  %v10 = getelementptr inbounds [10 x i8], [10 x i8]* %v910, i32 0, i32 0
-  %v11 = call i32 @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i8* %v10)
+  %v11 = call i32 @f0(ptr @g0, ptr @g1)
   br label %b2
 
 b2:                                               ; preds = %b1, %b0
-  %v1211 = getelementptr inbounds [10 x [10 x i8]], [10 x [10 x i8]]* @g1, i32 0, i32 1
-  %v13 = getelementptr inbounds [10 x i8], [10 x i8]* %v1211, i32 0, i32 0
-  %v14 = call i32 @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i8* %v13)
+  %v1211 = getelementptr inbounds [10 x [10 x i8]], ptr @g1, i32 0, i32 1
+  %v14 = call i32 @f0(ptr @g0, ptr %v1211)
   br label %b3
 
 b3:                                               ; preds = %b2, %b0
-  %v15 = getelementptr inbounds [10 x [10 x i8]], [10 x [10 x i8]]* @g1, i32 0, i32 2
-  %v16 = getelementptr inbounds [10 x i8], [10 x i8]* %v15, i32 0, i32 0
-  %v17 = call i32 @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i8* %v16)
+  %v15 = getelementptr inbounds [10 x [10 x i8]], ptr @g1, i32 0, i32 2
+  %v17 = call i32 @f0(ptr @g0, ptr %v15)
   br label %b4
 
 b4:                                               ; preds = %b3, %b0
-  %v18 = getelementptr inbounds [10 x [10 x i8]], [10 x [10 x i8]]* @g1, i32 0, i32 3
-  %v19 = getelementptr inbounds [10 x i8], [10 x i8]* %v18, i32 0, i32 0
-  %v20 = call i32 @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i8* %v19)
+  %v18 = getelementptr inbounds [10 x [10 x i8]], ptr @g1, i32 0, i32 3
+  %v20 = call i32 @f0(ptr @g0, ptr %v18)
   br label %b5
 
 b5:                                               ; preds = %b4, %b0
-  %v21 = getelementptr inbounds [10 x [10 x i8]], [10 x [10 x i8]]* @g1, i32 0, i32 4
-  %v22 = getelementptr inbounds [10 x i8], [10 x i8]* %v21, i32 0, i32 0
-  %v2312 = call i32 @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i8* %v22)
+  %v21 = getelementptr inbounds [10 x [10 x i8]], ptr @g1, i32 0, i32 4
+  %v2312 = call i32 @f0(ptr @g0, ptr %v21)
   br label %b6
 
 b6:                                               ; preds = %b5, %b0
-  %v24 = getelementptr inbounds [10 x [10 x i8]], [10 x [10 x i8]]* @g1, i32 0, i32 5
-  %v25 = getelementptr inbounds [10 x i8], [10 x i8]* %v24, i32 0, i32 0
-  %v26 = call i32 @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i8* %v25)
+  %v24 = getelementptr inbounds [10 x [10 x i8]], ptr @g1, i32 0, i32 5
+  %v26 = call i32 @f0(ptr @g0, ptr %v24)
   br label %b7
 
 b7:                                               ; preds = %b6, %b0
-  %v27 = getelementptr inbounds [10 x [10 x i8]], [10 x [10 x i8]]* @g1, i32 0, i32 6
-  %v28 = getelementptr inbounds [10 x i8], [10 x i8]* %v27, i32 0, i32 0
-  %v29 = call i32 @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i8* %v28)
+  %v27 = getelementptr inbounds [10 x [10 x i8]], ptr @g1, i32 0, i32 6
+  %v29 = call i32 @f0(ptr @g0, ptr %v27)
   br label %b8
 
 b8:                                               ; preds = %b7, %b0
-  %v30 = getelementptr inbounds [10 x [10 x i8]], [10 x [10 x i8]]* @g1, i32 0, i32 7
-  %v31 = getelementptr inbounds [10 x i8], [10 x i8]* %v30, i32 0, i32 0
-  %v32 = call i32 @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i8* %v31)
+  %v30 = getelementptr inbounds [10 x [10 x i8]], ptr @g1, i32 0, i32 7
+  %v32 = call i32 @f0(ptr @g0, ptr %v30)
   br label %b9
 
 b9:                                               ; preds = %b8, %b0
-  %v33 = getelementptr inbounds [10 x [10 x i8]], [10 x [10 x i8]]* @g1, i32 0, i32 8
-  %v3413 = getelementptr inbounds [10 x i8], [10 x i8]* %v33, i32 0, i32 0
-  %v35 = call i32 @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i8* %v3413)
+  %v33 = getelementptr inbounds [10 x [10 x i8]], ptr @g1, i32 0, i32 8
+  %v35 = call i32 @f0(ptr @g0, ptr %v33)
   br label %b10
 
 b10:                                              ; preds = %b9, %b0
-  %v36 = getelementptr inbounds [10 x [10 x i8]], [10 x [10 x i8]]* @g1, i32 0, i32 9
-  %v37 = getelementptr inbounds [10 x i8], [10 x i8]* %v36, i32 0, i32 0
-  %v38 = call i32 @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i8* %v37)
+  %v36 = getelementptr inbounds [10 x [10 x i8]], ptr @g1, i32 0, i32 9
+  %v38 = call i32 @f0(ptr @g0, ptr %v36)
   br label %b11
 
 b11:                                              ; preds = %b10, %b0
@@ -175,6 +164,6 @@ b11:                                              ; preds = %b10, %b0
 }
 
 ; Function Attrs: nounwind
-declare i32 @f2(i8*) #0
+declare i32 @f2(ptr) #0
 
 attributes #0 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/opt-glob-addrs-001.ll b/llvm/test/CodeGen/Hexagon/opt-glob-addrs-001.ll
index 2be0433612218..c7d6b44a94d7f 100644
--- a/llvm/test/CodeGen/Hexagon/opt-glob-addrs-001.ll
+++ b/llvm/test/CodeGen/Hexagon/opt-glob-addrs-001.ll
@@ -25,12 +25,12 @@ target triple = "hexagon-unknown--elf"
 ; Function Attrs: nounwind readonly
 define i32 @f0(i32 %a0) #0 {
 b0:
-  %v0 = load i32, i32* @g10, align 4, !tbaa !0
+  %v0 = load i32, ptr @g10, align 4, !tbaa !0
   %v1 = icmp sgt i32 %v0, 0
   br i1 %v1, label %b1, label %b21
 
 b1:                                               ; preds = %b0
-  %v2 = getelementptr inbounds [842 x i32], [842 x i32]* @g1, i32 0, i32 %a0
+  %v2 = getelementptr inbounds [842 x i32], ptr @g1, i32 0, i32 %a0
   br label %b2
 
 b2:                                               ; preds = %b19, %b1
@@ -39,69 +39,69 @@ b2:                                               ; preds = %b19, %b1
   %v5 = phi i32 [ 0, %b1 ], [ %v77, %b19 ]
   %v6 = phi i32 [ 0, %b1 ], [ %v76, %b19 ]
   %v7 = phi i32 [ 0, %b1 ], [ %v80, %b19 ]
-  %v8 = getelementptr inbounds [750 x i32], [750 x i32]* @g5, i32 0, i32 %v7
-  %v9 = load i32, i32* %v8, align 4, !tbaa !0
+  %v8 = getelementptr inbounds [750 x i32], ptr @g5, i32 0, i32 %v7
+  %v9 = load i32, ptr %v8, align 4, !tbaa !0
   %v10 = icmp eq i32 %v9, 0
   br i1 %v10, label %b19, label %b3
 
 b3:                                               ; preds = %b2
-  %v11 = getelementptr inbounds [750 x i32], [750 x i32]* @g4, i32 0, i32 %v7
-  %v12 = load i32, i32* %v11, align 4, !tbaa !0
-  %v13 = load i32, i32* %v2, align 4, !tbaa !0
-  %v14 = getelementptr inbounds [750 x i32], [750 x i32]* @g4, i32 0, i32 %v13
-  %v15 = load i32, i32* %v14, align 4, !tbaa !0
+  %v11 = getelementptr inbounds [750 x i32], ptr @g4, i32 0, i32 %v7
+  %v12 = load i32, ptr %v11, align 4, !tbaa !0
+  %v13 = load i32, ptr %v2, align 4, !tbaa !0
+  %v14 = getelementptr inbounds [750 x i32], ptr @g4, i32 0, i32 %v13
+  %v15 = load i32, ptr %v14, align 4, !tbaa !0
   %v16 = icmp eq i32 %v12, %v15
   br i1 %v16, label %b4, label %b8
 
 b4:                                               ; preds = %b3
-  %v17 = getelementptr inbounds [750 x i32], [750 x i32]* @g6, i32 0, i32 %v7
-  %v18 = load i32, i32* %v17, align 4, !tbaa !0
+  %v17 = getelementptr inbounds [750 x i32], ptr @g6, i32 0, i32 %v7
+  %v18 = load i32, ptr %v17, align 4, !tbaa !0
   %v19 = icmp eq i32 %v18, 25
   br i1 %v19, label %b5, label %b19
 
 b5:                                               ; preds = %b4
-  %v20 = getelementptr inbounds [750 x i32], [750 x i32]* @g2, i32 0, i32 %v7
-  %v21 = load i32, i32* %v20, align 4, !tbaa !0
+  %v20 = getelementptr inbounds [750 x i32], ptr @g2, i32 0, i32 %v7
+  %v21 = load i32, ptr %v20, align 4, !tbaa !0
   %v22 = icmp slt i32 %v21, 19
   br i1 %v22, label %b6, label %b19
 
 b6:                                               ; preds = %b5
-  %v23 = getelementptr inbounds [750 x i32], [750 x i32]* @g9, i32 0, i32 %v7
-  %v24 = load i32, i32* %v23, align 4, !tbaa !0
+  %v23 = getelementptr inbounds [750 x i32], ptr @g9, i32 0, i32 %v7
+  %v24 = load i32, ptr %v23, align 4, !tbaa !0
   %v25 = icmp eq i32 %v24, 0
   br i1 %v25, label %b19, label %b7
 
 b7:                                               ; preds = %b6
-  %v26 = getelementptr inbounds [750 x i32], [750 x i32]* @g8, i32 0, i32 %v7
-  %v27 = load i32, i32* %v26, align 4, !tbaa !0
+  %v26 = getelementptr inbounds [750 x i32], ptr @g8, i32 0, i32 %v7
+  %v27 = load i32, ptr %v26, align 4, !tbaa !0
   %v28 = mul nsw i32 %v27, 50
   %v29 = add nsw i32 %v28, %v3
   br label %b19
 
 b8:                                               ; preds = %b3
-  %v30 = getelementptr inbounds [750 x i32], [750 x i32]* @g9, i32 0, i32 %v7
-  %v31 = load i32, i32* %v30, align 4, !tbaa !0
+  %v30 = getelementptr inbounds [750 x i32], ptr @g9, i32 0, i32 %v7
+  %v31 = load i32, ptr %v30, align 4, !tbaa !0
   %v32 = icmp eq i32 %v31, 0
   br i1 %v32, label %b13, label %b9
 
 b9:                                               ; preds = %b8
-  %v33 = getelementptr inbounds [750 x i32], [750 x i32]* @g7, i32 0, i32 %v7
-  %v34 = load i32, i32* %v33, align 4, !tbaa !0
+  %v33 = getelementptr inbounds [750 x i32], ptr @g7, i32 0, i32 %v7
+  %v34 = load i32, ptr %v33, align 4, !tbaa !0
   %v35 = icmp eq i32 %v34, 0
   br i1 %v35, label %b10, label %b13
 
 b10:                                              ; preds = %b9
-  %v36 = getelementptr inbounds [750 x i32], [750 x i32]* @g6, i32 0, i32 %v7
-  %v37 = load i32, i32* %v36, align 4, !tbaa !0
+  %v36 = getelementptr inbounds [750 x i32], ptr @g6, i32 0, i32 %v7
+  %v37 = load i32, ptr %v36, align 4, !tbaa !0
   %v38 = icmp slt i32 %v37, 18
   br i1 %v38, label %b11, label %b13
 
 b11:                                              ; preds = %b10
-  %v39 = getelementptr inbounds [0 x i32], [0 x i32]* @g11, i32 0, i32 %v37
-  %v40 = load i32, i32* %v39, align 4, !tbaa !0
+  %v39 = getelementptr inbounds [0 x i32], ptr @g11, i32 0, i32 %v37
+  %v40 = load i32, ptr %v39, align 4, !tbaa !0
   %v41 = add nsw i32 %v40, 50
-  %v42 = getelementptr inbounds [750 x i32], [750 x i32]* @g8, i32 0, i32 %v7
-  %v43 = load i32, i32* %v42, align 4, !tbaa !0
+  %v42 = getelementptr inbounds [750 x i32], ptr @g8, i32 0, i32 %v7
+  %v43 = load i32, ptr %v42, align 4, !tbaa !0
   %v44 = mul nsw i32 %v41, %v43
   %v45 = icmp slt i32 %v44, %v4
   br i1 %v45, label %b12, label %b19
@@ -110,39 +110,39 @@ b12:                                              ; preds = %b11
   br label %b19
 
 b13:                                              ; preds = %b10, %b9, %b8
-  %v46 = getelementptr inbounds [750 x i32], [750 x i32]* @g2, i32 0, i32 %v7
-  %v47 = load i32, i32* %v46, align 4, !tbaa !0
+  %v46 = getelementptr inbounds [750 x i32], ptr @g2, i32 0, i32 %v7
+  %v47 = load i32, ptr %v46, align 4, !tbaa !0
   %v48 = and i32 %v47, 31
-  %v49 = getelementptr inbounds [0 x i32], [0 x i32]* @g12, i32 0, i32 %v48
-  %v50 = load i32, i32* %v49, align 4, !tbaa !0
+  %v49 = getelementptr inbounds [0 x i32], ptr @g12, i32 0, i32 %v48
+  %v50 = load i32, ptr %v49, align 4, !tbaa !0
   %v51 = icmp eq i32 %v50, 0
   br i1 %v51, label %b19, label %b14
 
 b14:                                              ; preds = %b13
-  %v52 = getelementptr inbounds [750 x i32], [750 x i32]* @g2, i32 0, i32 %v13
-  %v53 = load i32, i32* %v52, align 4, !tbaa !0
+  %v52 = getelementptr inbounds [750 x i32], ptr @g2, i32 0, i32 %v13
+  %v53 = load i32, ptr %v52, align 4, !tbaa !0
   %v54 = icmp slt i32 %v53, 11
   br i1 %v54, label %b15, label %b19
 
 b15:                                              ; preds = %b14
-  %v55 = getelementptr inbounds [750 x i32], [750 x i32]* @g6, i32 0, i32 %v7
-  %v56 = load i32, i32* %v55, align 4, !tbaa !0
+  %v55 = getelementptr inbounds [750 x i32], ptr @g6, i32 0, i32 %v7
+  %v56 = load i32, ptr %v55, align 4, !tbaa !0
   %v57 = icmp slt i32 %v56, 11
   br i1 %v57, label %b16, label %b19
 
 b16:                                              ; preds = %b15
-  %v58 = getelementptr inbounds [0 x i32], [0 x i32]* @g11, i32 0, i32 %v56
-  %v59 = load i32, i32* %v58, align 4, !tbaa !0
+  %v58 = getelementptr inbounds [0 x i32], ptr @g11, i32 0, i32 %v56
+  %v59 = load i32, ptr %v58, align 4, !tbaa !0
   %v60 = add nsw i32 %v59, 50
-  %v61 = getelementptr inbounds [750 x i32], [750 x i32]* @g3, i32 0, i32 %v7
-  %v62 = load i32, i32* %v61, align 4, !tbaa !0
-  %v63 = getelementptr inbounds [450 x i32], [450 x i32]* @g0, i32 0, i32 %v62
-  %v64 = load i32, i32* %v63, align 4, !tbaa !0
+  %v61 = getelementptr inbounds [750 x i32], ptr @g3, i32 0, i32 %v7
+  %v62 = load i32, ptr %v61, align 4, !tbaa !0
+  %v63 = getelementptr inbounds [450 x i32], ptr @g0, i32 0, i32 %v62
+  %v64 = load i32, ptr %v63, align 4, !tbaa !0
   %v65 = mul nsw i32 %v64, %v60
   %v66 = sdiv i32 %v65, 2
   %v67 = add nsw i32 %v66, %v6
-  %v68 = getelementptr inbounds [750 x i32], [750 x i32]* @g8, i32 0, i32 %v7
-  %v69 = load i32, i32* %v68, align 4, !tbaa !0
+  %v68 = getelementptr inbounds [750 x i32], ptr @g8, i32 0, i32 %v7
+  %v69 = load i32, ptr %v68, align 4, !tbaa !0
   %v70 = icmp sgt i32 %v69, 1
   br i1 %v70, label %b17, label %b18
 

diff  --git a/llvm/test/CodeGen/Hexagon/opt-glob-addrs-003.ll b/llvm/test/CodeGen/Hexagon/opt-glob-addrs-003.ll
index bc7dee6158266..d322fe9cd47f7 100644
--- a/llvm/test/CodeGen/Hexagon/opt-glob-addrs-003.ll
+++ b/llvm/test/CodeGen/Hexagon/opt-glob-addrs-003.ll
@@ -23,12 +23,12 @@ target triple = "hexagon"
 @g14 = external global i16
 
 ; Function Attrs: nounwind
-define signext i16 @f0(i16 signext %a0, i16* nocapture readonly %a1) #0 {
+define signext i16 @f0(i16 signext %a0, ptr nocapture readonly %a1) #0 {
 b0:
   %v0 = alloca i32, align 4
   %v1 = alloca i32, align 4
-  store i32 327685, i32* %v0, align 4
-  store i32 1048592, i32* %v1, align 4
+  store i32 327685, ptr %v0, align 4
+  store i32 1048592, ptr %v1, align 4
   %v2 = sext i16 %a0 to i32
   switch i32 %v2, label %b35 [
     i32 0, label %b1
@@ -40,64 +40,62 @@ b0:
   ]
 
 b1:                                               ; preds = %b0
-  %v3 = load i16, i16* %a1, align 2, !tbaa !0
+  %v3 = load i16, ptr %a1, align 2, !tbaa !0
   %v4 = icmp eq i16 %v3, -1
   br i1 %v4, label %b2, label %b4
 
 b2:                                               ; preds = %b1
-  %v5 = load i16, i16* @g0, align 2, !tbaa !0
+  %v5 = load i16, ptr @g0, align 2, !tbaa !0
   %v6 = add i16 %v5, 1
-  store i16 %v6, i16* @g0, align 2, !tbaa !0
+  store i16 %v6, ptr @g0, align 2, !tbaa !0
   %v7 = icmp sgt i16 %v6, 2
   br i1 %v7, label %b3, label %b5
 
 b3:                                               ; preds = %b2
-  store i16 3, i16* @g0, align 2, !tbaa !0
+  store i16 3, ptr @g0, align 2, !tbaa !0
   br label %b35
 
 b4:                                               ; preds = %b1
-  store i16 0, i16* @g0, align 2, !tbaa !0
+  store i16 0, ptr @g0, align 2, !tbaa !0
   br label %b5
 
 b5:                                               ; preds = %b4, %b2
-  %v8 = load i16, i16* %a1, align 2, !tbaa !0
+  %v8 = load i16, ptr %a1, align 2, !tbaa !0
   %v9 = icmp ne i16 %v8, 0
-  %v10 = load i16, i16* getelementptr inbounds ([2 x i16], [2 x i16]* @g1, i32 0, i32 0), align 2
+  %v10 = load i16, ptr @g1, align 2
   %v11 = icmp eq i16 %v10, 0
   %v12 = and i1 %v9, %v11
   br i1 %v12, label %b6, label %b35
 
 b6:                                               ; preds = %b5
-  %v13 = bitcast i32* %v0 to i16*
-  %v14 = bitcast i32* %v1 to i16*
-  call void @f1(i16* %v13, i16* %v14, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 0), i16* getelementptr inbounds (%s.0, %s.0* @g3, i32 0, i32 2, i32 0), i16* getelementptr inbounds ([160 x i16], [160 x i16]* @g4, i32 0, i32 0))
-  %v15 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 0), align 2, !tbaa !0
-  %v16 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 1), align 2, !tbaa !0
+  call void @f1(ptr %v0, ptr %v1, ptr @g2, ptr getelementptr inbounds (%s.0, ptr @g3, i32 0, i32 2, i32 0), ptr @g4)
+  %v15 = load i16, ptr @g2, align 2, !tbaa !0
+  %v16 = load i16, ptr getelementptr inbounds ([10 x i16], ptr @g2, i32 0, i32 1), align 2, !tbaa !0
   %v17 = icmp sgt i16 %v15, %v16
   %v18 = select i1 %v17, i16 %v15, i16 %v16
-  %v19 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 2), align 2, !tbaa !0
+  %v19 = load i16, ptr getelementptr inbounds ([10 x i16], ptr @g2, i32 0, i32 2), align 2, !tbaa !0
   %v20 = icmp sgt i16 %v18, %v19
   %v21 = select i1 %v20, i16 %v18, i16 %v19
-  %v22 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 3), align 2, !tbaa !0
+  %v22 = load i16, ptr getelementptr inbounds ([10 x i16], ptr @g2, i32 0, i32 3), align 2, !tbaa !0
   %v23 = icmp sgt i16 %v21, %v22
   %v24 = select i1 %v23, i16 %v21, i16 %v22
-  %v25 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 4), align 2, !tbaa !0
+  %v25 = load i16, ptr getelementptr inbounds ([10 x i16], ptr @g2, i32 0, i32 4), align 2, !tbaa !0
   %v26 = icmp sle i16 %v24, %v25
   %v27 = xor i1 %v23, true
   %v28 = or i1 %v26, %v27
   %v29 = select i1 %v26, i16 %v25, i16 %v22
   %v30 = select i1 %v28, i16 %v29, i16 %v21
-  %v31 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 5), align 2, !tbaa !0
-  %v32 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 6), align 2, !tbaa !0
+  %v31 = load i16, ptr getelementptr inbounds ([10 x i16], ptr @g2, i32 0, i32 5), align 2, !tbaa !0
+  %v32 = load i16, ptr getelementptr inbounds ([10 x i16], ptr @g2, i32 0, i32 6), align 2, !tbaa !0
   %v33 = icmp slt i16 %v31, %v32
   %v34 = select i1 %v33, i16 %v31, i16 %v32
-  %v35 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 7), align 2, !tbaa !0
+  %v35 = load i16, ptr getelementptr inbounds ([10 x i16], ptr @g2, i32 0, i32 7), align 2, !tbaa !0
   %v36 = icmp slt i16 %v34, %v35
   %v37 = select i1 %v36, i16 %v34, i16 %v35
-  %v38 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 8), align 2, !tbaa !0
+  %v38 = load i16, ptr getelementptr inbounds ([10 x i16], ptr @g2, i32 0, i32 8), align 2, !tbaa !0
   %v39 = icmp slt i16 %v37, %v38
   %v40 = select i1 %v39, i16 %v37, i16 %v38
-  %v41 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 9), align 2, !tbaa !0
+  %v41 = load i16, ptr getelementptr inbounds ([10 x i16], ptr @g2, i32 0, i32 9), align 2, !tbaa !0
   %v42 = icmp sge i16 %v40, %v41
   %v43 = xor i1 %v39, true
   %v44 = or i1 %v42, %v43
@@ -107,9 +105,9 @@ b6:                                               ; preds = %b5
   br i1 %v47, label %b7, label %b35
 
 b7:                                               ; preds = %b6
-  %v48 = load i16, i16* @g5, align 2, !tbaa !0
+  %v48 = load i16, ptr @g5, align 2, !tbaa !0
   %v49 = icmp eq i16 %v48, 4
-  %v50 = load i16, i16* @g6, align 2
+  %v50 = load i16, ptr @g6, align 2
   %v51 = icmp eq i16 %v50, 0
   %v52 = and i1 %v49, %v51
   br i1 %v52, label %b35, label %b8
@@ -118,25 +116,25 @@ b8:                                               ; preds = %b7
   br label %b35
 
 b9:                                               ; preds = %b0
-  store i16 0, i16* @g0, align 2, !tbaa !0
-  %v53 = load i16, i16* %a1, align 2, !tbaa !0
+  store i16 0, ptr @g0, align 2, !tbaa !0
+  %v53 = load i16, ptr %a1, align 2, !tbaa !0
   %v54 = icmp eq i16 %v53, 0
   %v55 = zext i1 %v54 to i16
-  %v56 = getelementptr i16, i16* %a1, i32 1
-  %v57 = load i16, i16* %v56, align 2, !tbaa !0
+  %v56 = getelementptr i16, ptr %a1, i32 1
+  %v57 = load i16, ptr %v56, align 2, !tbaa !0
   %v58 = icmp eq i16 %v57, 0
   %v59 = zext i1 %v58 to i16
   %v60 = add nuw nsw i16 %v59, %v55
-  %v61 = getelementptr inbounds i16, i16* %a1, i32 2
-  %v62 = load i16, i16* %v61, align 2, !tbaa !0
+  %v61 = getelementptr inbounds i16, ptr %a1, i32 2
+  %v62 = load i16, ptr %v61, align 2, !tbaa !0
   %v63 = icmp ult i16 %v62, 256
   %v64 = zext i1 %v63 to i16
   %v65 = add nuw nsw i16 %v64, %v60
-  %v66 = load i16, i16* getelementptr inbounds ([2 x i16], [2 x i16]* @g1, i32 0, i32 0), align 2
+  %v66 = load i16, ptr @g1, align 2
   %v67 = icmp eq i16 %v65, 3
   %v68 = icmp ne i16 %v66, 0
   %v69 = or i1 %v68, %v67
-  %v70 = load i16, i16* getelementptr inbounds (%s.0, %s.0* @g3, i32 0, i32 9), align 2
+  %v70 = load i16, ptr getelementptr inbounds (%s.0, ptr @g3, i32 0, i32 9), align 2
   %v71 = icmp eq i16 %v70, 3
   %v72 = or i1 %v71, %v69
   br i1 %v72, label %b35, label %b10
@@ -145,36 +143,36 @@ b10:                                              ; preds = %b9
   br label %b35
 
 b11:                                              ; preds = %b0
-  store i16 0, i16* @g0, align 2, !tbaa !0
-  %v73 = load i16, i16* %a1, align 2, !tbaa !0
+  store i16 0, ptr @g0, align 2, !tbaa !0
+  %v73 = load i16, ptr %a1, align 2, !tbaa !0
   %v74 = icmp eq i16 %v73, 0
   %v75 = zext i1 %v74 to i16
-  %v76 = getelementptr i16, i16* %a1, i32 1
-  %v77 = load i16, i16* %v76, align 2, !tbaa !0
+  %v76 = getelementptr i16, ptr %a1, i32 1
+  %v77 = load i16, ptr %v76, align 2, !tbaa !0
   %v78 = icmp eq i16 %v77, 0
   %v79 = zext i1 %v78 to i16
   %v80 = add nuw nsw i16 %v79, %v75
-  %v81 = getelementptr inbounds i16, i16* %a1, i32 2
-  %v82 = load i16, i16* %v81, align 2, !tbaa !0
+  %v81 = getelementptr inbounds i16, ptr %a1, i32 2
+  %v82 = load i16, ptr %v81, align 2, !tbaa !0
   %v83 = icmp ult i16 %v82, 256
   %v84 = zext i1 %v83 to i16
   %v85 = add nuw nsw i16 %v84, %v80
   %v86 = icmp ne i16 %v85, 3
-  %v87 = load i16, i16* getelementptr inbounds ([2 x i16], [2 x i16]* @g1, i32 0, i32 0), align 2
+  %v87 = load i16, ptr @g1, align 2
   %v88 = icmp eq i16 %v87, 0
   %v89 = and i1 %v88, %v86
   br i1 %v89, label %b12, label %b35
 
 b12:                                              ; preds = %b11
-  %v90 = load i16, i16* @g5, align 2, !tbaa !0
+  %v90 = load i16, ptr @g5, align 2, !tbaa !0
   switch i16 %v90, label %b14 [
     i16 1, label %b35
     i16 2, label %b13
   ]
 
 b13:                                              ; preds = %b12
-  %v91 = load i16, i16* @g7, align 2, !tbaa !0
-  %v92 = load i16, i16* @g6, align 2
+  %v91 = load i16, ptr @g7, align 2, !tbaa !0
+  %v92 = load i16, ptr @g6, align 2
   %v93 = or i16 %v92, %v91
   %v94 = icmp eq i16 %v93, 0
   br i1 %v94, label %b35, label %b14
@@ -183,27 +181,27 @@ b14:                                              ; preds = %b13, %b12
   br label %b35
 
 b15:                                              ; preds = %b0
-  store i16 0, i16* @g0, align 2, !tbaa !0
-  %v95 = load i16, i16* %a1, align 2, !tbaa !0
+  store i16 0, ptr @g0, align 2, !tbaa !0
+  %v95 = load i16, ptr %a1, align 2, !tbaa !0
   %v96 = icmp eq i16 %v95, 0
   %v97 = zext i1 %v96 to i16
-  %v98 = getelementptr i16, i16* %a1, i32 1
-  %v99 = load i16, i16* %v98, align 2, !tbaa !0
+  %v98 = getelementptr i16, ptr %a1, i32 1
+  %v99 = load i16, ptr %v98, align 2, !tbaa !0
   %v100 = icmp eq i16 %v99, 0
   %v101 = zext i1 %v100 to i16
   %v102 = add nuw nsw i16 %v101, %v97
-  %v103 = getelementptr i16, i16* %a1, i32 2
-  %v104 = load i16, i16* %v103, align 2, !tbaa !0
+  %v103 = getelementptr i16, ptr %a1, i32 2
+  %v104 = load i16, ptr %v103, align 2, !tbaa !0
   %v105 = icmp eq i16 %v104, 0
   %v106 = zext i1 %v105 to i16
   %v107 = add nuw nsw i16 %v106, %v102
-  %v108 = getelementptr i16, i16* %a1, i32 3
-  %v109 = load i16, i16* %v108, align 2, !tbaa !0
+  %v108 = getelementptr i16, ptr %a1, i32 3
+  %v109 = load i16, ptr %v108, align 2, !tbaa !0
   %v110 = icmp eq i16 %v109, 0
   %v111 = zext i1 %v110 to i16
   %v112 = add nuw nsw i16 %v111, %v107
-  %v113 = getelementptr i16, i16* %a1, i32 4
-  %v114 = load i16, i16* %v113, align 2, !tbaa !0
+  %v113 = getelementptr i16, ptr %a1, i32 4
+  %v114 = load i16, ptr %v113, align 2, !tbaa !0
   %v115 = icmp eq i16 %v114, 0
   %v116 = zext i1 %v115 to i16
   %v117 = add nuw nsw i16 %v116, %v112
@@ -211,7 +209,7 @@ b15:                                              ; preds = %b0
   br i1 %v118, label %b35, label %b16
 
 b16:                                              ; preds = %b15
-  %v119 = load i16, i16* getelementptr inbounds (%s.0, %s.0* @g3, i32 0, i32 3), align 2, !tbaa !4
+  %v119 = load i16, ptr getelementptr inbounds (%s.0, ptr @g3, i32 0, i32 3), align 2, !tbaa !4
   switch i16 %v119, label %b17 [
     i16 120, label %b19
     i16 115, label %b19
@@ -222,20 +220,20 @@ b17:                                              ; preds = %b16
   br i1 %v120, label %b35, label %b18
 
 b18:                                              ; preds = %b17
-  tail call void @f2(i16* getelementptr inbounds (%s.0, %s.0* @g3, i32 0, i32 2, i32 0), i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 0))
-  %v121 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 0), align 2, !tbaa !0
-  %v122 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 1), align 2, !tbaa !0
-  %v123 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 2), align 2, !tbaa !0
+  tail call void @f2(ptr getelementptr inbounds (%s.0, ptr @g3, i32 0, i32 2, i32 0), ptr @g2)
+  %v121 = load i16, ptr @g2, align 2, !tbaa !0
+  %v122 = load i16, ptr getelementptr inbounds ([10 x i16], ptr @g2, i32 0, i32 1), align 2, !tbaa !0
+  %v123 = load i16, ptr getelementptr inbounds ([10 x i16], ptr @g2, i32 0, i32 2), align 2, !tbaa !0
   %v124 = icmp sgt i16 %v122, %v123
   %v125 = select i1 %v124, i16 %v122, i16 %v123
   %v126 = icmp sgt i16 %v121, %v125
   %v127 = select i1 %v126, i16 %v121, i16 %v125
-  %v128 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 6), align 2, !tbaa !0
-  %v129 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 7), align 2, !tbaa !0
-  %v130 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 8), align 2, !tbaa !0
+  %v128 = load i16, ptr getelementptr inbounds ([10 x i16], ptr @g2, i32 0, i32 6), align 2, !tbaa !0
+  %v129 = load i16, ptr getelementptr inbounds ([10 x i16], ptr @g2, i32 0, i32 7), align 2, !tbaa !0
+  %v130 = load i16, ptr getelementptr inbounds ([10 x i16], ptr @g2, i32 0, i32 8), align 2, !tbaa !0
   %v131 = icmp slt i16 %v129, %v130
   %v132 = select i1 %v131, i16 %v129, i16 %v130
-  %v133 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 9), align 2, !tbaa !0
+  %v133 = load i16, ptr getelementptr inbounds ([10 x i16], ptr @g2, i32 0, i32 9), align 2, !tbaa !0
   %v134 = icmp slt i16 %v132, %v133
   %v135 = select i1 %v134, i16 %v132, i16 %v133
   %v136 = icmp slt i16 %v128, %v135
@@ -247,57 +245,57 @@ b19:                                              ; preds = %b18, %b16, %b16
   br label %b35
 
 b20:                                              ; preds = %b0
-  store i16 0, i16* @g0, align 2, !tbaa !0
-  %v139 = load i16, i16* %a1, align 2, !tbaa !0
+  store i16 0, ptr @g0, align 2, !tbaa !0
+  %v139 = load i16, ptr %a1, align 2, !tbaa !0
   %v140 = icmp eq i16 %v139, 0
   %v141 = zext i1 %v140 to i16
-  %v142 = getelementptr i16, i16* %a1, i32 1
-  %v143 = load i16, i16* %v142, align 2, !tbaa !0
+  %v142 = getelementptr i16, ptr %a1, i32 1
+  %v143 = load i16, ptr %v142, align 2, !tbaa !0
   %v144 = icmp eq i16 %v143, 0
   %v145 = zext i1 %v144 to i16
   %v146 = add nuw nsw i16 %v145, %v141
-  %v147 = getelementptr i16, i16* %a1, i32 2
-  %v148 = load i16, i16* %v147, align 2, !tbaa !0
+  %v147 = getelementptr i16, ptr %a1, i32 2
+  %v148 = load i16, ptr %v147, align 2, !tbaa !0
   %v149 = icmp eq i16 %v148, 0
   %v150 = zext i1 %v149 to i16
   %v151 = add nuw nsw i16 %v150, %v146
-  %v152 = getelementptr i16, i16* %a1, i32 3
-  %v153 = load i16, i16* %v152, align 2, !tbaa !0
+  %v152 = getelementptr i16, ptr %a1, i32 3
+  %v153 = load i16, ptr %v152, align 2, !tbaa !0
   %v154 = icmp eq i16 %v153, 0
   %v155 = zext i1 %v154 to i16
   %v156 = add nuw nsw i16 %v155, %v151
-  %v157 = getelementptr i16, i16* %a1, i32 4
-  %v158 = load i16, i16* %v157, align 2, !tbaa !0
+  %v157 = getelementptr i16, ptr %a1, i32 4
+  %v158 = load i16, ptr %v157, align 2, !tbaa !0
   %v159 = icmp eq i16 %v158, 0
   %v160 = zext i1 %v159 to i16
   %v161 = add nuw nsw i16 %v160, %v156
-  %v162 = getelementptr i16, i16* %a1, i32 5
-  %v163 = load i16, i16* %v162, align 2, !tbaa !0
+  %v162 = getelementptr i16, ptr %a1, i32 5
+  %v163 = load i16, ptr %v162, align 2, !tbaa !0
   %v164 = icmp eq i16 %v163, 0
   %v165 = zext i1 %v164 to i16
   %v166 = add nuw nsw i16 %v165, %v161
-  %v167 = getelementptr i16, i16* %a1, i32 6
-  %v168 = load i16, i16* %v167, align 2, !tbaa !0
+  %v167 = getelementptr i16, ptr %a1, i32 6
+  %v168 = load i16, ptr %v167, align 2, !tbaa !0
   %v169 = icmp eq i16 %v168, 0
   %v170 = zext i1 %v169 to i16
   %v171 = add nuw nsw i16 %v170, %v166
-  %v172 = getelementptr i16, i16* %a1, i32 7
-  %v173 = load i16, i16* %v172, align 2, !tbaa !0
+  %v172 = getelementptr i16, ptr %a1, i32 7
+  %v173 = load i16, ptr %v172, align 2, !tbaa !0
   %v174 = icmp eq i16 %v173, 0
   %v175 = zext i1 %v174 to i16
   %v176 = add i16 %v175, %v171
-  %v177 = getelementptr i16, i16* %a1, i32 8
-  %v178 = load i16, i16* %v177, align 2, !tbaa !0
+  %v177 = getelementptr i16, ptr %a1, i32 8
+  %v178 = load i16, ptr %v177, align 2, !tbaa !0
   %v179 = icmp eq i16 %v178, 0
   %v180 = zext i1 %v179 to i16
   %v181 = add i16 %v180, %v176
-  %v182 = getelementptr i16, i16* %a1, i32 9
-  %v183 = load i16, i16* %v182, align 2, !tbaa !0
+  %v182 = getelementptr i16, ptr %a1, i32 9
+  %v183 = load i16, ptr %v182, align 2, !tbaa !0
   %v184 = icmp eq i16 %v183, 0
   %v185 = zext i1 %v184 to i16
   %v186 = add i16 %v185, %v181
-  %v187 = getelementptr inbounds i16, i16* %a1, i32 10
-  %v188 = load i16, i16* %v187, align 2, !tbaa !0
+  %v187 = getelementptr inbounds i16, ptr %a1, i32 10
+  %v188 = load i16, ptr %v187, align 2, !tbaa !0
   %v189 = icmp ult i16 %v188, 32
   %v190 = zext i1 %v189 to i16
   %v191 = add i16 %v190, %v186
@@ -305,122 +303,122 @@ b20:                                              ; preds = %b0
   br i1 %v192, label %b35, label %b21
 
 b21:                                              ; preds = %b20
-  tail call void @f3(i16* getelementptr inbounds (%s.0, %s.0* @g3, i32 0, i32 2, i32 0), i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 0))
-  %v193 = load i16, i16* @g8, align 2, !tbaa !0
+  tail call void @f3(ptr getelementptr inbounds (%s.0, ptr @g3, i32 0, i32 2, i32 0), ptr @g2)
+  %v193 = load i16, ptr @g8, align 2, !tbaa !0
   %v194 = icmp eq i16 %v193, 0
   br i1 %v194, label %b22, label %b35
 
 b22:                                              ; preds = %b21
-  %v195 = load i16, i16* getelementptr inbounds (%s.0, %s.0* @g3, i32 0, i32 3), align 2, !tbaa !4
+  %v195 = load i16, ptr getelementptr inbounds (%s.0, ptr @g3, i32 0, i32 3), align 2, !tbaa !4
   %v196 = icmp sgt i16 %v195, 100
   br i1 %v196, label %b35, label %b23
 
 b23:                                              ; preds = %b22
-  %v197 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 0), align 2, !tbaa !0
-  %v198 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 1), align 2, !tbaa !0
+  %v197 = load i16, ptr @g2, align 2, !tbaa !0
+  %v198 = load i16, ptr getelementptr inbounds ([10 x i16], ptr @g2, i32 0, i32 1), align 2, !tbaa !0
   %v199 = icmp sgt i16 %v197, %v198
   %v200 = select i1 %v199, i16 %v197, i16 %v198
-  %v201 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 4), align 2, !tbaa !0
-  %v202 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 5), align 2, !tbaa !0
+  %v201 = load i16, ptr getelementptr inbounds ([10 x i16], ptr @g2, i32 0, i32 4), align 2, !tbaa !0
+  %v202 = load i16, ptr getelementptr inbounds ([10 x i16], ptr @g2, i32 0, i32 5), align 2, !tbaa !0
   %v203 = icmp slt i16 %v201, %v202
   %v204 = select i1 %v203, i16 %v201, i16 %v202
-  %v205 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 6), align 2, !tbaa !0
+  %v205 = load i16, ptr getelementptr inbounds ([10 x i16], ptr @g2, i32 0, i32 6), align 2, !tbaa !0
   %v206 = icmp slt i16 %v204, %v205
   %v207 = select i1 %v206, i16 %v204, i16 %v205
   %v208 = icmp slt i16 %v200, %v207
   br i1 %v208, label %b24, label %b35
 
 b24:                                              ; preds = %b23
-  %v209 = load i16, i16* @g5, align 2, !tbaa !0
+  %v209 = load i16, ptr @g5, align 2, !tbaa !0
   switch i16 %v209, label %b26 [
     i16 1, label %b35
     i16 2, label %b25
   ]
 
 b25:                                              ; preds = %b24
-  %v210 = load i16, i16* @g7, align 2, !tbaa !0
-  %v211 = load i16, i16* @g6, align 2
+  %v210 = load i16, ptr @g7, align 2, !tbaa !0
+  %v211 = load i16, ptr @g6, align 2
   %v212 = or i16 %v211, %v210
   %v213 = icmp eq i16 %v212, 0
   br i1 %v213, label %b35, label %b27
 
 b26:                                              ; preds = %b24
-  %v214 = load i16, i16* @g6, align 2
+  %v214 = load i16, ptr @g6, align 2
   %v215 = icmp eq i16 %v214, 0
   br i1 %v215, label %b28, label %b35
 
 b27:                                              ; preds = %b25
-  %v216 = load i16, i16* @g9, align 2
+  %v216 = load i16, ptr @g9, align 2
   %v217 = icmp eq i16 %v216, 0
   br i1 %v217, label %b28, label %b35
 
 b28:                                              ; preds = %b27, %b26
   %v218 = tail call signext i16 @f4(i16 signext %v195, i16 signext 20)
-  store i16 %v218, i16* @g10, align 2, !tbaa !0
-  %v219 = load i16, i16* @g11, align 2, !tbaa !0
+  store i16 %v218, ptr @g10, align 2, !tbaa !0
+  %v219 = load i16, ptr @g11, align 2, !tbaa !0
   %v220 = tail call signext i16 @f6(i16 signext %v218, i16 signext %v219)
   %v221 = tail call signext i16 @f5(i16 signext %v220)
   %v222 = icmp sgt i16 %v221, 15
   br i1 %v222, label %b29, label %b35
 
 b29:                                              ; preds = %b28
-  call void @llvm.memset.p0i8.i32(i8* align 2 bitcast ([192 x i16]* @g12 to i8*), i8 0, i32 256, i1 false)
-  call void @llvm.memset.p0i8.i32(i8* align 4 bitcast ([10 x i32]* @g13 to i8*), i8 0, i32 40, i1 false)
+  call void @llvm.memset.p0.i32(ptr align 2 @g12, i8 0, i32 256, i1 false)
+  call void @llvm.memset.p0.i32(ptr align 4 @g13, i8 0, i32 40, i1 false)
   tail call void @f7()
   br label %b35
 
 b30:                                              ; preds = %b0
-  store i16 0, i16* @g0, align 2, !tbaa !0
-  %v223 = load i16, i16* %a1, align 2, !tbaa !0
+  store i16 0, ptr @g0, align 2, !tbaa !0
+  %v223 = load i16, ptr %a1, align 2, !tbaa !0
   %v224 = icmp eq i16 %v223, 0
   %v225 = zext i1 %v224 to i16
-  %v226 = getelementptr i16, i16* %a1, i32 1
-  %v227 = load i16, i16* %v226, align 2, !tbaa !0
+  %v226 = getelementptr i16, ptr %a1, i32 1
+  %v227 = load i16, ptr %v226, align 2, !tbaa !0
   %v228 = icmp eq i16 %v227, 0
   %v229 = zext i1 %v228 to i16
   %v230 = add nuw nsw i16 %v229, %v225
-  %v231 = getelementptr i16, i16* %a1, i32 2
-  %v232 = load i16, i16* %v231, align 2, !tbaa !0
+  %v231 = getelementptr i16, ptr %a1, i32 2
+  %v232 = load i16, ptr %v231, align 2, !tbaa !0
   %v233 = icmp eq i16 %v232, 0
   %v234 = zext i1 %v233 to i16
   %v235 = add nuw nsw i16 %v234, %v230
-  %v236 = getelementptr i16, i16* %a1, i32 3
-  %v237 = load i16, i16* %v236, align 2, !tbaa !0
+  %v236 = getelementptr i16, ptr %a1, i32 3
+  %v237 = load i16, ptr %v236, align 2, !tbaa !0
   %v238 = icmp eq i16 %v237, 0
   %v239 = zext i1 %v238 to i16
   %v240 = add nuw nsw i16 %v239, %v235
-  %v241 = getelementptr i16, i16* %a1, i32 4
-  %v242 = load i16, i16* %v241, align 2, !tbaa !0
+  %v241 = getelementptr i16, ptr %a1, i32 4
+  %v242 = load i16, ptr %v241, align 2, !tbaa !0
   %v243 = icmp eq i16 %v242, 0
   %v244 = zext i1 %v243 to i16
   %v245 = add nuw nsw i16 %v244, %v240
-  %v246 = getelementptr i16, i16* %a1, i32 5
-  %v247 = load i16, i16* %v246, align 2, !tbaa !0
+  %v246 = getelementptr i16, ptr %a1, i32 5
+  %v247 = load i16, ptr %v246, align 2, !tbaa !0
   %v248 = icmp eq i16 %v247, 0
   %v249 = zext i1 %v248 to i16
   %v250 = add nuw nsw i16 %v249, %v245
-  %v251 = getelementptr i16, i16* %a1, i32 6
-  %v252 = load i16, i16* %v251, align 2, !tbaa !0
+  %v251 = getelementptr i16, ptr %a1, i32 6
+  %v252 = load i16, ptr %v251, align 2, !tbaa !0
   %v253 = icmp eq i16 %v252, 0
   %v254 = zext i1 %v253 to i16
   %v255 = add nuw nsw i16 %v254, %v250
-  %v256 = getelementptr i16, i16* %a1, i32 7
-  %v257 = load i16, i16* %v256, align 2, !tbaa !0
+  %v256 = getelementptr i16, ptr %a1, i32 7
+  %v257 = load i16, ptr %v256, align 2, !tbaa !0
   %v258 = icmp eq i16 %v257, 0
   %v259 = zext i1 %v258 to i16
   %v260 = add i16 %v259, %v255
-  %v261 = getelementptr i16, i16* %a1, i32 8
-  %v262 = load i16, i16* %v261, align 2, !tbaa !0
+  %v261 = getelementptr i16, ptr %a1, i32 8
+  %v262 = load i16, ptr %v261, align 2, !tbaa !0
   %v263 = icmp eq i16 %v262, 0
   %v264 = zext i1 %v263 to i16
   %v265 = add i16 %v264, %v260
-  %v266 = getelementptr i16, i16* %a1, i32 9
-  %v267 = load i16, i16* %v266, align 2, !tbaa !0
+  %v266 = getelementptr i16, ptr %a1, i32 9
+  %v267 = load i16, ptr %v266, align 2, !tbaa !0
   %v268 = icmp eq i16 %v267, 0
   %v269 = zext i1 %v268 to i16
   %v270 = add i16 %v269, %v265
-  %v271 = getelementptr inbounds i16, i16* %a1, i32 10
-  %v272 = load i16, i16* %v271, align 2, !tbaa !0
+  %v271 = getelementptr inbounds i16, ptr %a1, i32 10
+  %v272 = load i16, ptr %v271, align 2, !tbaa !0
   %v273 = icmp ult i16 %v272, 32
   %v274 = zext i1 %v273 to i16
   %v275 = add i16 %v274, %v270
@@ -428,26 +426,26 @@ b30:                                              ; preds = %b0
   br i1 %v276, label %b35, label %b31
 
 b31:                                              ; preds = %b30
-  tail call void @f3(i16* getelementptr inbounds (%s.0, %s.0* @g3, i32 0, i32 2, i32 0), i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 0))
-  %v277 = load i16, i16* @g14, align 2, !tbaa !0
+  tail call void @f3(ptr getelementptr inbounds (%s.0, ptr @g3, i32 0, i32 2, i32 0), ptr @g2)
+  %v277 = load i16, ptr @g14, align 2, !tbaa !0
   %v278 = icmp eq i16 %v277, 0
   br i1 %v278, label %b32, label %b34
 
 b32:                                              ; preds = %b31
-  %v279 = load i16, i16* getelementptr inbounds (%s.0, %s.0* @g3, i32 0, i32 3), align 2, !tbaa !4
+  %v279 = load i16, ptr getelementptr inbounds (%s.0, ptr @g3, i32 0, i32 3), align 2, !tbaa !4
   %v280 = icmp sgt i16 %v279, 100
   br i1 %v280, label %b35, label %b33
 
 b33:                                              ; preds = %b32
-  %v281 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 0), align 2, !tbaa !0
-  %v282 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 1), align 2, !tbaa !0
+  %v281 = load i16, ptr @g2, align 2, !tbaa !0
+  %v282 = load i16, ptr getelementptr inbounds ([10 x i16], ptr @g2, i32 0, i32 1), align 2, !tbaa !0
   %v283 = icmp sgt i16 %v281, %v282
   %v284 = select i1 %v283, i16 %v281, i16 %v282
-  %v285 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 4), align 2, !tbaa !0
-  %v286 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 5), align 2, !tbaa !0
+  %v285 = load i16, ptr getelementptr inbounds ([10 x i16], ptr @g2, i32 0, i32 4), align 2, !tbaa !0
+  %v286 = load i16, ptr getelementptr inbounds ([10 x i16], ptr @g2, i32 0, i32 5), align 2, !tbaa !0
   %v287 = icmp slt i16 %v285, %v286
   %v288 = select i1 %v287, i16 %v285, i16 %v286
-  %v289 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 6), align 2, !tbaa !0
+  %v289 = load i16, ptr getelementptr inbounds ([10 x i16], ptr @g2, i32 0, i32 6), align 2, !tbaa !0
   %v290 = icmp slt i16 %v288, %v289
   %v291 = select i1 %v290, i16 %v288, i16 %v289
   %v292 = icmp slt i16 %v284, %v291
@@ -462,13 +460,13 @@ b35:                                              ; preds = %b34, %b33, %b32, %b
 }
 
 ; Function Attrs: nounwind
-declare void @f1(i16*, i16*, i16*, i16*, i16*) #0
+declare void @f1(ptr, ptr, ptr, ptr, ptr) #0
 
 ; Function Attrs: nounwind
-declare void @f2(i16*, i16*) #0
+declare void @f2(ptr, ptr) #0
 
 ; Function Attrs: nounwind
-declare void @f3(i16*, i16*) #0
+declare void @f3(ptr, ptr) #0
 
 ; Function Attrs: nounwind
 declare signext i16 @f4(i16 signext, i16 signext) #0
@@ -483,7 +481,7 @@ declare signext i16 @f6(i16 signext, i16 signext) #0
 declare void @f7() #0
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1) #1
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1) #1
 
 attributes #0 = { nounwind }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/opt-sext-intrinsics.ll b/llvm/test/CodeGen/Hexagon/opt-sext-intrinsics.ll
index 2f4a01094fbe9..3a348908d3552 100644
--- a/llvm/test/CodeGen/Hexagon/opt-sext-intrinsics.ll
+++ b/llvm/test/CodeGen/Hexagon/opt-sext-intrinsics.ll
@@ -16,7 +16,7 @@ b0:
   %v6 = tail call i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32 %v0, i32 %v1)
   %v7 = shl i32 %v6, 16
   %v8 = ashr exact i32 %v7, 16
-  %v9 = load i32, i32* @g0, align 4
+  %v9 = load i32, ptr @g0, align 4
   %v10 = icmp ne i32 %v9, %v6
   %v11 = zext i1 %v10 to i32
   ret i32 %v11

diff  --git a/llvm/test/CodeGen/Hexagon/opt-spill-volatile.ll b/llvm/test/CodeGen/Hexagon/opt-spill-volatile.ll
index 1c86716132fde..48ac3fd5dab5a 100644
--- a/llvm/test/CodeGen/Hexagon/opt-spill-volatile.ll
+++ b/llvm/test/CodeGen/Hexagon/opt-spill-volatile.ll
@@ -10,18 +10,17 @@ target triple = "hexagon"
 define i32 @foo(i32 %a) #0 {
 entry:
   %x = alloca i32, align 4
-  %x.0.x.0..sroa_cast = bitcast i32* %x to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %x.0.x.0..sroa_cast)
-  store volatile i32 0, i32* %x, align 4
-  %call = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() #0
-  %x.0.x.0. = load volatile i32, i32* %x, align 4
+  call void @llvm.lifetime.start.p0(i64 4, ptr %x)
+  store volatile i32 0, ptr %x, align 4
+  %call = tail call i32 @bar() #0
+  %x.0.x.0. = load volatile i32, ptr %x, align 4
   %add = add nsw i32 %x.0.x.0., %a
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* %x.0.x.0..sroa_cast)
+  call void @llvm.lifetime.end.p0(i64 4, ptr %x)
   ret i32 %add
 }
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
 
 declare i32 @bar(...) #0
 

diff  --git a/llvm/test/CodeGen/Hexagon/packed-store.ll b/llvm/test/CodeGen/Hexagon/packed-store.ll
index b7f6808272600..59bb2975770e6 100644
--- a/llvm/test/CodeGen/Hexagon/packed-store.ll
+++ b/llvm/test/CodeGen/Hexagon/packed-store.ll
@@ -12,29 +12,29 @@ target triple = "hexagon-unknown-linux-gnu"
 define i32 @f0(i32 %a0) #0 {
 b0:
   %v0 = alloca i32, align 4
-  store i32 %a0, i32* %v0, align 4
-  store i8 1, i8* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 1), align 1
-  %v1 = load i16, i16* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 2), align 1
+  store i32 %a0, ptr %v0, align 4
+  store i8 1, ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 1), align 1
+  %v1 = load i16, ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 2), align 1
   %v2 = add i16 %v1, 1
-  store i16 %v2, i16* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 2), align 1
-  %v3 = load i32, i32* %v0, align 4
+  store i16 %v2, ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 2), align 1
+  %v3 = load i32, ptr %v0, align 4
   %v4 = icmp ne i32 %v3, 0
   br i1 %v4, label %b1, label %b2
 
 b1:                                               ; preds = %b0
-  %v5 = load i16, i16* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 2), align 1
+  %v5 = load i16, ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 2), align 1
   %v6 = zext i16 %v5 to i32
   %v7 = or i32 %v6, 6144
   %v8 = trunc i32 %v7 to i16
-  store i16 %v8, i16* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 2), align 1
+  store i16 %v8, ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 2), align 1
   br label %b3
 
 b2:                                               ; preds = %b0
-  %v9 = load i16, i16* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 2), align 1
+  %v9 = load i16, ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 2), align 1
   %v10 = zext i16 %v9 to i32
   %v11 = or i32 %v10, 2048
   %v12 = trunc i32 %v11 to i16
-  store i16 %v12, i16* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 2), align 1
+  store i16 %v12, ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 2), align 1
   br label %b3
 
 b3:                                               ; preds = %b2, %b1

diff  --git a/llvm/test/CodeGen/Hexagon/packetize-allocframe.ll b/llvm/test/CodeGen/Hexagon/packetize-allocframe.ll
index ecc6cf542f16b..e0d860271256d 100644
--- a/llvm/test/CodeGen/Hexagon/packetize-allocframe.ll
+++ b/llvm/test/CodeGen/Hexagon/packetize-allocframe.ll
@@ -4,7 +4,7 @@
 ; CFI instructions while forming packet for allocframe. Refer to 7d7d99622
 ; which replaced PROLOG_LABEL with CFI_INSTRUCTION.
 
- at g0 = external constant i8*
+ at g0 = external constant ptr
 
 ; We used to emit:
 ;      {
@@ -20,45 +20,44 @@
 ; CHECK-NEXT:   allocframe(#0)
 ; CHECK-NEXT: }
 
-define i32 @f0() personality i8* bitcast (i32 (...)* @f3 to i8*) {
+define i32 @f0() personality ptr @f3 {
 b0:
-  %v0 = tail call i8* @f1(i32 4) #1
-  %v1 = bitcast i8* %v0 to i32*
-  store i32 20, i32* %v1, align 4, !tbaa !0
-  invoke void @f2(i8* %v0, i8* bitcast (i8** @g0 to i8*), i8* null) #2
+  %v0 = tail call ptr @f1(i32 4) #1
+  store i32 20, ptr %v0, align 4, !tbaa !0
+  invoke void @f2(ptr %v0, ptr @g0, ptr null) #2
           to label %b4 unwind label %b1
 
 b1:                                               ; preds = %b0
-  %v2 = landingpad { i8*, i32 }
-          catch i8* bitcast (i8** @g0 to i8*)
-  %v3 = extractvalue { i8*, i32 } %v2, 1
-  %v4 = tail call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @g0 to i8*)) #1
+  %v2 = landingpad { ptr, i32 }
+          catch ptr @g0
+  %v3 = extractvalue { ptr, i32 } %v2, 1
+  %v4 = tail call i32 @llvm.eh.typeid.for(ptr @g0) #1
   %v5 = icmp eq i32 %v3, %v4
   br i1 %v5, label %b2, label %b3
 
 b2:                                               ; preds = %b1
-  %v6 = extractvalue { i8*, i32 } %v2, 0
-  %v7 = tail call i8* @f4(i8* %v6) #1
+  %v6 = extractvalue { ptr, i32 } %v2, 0
+  %v7 = tail call ptr @f4(ptr %v6) #1
   tail call void @f5() #1
   ret i32 1
 
 b3:                                               ; preds = %b1
-  resume { i8*, i32 } %v2
+  resume { ptr, i32 } %v2
 
 b4:                                               ; preds = %b0
   unreachable
 }
 
-declare i8* @f1(i32)
+declare ptr @f1(i32)
 
-declare void @f2(i8*, i8*, i8*)
+declare void @f2(ptr, ptr, ptr)
 
 declare i32 @f3(...)
 
 ; Function Attrs: nounwind readnone
-declare i32 @llvm.eh.typeid.for(i8*) #0
+declare i32 @llvm.eh.typeid.for(ptr) #0
 
-declare i8* @f4(i8*)
+declare ptr @f4(ptr)
 
 declare void @f5()
 

diff  --git a/llvm/test/CodeGen/Hexagon/packetize-call-r29.ll b/llvm/test/CodeGen/Hexagon/packetize-call-r29.ll
index e4900ee74a44a..dab2df75ab884 100644
--- a/llvm/test/CodeGen/Hexagon/packetize-call-r29.ll
+++ b/llvm/test/CodeGen/Hexagon/packetize-call-r29.ll
@@ -6,20 +6,20 @@
 ; CHECK: }
 ; CHECK: r29 = #0
 
-define protected void @f0(i8* %a0, i8* %a1, ...) local_unnamed_addr {
+define protected void @f0(ptr %a0, ptr %a1, ...) local_unnamed_addr {
 b0:
-  call void @llvm.va_start(i8* nonnull undef)
+  call void @llvm.va_start(ptr nonnull undef)
   call void @f1()
-  call void @llvm.stackrestore(i8* null)
+  call void @llvm.stackrestore(ptr null)
   ret void
 }
 
 ; Function Attrs: nounwind
-declare void @llvm.va_start(i8*) #0
+declare void @llvm.va_start(ptr) #0
 
 declare protected void @f1() local_unnamed_addr
 
 ; Function Attrs: nounwind
-declare void @llvm.stackrestore(i8*) #0
+declare void @llvm.stackrestore(ptr) #0
 
 attributes #0 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/packetize-cfi-location.ll b/llvm/test/CodeGen/Hexagon/packetize-cfi-location.ll
index 0d80a7bb289d0..5188667bfa4b6 100644
--- a/llvm/test/CodeGen/Hexagon/packetize-cfi-location.ll
+++ b/llvm/test/CodeGen/Hexagon/packetize-cfi-location.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s
 
 target triple = "hexagon"
-%type.0 = type { i32, i8**, i32, i32, i32 }
+%type.0 = type { i32, ptr, i32, i32, i32 }
 
 ; Check that CFI is before the packet with call+allocframe.
 ; CHECK-LABEL: danny:
@@ -15,34 +15,34 @@ target triple = "hexagon"
 ;   allocframe(#0)
 ; }
 
-define i8* @danny(%type.0* %p0, i32 %p1) #0 {
+define ptr @danny(ptr %p0, i32 %p1) #0 {
 entry:
-  %t0 = getelementptr inbounds %type.0, %type.0* %p0, i32 0, i32 4
-  %t1 = load i32, i32* %t0, align 4
+  %t0 = getelementptr inbounds %type.0, ptr %p0, i32 0, i32 4
+  %t1 = load i32, ptr %t0, align 4
   %th = icmp ugt i32 %t1, %p1
   br i1 %th, label %if.end, label %if.then
 
 if.then:                                          ; preds = %entry
-  tail call void @throw(%type.0* nonnull %p0)
+  tail call void @throw(ptr nonnull %p0)
   unreachable
 
 if.end:                                           ; preds = %entry
-  %t6 = getelementptr inbounds %type.0, %type.0* %p0, i32 0, i32 3
-  %t2 = load i32, i32* %t6, align 4
+  %t6 = getelementptr inbounds %type.0, ptr %p0, i32 0, i32 3
+  %t2 = load i32, ptr %t6, align 4
   %t9 = add i32 %t2, %p1
   %ta = lshr i32 %t9, 4
   %tb = and i32 %t9, 15
-  %t7 = getelementptr inbounds %type.0, %type.0* %p0, i32 0, i32 2
-  %t3 = load i32, i32* %t7, align 4
+  %t7 = getelementptr inbounds %type.0, ptr %p0, i32 0, i32 2
+  %t3 = load i32, ptr %t7, align 4
   %tc = icmp ult i32 %ta, %t3
   %td = select i1 %tc, i32 0, i32 %t3
   %te = sub i32 %ta, %td
-  %t8 = getelementptr inbounds %type.0, %type.0* %p0, i32 0, i32 1
-  %t4 = load i8**, i8*** %t8, align 4
-  %tf = getelementptr inbounds i8*, i8** %t4, i32 %te
-  %t5 = load i8*, i8** %tf, align 4
-  %tg = getelementptr inbounds i8, i8* %t5, i32 %tb
-  ret i8* %tg
+  %t8 = getelementptr inbounds %type.0, ptr %p0, i32 0, i32 1
+  %t4 = load ptr, ptr %t8, align 4
+  %tf = getelementptr inbounds ptr, ptr %t4, i32 %te
+  %t5 = load ptr, ptr %tf, align 4
+  %tg = getelementptr inbounds i8, ptr %t5, i32 %tb
+  ret ptr %tg
 }
 
 ; Check that CFI is after allocframe.
@@ -50,12 +50,12 @@ if.end:                                           ; preds = %entry
 ; CHECK: allocframe
 ; CHECK: cfi_def_cfa
 
-define void @sammy(%type.0* %p0, i32 %p1) #0 {
+define void @sammy(ptr %p0, i32 %p1) #0 {
 entry:
   %t0 = icmp sgt i32 %p1, 0
   br i1 %t0, label %if.then, label %if.else
 if.then:
-  call void @throw(%type.0* nonnull %p0)
+  call void @throw(ptr nonnull %p0)
   br label %if.end
 if.else:
   call void @nothrow() #2
@@ -64,7 +64,7 @@ if.end:
   ret void
 }
 
-declare void @throw(%type.0*) #1
+declare void @throw(ptr) #1
 declare void @nothrow() #2
 
 attributes #0 = { "target-cpu"="hexagonv55" }

diff  --git a/llvm/test/CodeGen/Hexagon/packetize-impdef-1.ll b/llvm/test/CodeGen/Hexagon/packetize-impdef-1.ll
index 47f9a860e80c9..6e84602fb7eaa 100644
--- a/llvm/test/CodeGen/Hexagon/packetize-impdef-1.ll
+++ b/llvm/test/CodeGen/Hexagon/packetize-impdef-1.ll
@@ -12,13 +12,12 @@
 ; CHECK: memd(r29+#0) = r{{[0-9]+}}:{{[0-9]+}}
 ; CHECK: memd(r29+#0) = r{{[0-9]+}}:{{[0-9]+}}
 
-define i8** @f0(i8* %a0) local_unnamed_addr {
+define ptr @f0(ptr %a0) local_unnamed_addr {
 b0:
-  %v0 = tail call i8* @f1(i32 0)
-  %v1 = tail call i8* @f1(i32 8)
-  %v2 = bitcast i8* %v1 to i8**
-  %v3 = load i32, i32* undef, align 4
-  %v4 = tail call i8* @f4(i8* %a0, i32 0, i32 %v3)
+  %v0 = tail call ptr @f1(i32 0)
+  %v1 = tail call ptr @f1(i32 8)
+  %v3 = load i32, ptr undef, align 4
+  %v4 = tail call ptr @f4(ptr %a0, i32 0, i32 %v3)
   %v5 = sub nsw i32 %v3, 0
   br label %b1
 
@@ -42,10 +41,10 @@ b5:                                               ; preds = %b4
   br i1 undef, label %b27, label %b6
 
 b6:                                               ; preds = %b5
-  %v6 = ptrtoint i8* %v4 to i32
+  %v6 = ptrtoint ptr %v4 to i32
   %v7 = sub i32 0, %v6
-  %v8 = call i8* @f4(i8* nonnull %v4, i32 0, i32 %v7)
-  %v9 = call i8* @f4(i8* nonnull %v4, i32 undef, i32 %v5)
+  %v8 = call ptr @f4(ptr nonnull %v4, i32 0, i32 %v7)
+  %v9 = call ptr @f4(ptr nonnull %v4, i32 undef, i32 %v5)
   br label %b7
 
 b7:                                               ; preds = %b6
@@ -56,7 +55,7 @@ b8:                                               ; preds = %b7
 
 b9:                                               ; preds = %b8, %b7
   %v10 = phi i32 [ 2, %b8 ], [ 0, %b7 ]
-  %v11 = load i8, i8* %v9, align 1
+  %v11 = load i8, ptr %v9, align 1
   switch i8 %v11, label %b12 [
     i8 43, label %b10
     i8 45, label %b10
@@ -66,9 +65,9 @@ b10:                                              ; preds = %b9, %b9
   br i1 undef, label %b11, label %b12
 
 b11:                                              ; preds = %b10
-  %v12 = call i64 @f6(i8* nonnull %v9, i8** nonnull undef, i32 10)
-  %v13 = load i8*, i8** undef, align 4
-  %v14 = ptrtoint i8* %v13 to i32
+  %v12 = call i64 @f6(ptr nonnull %v9, ptr nonnull undef, i32 10)
+  %v13 = load ptr, ptr undef, align 4
+  %v14 = ptrtoint ptr %v13 to i32
   br label %b15
 
 b12:                                              ; preds = %b10, %b9
@@ -87,7 +86,7 @@ b15:                                              ; preds = %b13, %b11
   %v15 = phi i32 [ undef, %b13 ], [ %v14, %b11 ]
   %v16 = phi i32 [ 2, %b13 ], [ 1, %b11 ]
   %v17 = phi i64 [ undef, %b13 ], [ %v12, %b11 ]
-  %v18 = call i32* @f5()
+  %v18 = call ptr @f5()
   br label %b16
 
 b16:                                              ; preds = %b15
@@ -96,7 +95,7 @@ b16:                                              ; preds = %b15
   br i1 %v20, label %b17, label %b18
 
 b17:                                              ; preds = %b16
-  call void @f2(i8* %v8)
+  call void @f2(ptr %v8)
   br label %b27
 
 b18:                                              ; preds = %b16
@@ -135,24 +134,24 @@ b26:                                              ; preds = %b25
   unreachable
 
 b27:                                              ; preds = %b17, %b5
-  call void @f2(i8* %v4)
-  call void @f2(i8* %v0)
-  %v29 = call i8* @f3(i8* undef, i8* nonnull %a0)
-  ret i8** %v2
+  call void @f2(ptr %v4)
+  call void @f2(ptr %v0)
+  %v29 = call ptr @f3(ptr undef, ptr nonnull %a0)
+  ret ptr %v1
 
 b28:                                              ; preds = %b25
-  call void @f2(i8* %v9)
+  call void @f2(ptr %v9)
   unreachable
 }
 
-declare i8* @f1(i32) local_unnamed_addr
+declare ptr @f1(i32) local_unnamed_addr
 
-declare void @f2(i8* nocapture) local_unnamed_addr
+declare void @f2(ptr nocapture) local_unnamed_addr
 
-declare i8* @f3(i8*, i8* nocapture readonly) local_unnamed_addr
+declare ptr @f3(ptr, ptr nocapture readonly) local_unnamed_addr
 
-declare i8* @f4(i8*, i32, i32) local_unnamed_addr
+declare ptr @f4(ptr, i32, i32) local_unnamed_addr
 
-declare i32* @f5() local_unnamed_addr
+declare ptr @f5() local_unnamed_addr
 
-declare i64 @f6(i8*, i8**, i32) local_unnamed_addr
+declare i64 @f6(ptr, ptr, i32) local_unnamed_addr

diff  --git a/llvm/test/CodeGen/Hexagon/packetize-impdef.ll b/llvm/test/CodeGen/Hexagon/packetize-impdef.ll
index a047883234e7c..c9c1e555ab7f9 100644
--- a/llvm/test/CodeGen/Hexagon/packetize-impdef.ll
+++ b/llvm/test/CodeGen/Hexagon/packetize-impdef.ll
@@ -6,11 +6,11 @@
 ;
 ; CHECK: f1:
 
-%0 = type { i8 (i8)*, i8 (i8, %1*)*, i8 (i8)* }
+%0 = type { ptr, ptr, ptr }
 %1 = type { [16384 x i16], [8192 x i16], [8192 x i16], [8192 x i32], i32, i32, i32, %2, %2, i32, i32, i32, i32 }
 %2 = type { i32, i32, i32 }
 %3 = type { %4 }
-%4 = type { i32, i8* }
+%4 = type { i32, ptr }
 %5 = type { i8, i32, i32, i32, i16, i16, i16, i16, i8, i16, %6, %6, i32, i16, i16, i16, i16, i8 }
 %6 = type { i32, i32, i32, i32, i32, i32, i32, i8, i8 }
 %7 = type { i8, i8, i8, i8, i32, i32, i32, i32, i32, i32, %2, %2, %8, i8 }
@@ -21,9 +21,9 @@
 @g2 = external hidden constant %3, align 4
 @g3 = external hidden constant %3, align 4
 
-declare void @f0(%3*, i32, i32)
+declare void @f0(ptr, i32, i32)
 
-define hidden fastcc i32 @f1(%5* %a0, %7* %a1, %2* %a2) {
+define hidden fastcc i32 @f1(ptr %a0, ptr %a1, ptr %a2) {
 b0:
   br i1 undef, label %b1, label %b2
 
@@ -40,7 +40,7 @@ b4:                                               ; preds = %b2
   br i1 undef, label %b6, label %b5
 
 b5:                                               ; preds = %b4
-  %v0 = getelementptr inbounds %5, %5* %a0, i32 0, i32 1
+  %v0 = getelementptr inbounds %5, ptr %a0, i32 0, i32 1
   br label %b7
 
 b6:                                               ; preds = %b4
@@ -49,8 +49,8 @@ b6:                                               ; preds = %b4
 b7:                                               ; preds = %b52, %b5
   %v1 = phi i32 [ undef, %b5 ], [ %v43, %b52 ]
   %v2 = phi i32 [ 5, %b5 ], [ %v45, %b52 ]
-  %v3 = load i32, i32* undef, align 4
-  %v4 = load i32, i32* %v0, align 4
+  %v3 = load i32, ptr undef, align 4
+  %v4 = load i32, ptr %v0, align 4
   %v5 = sext i32 %v4 to i64
   %v6 = sdiv i64 0, %v5
   %v7 = trunc i64 %v6 to i32
@@ -58,11 +58,11 @@ b7:                                               ; preds = %b52, %b5
   br i1 %v8, label %b8, label %b9
 
 b8:                                               ; preds = %b7
-  call void @f0(%3* @g2, i32 %v3, i32 %v4)
+  call void @f0(ptr @g2, i32 %v3, i32 %v4)
   br label %b54
 
 b9:                                               ; preds = %b7
-  %v9 = load i8, i8* undef, align 1
+  %v9 = load i8, ptr undef, align 1
   %v10 = zext i8 %v9 to i32
   br i1 undef, label %b10, label %b11
 
@@ -76,8 +76,8 @@ b12:                                              ; preds = %b11
   br i1 undef, label %b13, label %b47
 
 b13:                                              ; preds = %b12
-  %v11 = getelementptr inbounds [7 x %0], [7 x %0]* @g0, i32 0, i32 %v10, i32 2
-  %v12 = load i8 (i8)*, i8 (i8)** %v11, align 4
+  %v11 = getelementptr inbounds [7 x %0], ptr @g0, i32 0, i32 %v10, i32 2
+  %v12 = load ptr, ptr %v11, align 4
   %v13 = call zeroext i8 %v12(i8 zeroext %v9)
   br i1 undef, label %b14, label %b47
 
@@ -122,8 +122,8 @@ b26:                                              ; preds = %b24
 
 b27:                                              ; preds = %b36, %b26
   %v14 = phi i32 [ 16, %b26 ], [ %v30, %b36 ]
-  %v15 = getelementptr inbounds %1, %1* @g1, i32 0, i32 2, i32 %v14
-  %v16 = load i16, i16* %v15, align 2
+  %v15 = getelementptr inbounds %1, ptr @g1, i32 0, i32 2, i32 %v14
+  %v16 = load i16, ptr %v15, align 2
   %v17 = sext i16 %v16 to i32
   %v18 = select i1 undef, i32 undef, i32 %v17
   %v19 = sext i32 %v18 to i64
@@ -151,15 +151,15 @@ b31:                                              ; preds = %b29
   br i1 %v28, label %b32, label %b33
 
 b32:                                              ; preds = %b31
-  store i32 %v26, i32* undef, align 4
+  store i32 %v26, ptr undef, align 4
   br label %b36
 
 b33:                                              ; preds = %b31
   br i1 undef, label %b34, label %b35
 
 b34:                                              ; preds = %b33
-  %v29 = getelementptr inbounds %1, %1* @g1, i32 0, i32 3, i32 %v14
-  store i32 undef, i32* %v29, align 4
+  %v29 = getelementptr inbounds %1, ptr @g1, i32 0, i32 3, i32 %v14
+  store i32 undef, ptr %v29, align 4
   br label %b36
 
 b35:                                              ; preds = %b33
@@ -190,7 +190,7 @@ b42:                                              ; preds = %b41
   br label %b47
 
 b43:                                              ; preds = %b41
-  %v33 = load i64, i64* undef, align 8
+  %v33 = load i64, ptr undef, align 8
   br label %b44
 
 b44:                                              ; preds = %b44, %b43
@@ -234,7 +234,7 @@ b52:                                              ; preds = %b51, %b49
   br i1 %v46, label %b54, label %b7
 
 b53:                                              ; preds = %b50
-  call void @f0(%3* @g3, i32 %v43, i32 undef)
+  call void @f0(ptr @g3, i32 %v43, i32 undef)
   unreachable
 
 b54:                                              ; preds = %b52, %b48, %b8

diff  --git a/llvm/test/CodeGen/Hexagon/packetize-l2fetch.ll b/llvm/test/CodeGen/Hexagon/packetize-l2fetch.ll
index 4d2459a04e35e..8c057b67da033 100644
--- a/llvm/test/CodeGen/Hexagon/packetize-l2fetch.ll
+++ b/llvm/test/CodeGen/Hexagon/packetize-l2fetch.ll
@@ -11,21 +11,21 @@
 target triple = "hexagon"
 
 @g0 = external global [32768 x i8], align 8
- at g1 = external local_unnamed_addr global [15 x i8*], align 8
+ at g1 = external local_unnamed_addr global [15 x ptr], align 8
 
 ; Function Attrs: nounwind
 define void @f0() local_unnamed_addr #0 {
 b0:
-  store i8* inttoptr (i32 and (i32 sext (i8 ptrtoint (i8* getelementptr inbounds ([32768 x i8], [32768 x i8]* @g0, i32 0, i32 10000) to i8) to i32), i32 -65536) to i8*), i8** getelementptr inbounds ([15 x i8*], [15 x i8*]* @g1, i32 0, i32 1), align 4
-  store i8* inttoptr (i32 and (i32 sext (i8 ptrtoint (i8* getelementptr inbounds ([32768 x i8], [32768 x i8]* @g0, i32 0, i32 10000) to i8) to i32), i32 -65536) to i8*), i8** getelementptr inbounds ([15 x i8*], [15 x i8*]* @g1, i32 0, i32 6), align 8
+  store ptr inttoptr (i32 and (i32 sext (i8 ptrtoint (ptr getelementptr inbounds ([32768 x i8], ptr @g0, i32 0, i32 10000) to i8) to i32), i32 -65536) to ptr), ptr getelementptr inbounds ([15 x ptr], ptr @g1, i32 0, i32 1), align 4
+  store ptr inttoptr (i32 and (i32 sext (i8 ptrtoint (ptr getelementptr inbounds ([32768 x i8], ptr @g0, i32 0, i32 10000) to i8) to i32), i32 -65536) to ptr), ptr getelementptr inbounds ([15 x ptr], ptr @g1, i32 0, i32 6), align 8
   tail call void @f1()
-  %v0 = load i8*, i8** getelementptr inbounds ([15 x i8*], [15 x i8*]* @g1, i32 0, i32 0), align 8
-  tail call void @llvm.hexagon.Y5.l2fetch(i8* %v0, i64 -9223372036854775808)
+  %v0 = load ptr, ptr @g1, align 8
+  tail call void @llvm.hexagon.Y5.l2fetch(ptr %v0, i64 -9223372036854775808)
   ret void
 }
 
 ; Function Attrs: nounwind
-declare void @llvm.hexagon.Y5.l2fetch(i8*, i64) #1
+declare void @llvm.hexagon.Y5.l2fetch(ptr, i64) #1
 
 ; Function Attrs: nounwind
 declare void @f1() #1

diff  --git a/llvm/test/CodeGen/Hexagon/packetize-return-arg.ll b/llvm/test/CodeGen/Hexagon/packetize-return-arg.ll
index b18fc23eca81f..e702b184de60c 100644
--- a/llvm/test/CodeGen/Hexagon/packetize-return-arg.ll
+++ b/llvm/test/CodeGen/Hexagon/packetize-return-arg.ll
@@ -7,31 +7,31 @@
 target triple = "hexagon-unknown--elf"
 
 ; Function Attrs: nounwind
-define i8* @fred(i8* %user_context, i32 %x) #0 {
+define ptr @fred(ptr %user_context, i32 %x) #0 {
 entry:
   %and14 = add i32 %x, 255
   %add1 = and i32 %and14, -128
-  %call = tail call i8* @malloc(i32 %add1) #1
-  %cmp = icmp eq i8* %call, null
+  %call = tail call ptr @malloc(i32 %add1) #1
+  %cmp = icmp eq ptr %call, null
   br i1 %cmp, label %cleanup, label %if.end
 
 if.end:                                           ; preds = %entry
-  %0 = ptrtoint i8* %call to i32
+  %0 = ptrtoint ptr %call to i32
   %sub4 = add i32 %0, 131
   %and5 = and i32 %sub4, -128
-  %1 = inttoptr i32 %and5 to i8*
-  %2 = inttoptr i32 %and5 to i8**
-  %arrayidx = getelementptr inbounds i8*, i8** %2, i32 -1
-  store i8* %call, i8** %arrayidx, align 4
+  %1 = inttoptr i32 %and5 to ptr
+  %2 = inttoptr i32 %and5 to ptr
+  %arrayidx = getelementptr inbounds ptr, ptr %2, i32 -1
+  store ptr %call, ptr %arrayidx, align 4
   br label %cleanup
 
 cleanup:                                          ; preds = %if.end, %entry
-  %retval.0 = phi i8* [ %1, %if.end ], [ null, %entry ]
-  ret i8* %retval.0
+  %retval.0 = phi ptr [ %1, %if.end ], [ null, %entry ]
+  ret ptr %retval.0
 }
 
 ; Function Attrs: nounwind
-declare noalias i8* @malloc(i32) local_unnamed_addr #1
+declare noalias ptr @malloc(i32) local_unnamed_addr #1
 
 attributes #0 = { nounwind }
 attributes #1 = { nobuiltin nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/packetize-tailcall-arg.ll b/llvm/test/CodeGen/Hexagon/packetize-tailcall-arg.ll
index 17afd7df94a3d..db617dfd2f524 100644
--- a/llvm/test/CodeGen/Hexagon/packetize-tailcall-arg.ll
+++ b/llvm/test/CodeGen/Hexagon/packetize-tailcall-arg.ll
@@ -8,15 +8,14 @@
 ; CHECK: {
 ; CHECK-NOT: {
 
-define void @fred(i8* %p) nounwind {
+define void @fred(ptr %p) nounwind {
 entry:
-  %arrayidx = getelementptr inbounds i8, i8* %p, i32 -4
-  %t0 = bitcast i8* %arrayidx to i8**
-  %t1 = load i8*, i8** %t0, align 4
-  tail call void @free(i8* %t1)
+  %arrayidx = getelementptr inbounds i8, ptr %p, i32 -4
+  %t1 = load ptr, ptr %arrayidx, align 4
+  tail call void @free(ptr %t1)
   ret void
 }
 
 ; Function Attrs: nounwind
-declare void @free(i8* nocapture) nounwind
+declare void @free(ptr nocapture) nounwind
 

diff  --git a/llvm/test/CodeGen/Hexagon/packetize-volatiles.ll b/llvm/test/CodeGen/Hexagon/packetize-volatiles.ll
index 33c7c640974d6..df594258b8403 100644
--- a/llvm/test/CodeGen/Hexagon/packetize-volatiles.ll
+++ b/llvm/test/CodeGen/Hexagon/packetize-volatiles.ll
@@ -4,17 +4,17 @@
 target triple = "hexagon-unknown-linux-gnu"
 
 ; Function Attrs: nounwind
-define void @f0(i8* nocapture %a0, i8* nocapture %a1) #0 {
+define void @f0(ptr nocapture %a0, ptr nocapture %a1) #0 {
 b0:
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
-  %v0 = phi i8* [ %a1, %b0 ], [ %v2, %b1 ]
-  %v1 = phi i8* [ %a0, %b0 ], [ %v4, %b1 ]
-  %v2 = getelementptr inbounds i8, i8* %v0, i32 1
-  %v3 = load volatile i8, i8* %v0, align 1, !tbaa !0
-  %v4 = getelementptr inbounds i8, i8* %v1, i32 1
-  store volatile i8 %v3, i8* %v1, align 1, !tbaa !0
+  %v0 = phi ptr [ %a1, %b0 ], [ %v2, %b1 ]
+  %v1 = phi ptr [ %a0, %b0 ], [ %v4, %b1 ]
+  %v2 = getelementptr inbounds i8, ptr %v0, i32 1
+  %v3 = load volatile i8, ptr %v0, align 1, !tbaa !0
+  %v4 = getelementptr inbounds i8, ptr %v1, i32 1
+  store volatile i8 %v3, ptr %v1, align 1, !tbaa !0
   %v5 = icmp eq i8 %v3, 0
   br i1 %v5, label %b2, label %b1
 

diff  --git a/llvm/test/CodeGen/Hexagon/partword-cmpxchg.ll b/llvm/test/CodeGen/Hexagon/partword-cmpxchg.ll
index 3a52d8546b85a..1ef6b95930a9b 100644
--- a/llvm/test/CodeGen/Hexagon/partword-cmpxchg.ll
+++ b/llvm/test/CodeGen/Hexagon/partword-cmpxchg.ll
@@ -2,27 +2,27 @@
 
 ; CHECK-LABEL: danny
 ; CHECK: memw_locked
-define i8 @danny(i8* %a0) unnamed_addr #0 {
+define i8 @danny(ptr %a0) unnamed_addr #0 {
 start:
-  %v0 = cmpxchg i8* %a0, i8 0, i8 1 seq_cst seq_cst
+  %v0 = cmpxchg ptr %a0, i8 0, i8 1 seq_cst seq_cst
   %v1 = extractvalue { i8, i1 } %v0, 0
   ret i8 %v1
 }
 
 ; CHECK-LABEL: sammy
 ; CHECK: memw_locked
-define i16 @sammy(i16* %a0) unnamed_addr #0 {
+define i16 @sammy(ptr %a0) unnamed_addr #0 {
 start:
-  %v0 = cmpxchg i16* %a0, i16 0, i16 1 seq_cst seq_cst
+  %v0 = cmpxchg ptr %a0, i16 0, i16 1 seq_cst seq_cst
   %v1 = extractvalue { i16, i1 } %v0, 0
   ret i16 %v1
 }
 
 ; CHECK-LABEL: kirby
 ; CHECK: memw_locked
-define i32 @kirby(i32* %a0) unnamed_addr #0 {
+define i32 @kirby(ptr %a0) unnamed_addr #0 {
 start:
-  %v0 = cmpxchg i32* %a0, i32 0, i32 1 seq_cst seq_cst
+  %v0 = cmpxchg ptr %a0, i32 0, i32 1 seq_cst seq_cst
   %v1 = extractvalue { i32, i1 } %v0, 0
   ret i32 %v1
 }

diff  --git a/llvm/test/CodeGen/Hexagon/peephole-kill-flags.ll b/llvm/test/CodeGen/Hexagon/peephole-kill-flags.ll
index 2031a71c7e627..4bf49c404a369 100644
--- a/llvm/test/CodeGen/Hexagon/peephole-kill-flags.ll
+++ b/llvm/test/CodeGen/Hexagon/peephole-kill-flags.ll
@@ -6,12 +6,12 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define i32 @f0(i32* %a0, i32 %a1) #0 {
+define i32 @f0(ptr %a0, i32 %a1) #0 {
 b0:
   br label %b1
 
 b1:                                               ; preds = %b0
-  %v0 = load i32, i32* %a0, align 4
+  %v0 = load i32, ptr %a0, align 4
   %v1 = mul nsw i32 2, %v0
   %v2 = icmp slt i32 %a1, %v1
   br i1 %v2, label %b2, label %b3

diff  --git a/llvm/test/CodeGen/Hexagon/peephole-move-phi.ll b/llvm/test/CodeGen/Hexagon/peephole-move-phi.ll
index 906c5bfe4033c..70cb6939d2f86 100644
--- a/llvm/test/CodeGen/Hexagon/peephole-move-phi.ll
+++ b/llvm/test/CodeGen/Hexagon/peephole-move-phi.ll
@@ -29,7 +29,7 @@ b3:                                               ; preds = %b3, %b2
   %v6 = tail call <128 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v5, <32 x i32> undef) #2
   %v7 = tail call <128 x i1> @llvm.hexagon.V6.pred.or.128B(<128 x i1> %v6, <128 x i1> undef) #2
   %v8 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %v7, <32 x i32> undef, <32 x i32> undef) #2
-  tail call void asm sideeffect "if($0) vmem($1)=$2;", "q,r,v,~{memory}"(<128 x i1> undef, <32 x i32>* undef, <32 x i32> %v8) #2
+  tail call void asm sideeffect "if($0) vmem($1)=$2;", "q,r,v,~{memory}"(<128 x i1> undef, ptr undef, <32 x i32> %v8) #2
   br label %b3
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/pic-jt-big.ll b/llvm/test/CodeGen/Hexagon/pic-jt-big.ll
index 25ee04521b647..d2c8681c9e585 100644
--- a/llvm/test/CodeGen/Hexagon/pic-jt-big.ll
+++ b/llvm/test/CodeGen/Hexagon/pic-jt-big.ll
@@ -22,24 +22,24 @@ b0:
   ]
 
 b1:                                               ; preds = %b0
-  tail call void bitcast (void (...)* @f1 to void ()*)() #0
+  tail call void @f1() #0
   br label %b8
 
 b2:                                               ; preds = %b0
-  %v0 = load i32, i32* @g0, align 4, !tbaa !0
+  %v0 = load i32, ptr @g0, align 4, !tbaa !0
   %v1 = add nsw i32 %v0, 99
   br label %b9
 
 b3:                                               ; preds = %b0
-  %v2 = load i32, i32* @g1, align 4, !tbaa !0
-  %v3 = load i32, i32* @g0, align 4, !tbaa !0
+  %v2 = load i32, ptr @g1, align 4, !tbaa !0
+  %v3 = load i32, ptr @g0, align 4, !tbaa !0
   %v4 = add nsw i32 %v3, %v2
   tail call void @f2(i32 %v4) #0
   br label %b8
 
 b4:                                               ; preds = %b0
-  %v5 = load i32, i32* @g1, align 4, !tbaa !0
-  %v6 = load i32, i32* @g0, align 4, !tbaa !0
+  %v5 = load i32, ptr @g1, align 4, !tbaa !0
+  %v6 = load i32, ptr @g0, align 4, !tbaa !0
   %v7 = mul nsw i32 %v6, 2
   %v8 = add i32 %v5, 9
   %v9 = add i32 %v8, %v7

diff  --git a/llvm/test/CodeGen/Hexagon/pic-jumptables.ll b/llvm/test/CodeGen/Hexagon/pic-jumptables.ll
index caf3f9cb81fe5..c19b251fed039 100644
--- a/llvm/test/CodeGen/Hexagon/pic-jumptables.ll
+++ b/llvm/test/CodeGen/Hexagon/pic-jumptables.ll
@@ -16,7 +16,7 @@ entry:
   ]
 
 sw.bb:                                            ; preds = %entry
-  tail call void bitcast (void (...)* @baz1 to void ()*)() nounwind
+  tail call void @baz1() nounwind
   br label %sw.epilog
 
 sw.bb1:                                           ; preds = %entry

diff  --git a/llvm/test/CodeGen/Hexagon/pic-local.ll b/llvm/test/CodeGen/Hexagon/pic-local.ll
index 6544b3d32165a..163d3ea438d06 100644
--- a/llvm/test/CodeGen/Hexagon/pic-local.ll
+++ b/llvm/test/CodeGen/Hexagon/pic-local.ll
@@ -8,12 +8,12 @@ define internal void @f2() {
   ret void
 }
 
-define void()* @get_f1() {
+define ptr @get_f1() {
   ; CHECK:  r0 = add(pc,##.Lf1 at PCREL)
-  ret void()* @f1
+  ret ptr @f1
 }
 
-define void()* @get_f2() {
+define ptr @get_f2() {
   ; CHECK: r0 = add(pc,##f2 at PCREL)
-  ret void()* @f2
+  ret ptr @f2
 }

diff  --git a/llvm/test/CodeGen/Hexagon/pic-regusage.ll b/llvm/test/CodeGen/Hexagon/pic-regusage.ll
index 36b5072dd7928..a977a353e0f7c 100644
--- a/llvm/test/CodeGen/Hexagon/pic-regusage.ll
+++ b/llvm/test/CodeGen/Hexagon/pic-regusage.ll
@@ -10,21 +10,21 @@
 @.str = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1
 
 ; Function Attrs: nounwind optsize
-define i32 @_Z7testR14Pi(i32* nocapture %res) #0 {
+define i32 @_Z7testR14Pi(ptr nocapture %res) #0 {
 entry:
-  %0 = load i32, i32* %res, align 4
+  %0 = load i32, ptr %res, align 4
   %1 = tail call { i32, i32 } asm "r0=$2\0A\09$1=add(r0,#$3)\0A\09$0=add(r0,#$4)\0A\09", "=r,=r,r,i,i,~{r0},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r15},~{r16},~{r17},~{r18},~{r19},~{r20},~{r21},~{r22},~{r23},~{r24},~{r25},~{r26},~{r27}"(i32 %0, i32 40, i32 50) #1
   %asmresult = extractvalue { i32, i32 } %1, 0
   %asmresult1 = extractvalue { i32, i32 } %1, 1
-  store i32 %asmresult, i32* %res, align 4
-  %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %asmresult1) #2
-  %2 = load i32, i32* %res, align 4
-  %call2 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %2) #2
+  store i32 %asmresult, ptr %res, align 4
+  %call = tail call i32 (ptr, ...) @printf(ptr @.str, i32 %asmresult1) #2
+  %2 = load i32, ptr %res, align 4
+  %call2 = tail call i32 (ptr, ...) @printf(ptr @.str, i32 %2) #2
   ret i32 0
 }
 
 ; Function Attrs: nounwind optsize
-declare i32 @printf(i8*, ...) #0
+declare i32 @printf(ptr, ...) #0
 
 ; Same as above for R15.
 ; CHECK: call __save_r16_through_r27
@@ -32,16 +32,16 @@ declare i32 @printf(i8*, ...) #0
 ; CHECK: r15 =
 
 ; Function Attrs: nounwind optsize
-define i32 @_Z7testR15Pi(i32* nocapture %res) #0 {
+define i32 @_Z7testR15Pi(ptr nocapture %res) #0 {
 entry:
-  %0 = load i32, i32* %res, align 4
+  %0 = load i32, ptr %res, align 4
   %1 = tail call { i32, i32 } asm "r0=$2\0A\09$1=add(r0,#$3)\0A\09$0=add(r0,#$4)\0A\09", "=r,=r,r,i,i,~{r0},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r16},~{r17},~{r18},~{r19},~{r20},~{r21},~{r22},~{r23},~{r24},~{r25},~{r26},~{r27}"(i32 %0, i32 40, i32 50) #1
   %asmresult = extractvalue { i32, i32 } %1, 0
   %asmresult1 = extractvalue { i32, i32 } %1, 1
-  store i32 %asmresult, i32* %res, align 4
-  %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %asmresult1) #2
-  %2 = load i32, i32* %res, align 4
-  %call2 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %2) #2
+  store i32 %asmresult, ptr %res, align 4
+  %call = tail call i32 (ptr, ...) @printf(ptr @.str, i32 %asmresult1) #2
+  %2 = load i32, ptr %res, align 4
+  %call2 = tail call i32 (ptr, ...) @printf(ptr @.str, i32 %2) #2
   ret i32 0
 }
 
@@ -51,16 +51,16 @@ entry:
 ; CHECK: r28 =
 
 ; Function Attrs: nounwind optsize
-define i32 @_Z7testR28Pi(i32* nocapture %res) #0 {
+define i32 @_Z7testR28Pi(ptr nocapture %res) #0 {
 entry:
-  %0 = load i32, i32* %res, align 4
+  %0 = load i32, ptr %res, align 4
   %1 = tail call { i32, i32 } asm "r0=$2\0A\09$1=add(r0,#$3)\0A\09$0=add(r0,#$4)\0A\09", "=r,=r,r,i,i,~{r0},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{r16},~{r17},~{r18},~{r19},~{r20},~{r21},~{r22},~{r23},~{r24},~{r25},~{r26}"(i32 %0, i32 40, i32 50) #1
   %asmresult = extractvalue { i32, i32 } %1, 0
   %asmresult1 = extractvalue { i32, i32 } %1, 1
-  store i32 %asmresult, i32* %res, align 4
-  %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %asmresult1) #2
-  %2 = load i32, i32* %res, align 4
-  %call2 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %2) #2
+  store i32 %asmresult, ptr %res, align 4
+  %call = tail call i32 (ptr, ...) @printf(ptr @.str, i32 %asmresult1) #2
+  %2 = load i32, ptr %res, align 4
+  %call2 = tail call i32 (ptr, ...) @printf(ptr @.str, i32 %2) #2
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/pic-sdata.ll b/llvm/test/CodeGen/Hexagon/pic-sdata.ll
index 446734b9ca01a..356646653ef53 100644
--- a/llvm/test/CodeGen/Hexagon/pic-sdata.ll
+++ b/llvm/test/CodeGen/Hexagon/pic-sdata.ll
@@ -13,7 +13,7 @@
 ; PIC: r[[R0:[0-9]+]] = add(pc,##_GLOBAL_OFFSET_TABLE_ at PCREL)
 ; PIC: = memw(r[[R0]]+##g0 at GOT)
 define i32 @f0() #0 {
-  %v0 = load i32, i32* @g0
+  %v0 = load i32, ptr @g0
   ret i32 %v0
 }
 
@@ -22,7 +22,7 @@ define i32 @f0() #0 {
 ; PIC: r[[R1:[0-9]+]] = add(pc,##_GLOBAL_OFFSET_TABLE_ at PCREL)
 ; PIC: = memw(r[[R1]]+##g1 at GOT)
 define i32 @f1() #0 {
-  %v0 = load i32, i32* @g1
+  %v0 = load i32, ptr @g1
   ret i32 %v0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/pic-simple.ll b/llvm/test/CodeGen/Hexagon/pic-simple.ll
index aeb21ef7de1cf..b17dd599effa1 100644
--- a/llvm/test/CodeGen/Hexagon/pic-simple.ll
+++ b/llvm/test/CodeGen/Hexagon/pic-simple.ll
@@ -9,8 +9,8 @@
 
 define i32 @foo() nounwind {
 entry:
-  %0 = load i32, i32* @src, align 4, !tbaa !0
-  store i32 %0, i32* @dst, align 4, !tbaa !0
+  %0 = load i32, ptr @src, align 4, !tbaa !0
+  store i32 %0, ptr @dst, align 4, !tbaa !0
   %call = tail call i32 @baz(i32 %0) nounwind
   ret i32 0
 }

diff  --git a/llvm/test/CodeGen/Hexagon/pic-static.ll b/llvm/test/CodeGen/Hexagon/pic-static.ll
index 95da5f060d721..8ef75a5f95d94 100644
--- a/llvm/test/CodeGen/Hexagon/pic-static.ll
+++ b/llvm/test/CodeGen/Hexagon/pic-static.ll
@@ -5,12 +5,12 @@
 ; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}+##bar at GOT)
 
 @x = internal global i32 9, align 4
- at bar = external global i32*
+ at bar = external global ptr
 
 define i32 @foo(i32 %y) nounwind {
 entry:
-  store i32* @x, i32** @bar, align 4, !tbaa !0
-  %0 = load i32, i32* @x, align 4, !tbaa !3
+  store ptr @x, ptr @bar, align 4, !tbaa !0
+  %0 = load i32, ptr @x, align 4, !tbaa !3
   %add = add nsw i32 %0, %y
   ret i32 %add
 }

diff  --git a/llvm/test/CodeGen/Hexagon/plt-rel.ll b/llvm/test/CodeGen/Hexagon/plt-rel.ll
index d1d97a62263cd..ff0c455459199 100644
--- a/llvm/test/CodeGen/Hexagon/plt-rel.ll
+++ b/llvm/test/CodeGen/Hexagon/plt-rel.ll
@@ -14,19 +14,19 @@ target triple = "hexagon"
 ; Function Attrs: norecurse nounwind
 define void @_Z14SigUsr1Handleri(i32) local_unnamed_addr #0 {
 entry:
-  store volatile i32 1, i32* @_ZL13g_usr1_called, align 4
+  store volatile i32 1, ptr @_ZL13g_usr1_called, align 4
   ret void
 }
 
 ; Function Attrs: norecurse nounwind
 define zeroext i1 @_Z27CheckForMonitorCancellationv() local_unnamed_addr #0 {
 entry:
-  %0 = load volatile i32, i32* @_ZL13g_usr1_called, align 4
+  %0 = load volatile i32, ptr @_ZL13g_usr1_called, align 4
   %tobool = icmp eq i32 %0, 0
   br i1 %tobool, label %return, label %if.then
 
 if.then:                                          ; preds = %entry
-  store volatile i32 0, i32* @_ZL13g_usr1_called, align 4
+  store volatile i32 0, ptr @_ZL13g_usr1_called, align 4
   br label %return
 
 return:                                           ; preds = %entry, %if.then

diff  --git a/llvm/test/CodeGen/Hexagon/pmpyw_acc.ll b/llvm/test/CodeGen/Hexagon/pmpyw_acc.ll
index 8cbb8ac6ef36c..881d74464a36d 100644
--- a/llvm/test/CodeGen/Hexagon/pmpyw_acc.ll
+++ b/llvm/test/CodeGen/Hexagon/pmpyw_acc.ll
@@ -10,31 +10,31 @@ b0:
   %v3 = alloca i32, align 4
   %v4 = alloca i64, align 8
   %v5 = alloca i64, align 8
-  store i32 %a0, i32* %v0, align 4
-  store i32 %a1, i32* %v1, align 4
-  store i32 %a2, i32* %v2, align 4
-  store i32 %a3, i32* %v3, align 4
-  %v6 = load i32, i32* %v0, align 4
-  %v7 = load i32, i32* %v1, align 4
+  store i32 %a0, ptr %v0, align 4
+  store i32 %a1, ptr %v1, align 4
+  store i32 %a2, ptr %v2, align 4
+  store i32 %a3, ptr %v3, align 4
+  %v6 = load i32, ptr %v0, align 4
+  %v7 = load i32, ptr %v1, align 4
   %v8 = call i64 @llvm.hexagon.M4.pmpyw(i32 %v6, i32 %v7)
-  store i64 %v8, i64* %v5, align 8
-  %v9 = load i64, i64* %v5, align 8
-  store i64 %v9, i64* %v4, align 8
-  %v10 = load i64, i64* %v5, align 8
-  %v11 = load i32, i32* %v3, align 4
-  %v12 = load i64, i64* %v5, align 8
+  store i64 %v8, ptr %v5, align 8
+  %v9 = load i64, ptr %v5, align 8
+  store i64 %v9, ptr %v4, align 8
+  %v10 = load i64, ptr %v5, align 8
+  %v11 = load i32, ptr %v3, align 4
+  %v12 = load i64, ptr %v5, align 8
   %v13 = lshr i64 %v12, 32
   %v14 = trunc i64 %v13 to i32
   %v15 = call i64 @llvm.hexagon.M4.pmpyw.acc(i64 %v10, i32 %v11, i32 %v14)
-  store i64 %v15, i64* %v5, align 8
-  %v16 = load i64, i64* %v4, align 8
-  %v17 = load i64, i64* %v5, align 8
+  store i64 %v15, ptr %v5, align 8
+  %v16 = load i64, ptr %v4, align 8
+  %v17 = load i64, ptr %v5, align 8
   %v18 = lshr i64 %v17, 32
   %v19 = trunc i64 %v18 to i32
-  %v20 = load i32, i32* %v2, align 4
+  %v20 = load i32, ptr %v2, align 4
   %v21 = call i64 @llvm.hexagon.M4.pmpyw.acc(i64 %v16, i32 %v19, i32 %v20)
-  store i64 %v21, i64* %v4, align 8
-  %v22 = load i64, i64* %v4, align 8
+  store i64 %v21, ptr %v4, align 8
+  %v22 = load i64, ptr %v4, align 8
   %v23 = trunc i64 %v22 to i32
   ret i32 %v23
 }

diff  --git a/llvm/test/CodeGen/Hexagon/post-inc-aa-metadata.ll b/llvm/test/CodeGen/Hexagon/post-inc-aa-metadata.ll
index 43e4070966bd7..7d04ef8ff2e46 100644
--- a/llvm/test/CodeGen/Hexagon/post-inc-aa-metadata.ll
+++ b/llvm/test/CodeGen/Hexagon/post-inc-aa-metadata.ll
@@ -8,20 +8,20 @@
 target triple = "hexagon"
 
 ; Function Attrs: norecurse nounwind
-define void @fred(<16 x i32>* nocapture %p, <16 x i32>* nocapture readonly %q, i32 %n) local_unnamed_addr #0 {
+define void @fred(ptr nocapture %p, ptr nocapture readonly %q, i32 %n) local_unnamed_addr #0 {
 entry:
   %tobool2 = icmp eq i32 %n, 0
   br i1 %tobool2, label %while.end, label %while.body
 
 while.body:                                       ; preds = %entry, %while.body
   %n.addr.05 = phi i32 [ %dec, %while.body ], [ %n, %entry ]
-  %q.addr.04 = phi <16 x i32>* [ %incdec.ptr, %while.body ], [ %q, %entry ]
-  %p.addr.03 = phi <16 x i32>* [ %incdec.ptr1, %while.body ], [ %p, %entry ]
+  %q.addr.04 = phi ptr [ %incdec.ptr, %while.body ], [ %q, %entry ]
+  %p.addr.03 = phi ptr [ %incdec.ptr1, %while.body ], [ %p, %entry ]
   %dec = add i32 %n.addr.05, -1
-  %incdec.ptr = getelementptr inbounds <16 x i32>, <16 x i32>* %q.addr.04, i32 1
-  %0 = load <16 x i32>, <16 x i32>* %q.addr.04, align 64, !tbaa !1
-  %incdec.ptr1 = getelementptr inbounds <16 x i32>, <16 x i32>* %p.addr.03, i32 1
-  store <16 x i32> %0, <16 x i32>* %p.addr.03, align 64, !tbaa !1
+  %incdec.ptr = getelementptr inbounds <16 x i32>, ptr %q.addr.04, i32 1
+  %0 = load <16 x i32>, ptr %q.addr.04, align 64, !tbaa !1
+  %incdec.ptr1 = getelementptr inbounds <16 x i32>, ptr %p.addr.03, i32 1
+  store <16 x i32> %0, ptr %p.addr.03, align 64, !tbaa !1
   %tobool = icmp eq i32 %dec, 0
   br i1 %tobool, label %while.end, label %while.body
 

diff  --git a/llvm/test/CodeGen/Hexagon/postinc-aggr-dag-cycle.ll b/llvm/test/CodeGen/Hexagon/postinc-aggr-dag-cycle.ll
index e9d7e424dee5f..dbda3bd0418d0 100644
--- a/llvm/test/CodeGen/Hexagon/postinc-aggr-dag-cycle.ll
+++ b/llvm/test/CodeGen/Hexagon/postinc-aggr-dag-cycle.ll
@@ -10,9 +10,9 @@ target triple = "hexagon"
 %s.3 = type { %s.4 }
 %s.4 = type { %s.5 }
 %s.5 = type { i32 }
-%s.6 = type { %s.6*, %s.6* }
+%s.6 = type { ptr, ptr }
 
- at g0 = external constant %s.0*
+ at g0 = external constant ptr
 @g1 = external global i32
 @g2 = internal global %s.1 zeroinitializer, section ".data..percpu", align 4
 @g3 = external global [3 x i32]
@@ -25,10 +25,9 @@ target triple = "hexagon"
 define internal i32 @f0() #0 section ".init.text" {
 b0:
   %v0 = alloca i32, align 4
-  %v1 = load %s.0*, %s.0** @g0, align 4, !tbaa !0
-  %v2 = getelementptr inbounds %s.0, %s.0* %v1, i32 0, i32 0, i32 0
-  %v3 = tail call i32 @f1(i32* %v2, i32 3, i32 0) #0
-  %v4 = load i32, i32* @g1, align 4, !tbaa !4
+  %v1 = load ptr, ptr @g0, align 4, !tbaa !0
+  %v3 = tail call i32 @f1(ptr %v1, i32 3, i32 0) #0
+  %v4 = load i32, ptr @g1, align 4, !tbaa !4
   %v5 = icmp ult i32 %v3, %v4
   br i1 %v5, label %b1, label %b4
 
@@ -37,23 +36,21 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v6 = phi i32 [ %v18, %b2 ], [ %v3, %b1 ]
-  %v7 = tail call i32 asm "", "=r,0"(%s.1* @g2) #0, !srcloc !6
-  %v8 = getelementptr inbounds [3 x i32], [3 x i32]* @g3, i32 0, i32 %v6
-  %v9 = load i32, i32* %v8, align 4, !tbaa !7
+  %v7 = tail call i32 asm "", "=r,0"(ptr @g2) #0, !srcloc !6
+  %v8 = getelementptr inbounds [3 x i32], ptr @g3, i32 0, i32 %v6
+  %v9 = load i32, ptr %v8, align 4, !tbaa !7
   %v10 = add i32 %v9, %v7
-  %v11 = inttoptr i32 %v10 to %s.1*
-  store volatile i32 0, i32* %v0, align 4
-  %v12 = getelementptr inbounds %s.1, %s.1* %v11, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %v13 = load volatile i32, i32* %v0, align 4
-  store volatile i32 %v13, i32* %v12, align 4
-  %v14 = getelementptr inbounds %s.1, %s.1* %v11, i32 0, i32 2
-  %v15 = getelementptr inbounds %s.6, %s.6* %v14, i32 0, i32 0
-  store %s.6* %v14, %s.6** %v15, align 4, !tbaa !9
-  %v16 = getelementptr inbounds %s.1, %s.1* %v11, i32 0, i32 2, i32 1
-  store %s.6* %v14, %s.6** %v16, align 4, !tbaa !11
+  %v11 = inttoptr i32 %v10 to ptr
+  store volatile i32 0, ptr %v0, align 4
+  %v13 = load volatile i32, ptr %v0, align 4
+  store volatile i32 %v13, ptr %v11, align 4
+  %v14 = getelementptr inbounds %s.1, ptr %v11, i32 0, i32 2
+  store ptr %v14, ptr %v14, align 4, !tbaa !9
+  %v16 = getelementptr inbounds %s.1, ptr %v11, i32 0, i32 2, i32 1
+  store ptr %v14, ptr %v16, align 4, !tbaa !11
   %v17 = add i32 %v6, 1
-  %v18 = tail call i32 @f1(i32* %v2, i32 3, i32 %v17) #0
-  %v19 = load i32, i32* @g1, align 4, !tbaa !4
+  %v18 = tail call i32 @f1(ptr %v1, i32 3, i32 %v17) #0
+  %v19 = load i32, ptr @g1, align 4, !tbaa !4
   %v20 = icmp ult i32 %v18, %v19
   br i1 %v20, label %b2, label %b3
 
@@ -66,8 +63,8 @@ b4:                                               ; preds = %b3, %b0
   br i1 %v22, label %b6, label %b5, !prof !12
 
 b5:                                               ; preds = %b4
-  %v23 = tail call i32 (i8*, ...) @f3(i8* getelementptr inbounds ([29 x i8], [29 x i8]* @g4, i32 0, i32 0), i8* getelementptr inbounds ([22 x i8], [22 x i8]* @g5, i32 0, i32 0), i32 354, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @g6, i32 0, i32 0)) #0
-  tail call void (i8*, ...) @f4(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @g7, i32 0, i32 0)) #1
+  %v23 = tail call i32 (ptr, ...) @f3(ptr @g4, ptr @g5, i32 354, ptr @g6) #0
+  tail call void (ptr, ...) @f4(ptr @g7) #1
   unreachable
 
 b6:                                               ; preds = %b4
@@ -75,16 +72,16 @@ b6:                                               ; preds = %b4
 }
 
 ; Function Attrs: nounwind
-declare i32 @f1(i32*, i32, i32) #0
+declare i32 @f1(ptr, i32, i32) #0
 
 ; Function Attrs: nounwind
 declare i32 @f2() #0
 
 ; Function Attrs: nounwind
-declare i32 @f3(i8*, ...) #0
+declare i32 @f3(ptr, ...) #0
 
 ; Function Attrs: noreturn
-declare void @f4(i8*, ...) #1
+declare void @f4(ptr, ...) #1
 
 attributes #0 = { nounwind "target-cpu"="hexagonv55" }
 attributes #1 = { noreturn }

diff  --git a/llvm/test/CodeGen/Hexagon/postinc-float.ll b/llvm/test/CodeGen/Hexagon/postinc-float.ll
index 55bf82ddff583..fed29bf304861 100644
--- a/llvm/test/CodeGen/Hexagon/postinc-float.ll
+++ b/llvm/test/CodeGen/Hexagon/postinc-float.ll
@@ -3,23 +3,23 @@
 ; CHECK-LABEL: ldf
 ; CHECK: memw(r{{[0-9]+}}++#4)
 ; CHECK: memw(r{{[0-9]+}}++#4)
-define float @ldf(float* nocapture readonly %x, float* nocapture readonly %y) local_unnamed_addr #0 {
+define float @ldf(ptr nocapture readonly %x, ptr nocapture readonly %y) local_unnamed_addr #0 {
 entry:
   br label %for.body
 
 for.body:
-  %arrayidx.phi = phi float* [ %x, %entry ], [ %arrayidx.inc, %for.body ]
-  %arrayidx1.phi = phi float* [ %y, %entry ], [ %arrayidx1.inc, %for.body ]
+  %arrayidx.phi = phi ptr [ %x, %entry ], [ %arrayidx.inc, %for.body ]
+  %arrayidx1.phi = phi ptr [ %y, %entry ], [ %arrayidx1.inc, %for.body ]
   %i.09 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
   %acc.08 = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
-  %0 = load float, float* %arrayidx.phi, align 4
-  %1 = load float, float* %arrayidx1.phi, align 4
+  %0 = load float, ptr %arrayidx.phi, align 4
+  %1 = load float, ptr %arrayidx1.phi, align 4
   %mul = fmul contract float %0, %1
   %add = fadd contract float %acc.08, %mul
   %inc = add nuw nsw i32 %i.09, 1
   %exitcond = icmp eq i32 %inc, 1024
-  %arrayidx.inc = getelementptr float, float* %arrayidx.phi, i32 1
-  %arrayidx1.inc = getelementptr float, float* %arrayidx1.phi, i32 1
+  %arrayidx.inc = getelementptr float, ptr %arrayidx.phi, i32 1
+  %arrayidx1.inc = getelementptr float, ptr %arrayidx1.phi, i32 1
   br i1 %exitcond, label %for.end, label %for.body
 
 for.end:
@@ -29,23 +29,23 @@ for.end:
 ; CHECK-LABEL: ldd
 ; CHECK: memd(r{{[0-9]+}}++#8)
 ; CHECK: memd(r{{[0-9]+}}++#8)
-define double @ldd(double* nocapture readonly %x, double* nocapture readonly %y) local_unnamed_addr #0 {
+define double @ldd(ptr nocapture readonly %x, ptr nocapture readonly %y) local_unnamed_addr #0 {
 entry:
   br label %for.body
 
 for.body:
-  %arrayidx.phi = phi double* [ %x, %entry ], [ %arrayidx.inc, %for.body ]
-  %arrayidx1.phi = phi double* [ %y, %entry ], [ %arrayidx1.inc, %for.body ]
+  %arrayidx.phi = phi ptr [ %x, %entry ], [ %arrayidx.inc, %for.body ]
+  %arrayidx1.phi = phi ptr [ %y, %entry ], [ %arrayidx1.inc, %for.body ]
   %i.09 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
   %acc.08 = phi double [ 0.000000e+00, %entry ], [ %add, %for.body ]
-  %0 = load double, double* %arrayidx.phi, align 8
-  %1 = load double, double* %arrayidx1.phi, align 8
+  %0 = load double, ptr %arrayidx.phi, align 8
+  %1 = load double, ptr %arrayidx1.phi, align 8
   %mul = fmul contract double %0, %1
   %add = fadd contract double %acc.08, %mul
   %inc = add nuw nsw i32 %i.09, 1
   %exitcond = icmp eq i32 %inc, 1024
-  %arrayidx.inc = getelementptr double, double* %arrayidx.phi, i32 1
-  %arrayidx1.inc = getelementptr double, double* %arrayidx1.phi, i32 1
+  %arrayidx.inc = getelementptr double, ptr %arrayidx.phi, i32 1
+  %arrayidx1.inc = getelementptr double, ptr %arrayidx1.phi, i32 1
   br i1 %exitcond, label %for.end, label %for.body
 
 for.end:
@@ -54,15 +54,15 @@ for.end:
 
 ; CHECK-LABEL: stf
 ; CHECK: memw(r{{[0-9]+}}++#4)
-define double* @stf(float* returned %p) local_unnamed_addr #0 {
+define ptr @stf(ptr returned %p) local_unnamed_addr #0 {
 entry:
   br label %for.body
 
 for.body:
-  %arrayidx.phi = phi float* [ %arrayidx.inc, %for.body ], [ %p, %entry ]
+  %arrayidx.phi = phi ptr [ %arrayidx.inc, %for.body ], [ %p, %entry ]
   %call = tail call float @foof() #2
-  store float %call, float* %arrayidx.phi, align 8
-  %arrayidx.inc = getelementptr float, float* %arrayidx.phi, i32 1
+  store float %call, ptr %arrayidx.phi, align 8
+  %arrayidx.inc = getelementptr float, ptr %arrayidx.phi, i32 1
   br label %for.body
 }
 
@@ -70,15 +70,15 @@ declare float @foof() local_unnamed_addr #1
 
 ; CHECK-LABEL: std
 ; CHECK: memd(r{{[0-9]+}}++#8)
-define double* @std(double* returned %p) local_unnamed_addr #0 {
+define ptr @std(ptr returned %p) local_unnamed_addr #0 {
 entry:
   br label %for.body
 
 for.body:
-  %arrayidx.phi = phi double* [ %arrayidx.inc, %for.body ], [ %p, %entry ]
+  %arrayidx.phi = phi ptr [ %arrayidx.inc, %for.body ], [ %p, %entry ]
   %call = tail call double @food() #2
-  store double %call, double* %arrayidx.phi, align 8
-  %arrayidx.inc = getelementptr double, double* %arrayidx.phi, i32 1
+  store double %call, ptr %arrayidx.phi, align 8
+  %arrayidx.inc = getelementptr double, ptr %arrayidx.phi, i32 1
   br label %for.body
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/postinc-load.ll b/llvm/test/CodeGen/Hexagon/postinc-load.ll
index 825e16976a53d..6d4ef7d2ca440 100644
--- a/llvm/test/CodeGen/Hexagon/postinc-load.ll
+++ b/llvm/test/CodeGen/Hexagon/postinc-load.ll
@@ -3,22 +3,22 @@
 ; Check that post-increment load instructions are being generated.
 ; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}++#4)
 
-define i32 @f0(i32* nocapture %a0, i16* nocapture %a1, i32 %a2) #0 {
+define i32 @f0(ptr nocapture %a0, ptr nocapture %a1, i32 %a2) #0 {
 b0:
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ %v11, %b1 ], [ 10, %b0 ]
-  %v1 = phi i32* [ %a0, %b0 ], [ %v9, %b1 ]
-  %v2 = phi i16* [ %a1, %b0 ], [ %v10, %b1 ]
+  %v1 = phi ptr [ %a0, %b0 ], [ %v9, %b1 ]
+  %v2 = phi ptr [ %a1, %b0 ], [ %v10, %b1 ]
   %v3 = phi i32 [ 0, %b0 ], [ %v8, %b1 ]
-  %v4 = load i32, i32* %v1, align 4
-  %v5 = load i16, i16* %v2, align 2
+  %v4 = load i32, ptr %v1, align 4
+  %v5 = load i16, ptr %v2, align 2
   %v6 = sext i16 %v5 to i32
   %v7 = add i32 %v4, %v3
   %v8 = add i32 %v7, %v6
-  %v9 = getelementptr i32, i32* %v1, i32 1
-  %v10 = getelementptr i16, i16* %v2, i32 1
+  %v9 = getelementptr i32, ptr %v1, i32 1
+  %v10 = getelementptr i16, ptr %v2, i32 1
   %v11 = add i32 %v0, -1
   %v12 = icmp eq i32 %v11, 0
   br i1 %v12, label %b2, label %b1

diff  --git a/llvm/test/CodeGen/Hexagon/postinc-offset.ll b/llvm/test/CodeGen/Hexagon/postinc-offset.ll
index 4173de289c1e7..bfd2518f5aa3f 100644
--- a/llvm/test/CodeGen/Hexagon/postinc-offset.ll
+++ b/llvm/test/CodeGen/Hexagon/postinc-offset.ll
@@ -7,9 +7,9 @@
 ; CHECK: }
 
 
-define void @f0(i32* %a0) #0 {
+define void @f0(ptr %a0) #0 {
 b0:
-  store i32 -1, i32* %a0, align 8, !tbaa !0
+  store i32 -1, ptr %a0, align 8, !tbaa !0
   br label %b4
 
 b1:                                               ; preds = %b3
@@ -24,14 +24,13 @@ b3:                                               ; preds = %b4
 
 b4:                                               ; preds = %b4, %b0
   %v1 = phi <2 x i32> [ %v6, %b4 ], [ zeroinitializer, %b0 ]
-  %v2 = phi i32* [ %v9, %b4 ], [ %a0, %b0 ]
+  %v2 = phi ptr [ %v9, %b4 ], [ %a0, %b0 ]
   %v3 = phi i32 [ %v7, %b4 ], [ 0, %b0 ]
-  %v4 = bitcast i32* %v2 to <2 x i32>*
-  %v5 = load <2 x i32>, <2 x i32>* %v4, align 8
+  %v5 = load <2 x i32>, ptr %v2, align 8
   %v6 = add <2 x i32> %v5, %v1
   %v7 = add nsw i32 %v3, 2
   %v8 = icmp slt i32 %v3, 4
-  %v9 = getelementptr i32, i32* %v2, i32 2
+  %v9 = getelementptr i32, ptr %v2, i32 2
   br i1 %v8, label %b4, label %b3
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/postinc-order.ll b/llvm/test/CodeGen/Hexagon/postinc-order.ll
index 7d0d467ec86af..405dd2a8cec74 100644
--- a/llvm/test/CodeGen/Hexagon/postinc-order.ll
+++ b/llvm/test/CodeGen/Hexagon/postinc-order.ll
@@ -4,7 +4,7 @@
 ; CHECK: memd(r{{[0-9]+}}++#8) = r
 
 ; Function Attrs: nounwind
-define void @f0(i32 %a0, i16* nocapture %a1, i16 signext %a2) #0 {
+define void @f0(i32 %a0, ptr nocapture %a1, i16 signext %a2) #0 {
 b0:
   %v0 = icmp eq i32 %a0, 0
   br i1 %v0, label %b2, label %b3
@@ -46,21 +46,20 @@ b8:                                               ; preds = %b7
 b9:                                               ; preds = %b9, %b5
   %v11 = phi i32 [ 0, %b5 ], [ %v12, %b9 ]
   %v12 = add nsw i32 %v11, 4
-  %v13 = getelementptr i16, i16* %a1, i32 %v11
-  %v14 = bitcast i16* %v13 to <4 x i16>*
-  %v15 = load <4 x i16>, <4 x i16>* %v14, align 16
+  %v13 = getelementptr i16, ptr %a1, i32 %v11
+  %v15 = load <4 x i16>, ptr %v13, align 16
   %v16 = add <4 x i16> %v15, %v8
-  store <4 x i16> %v16, <4 x i16>* %v14, align 16
+  store <4 x i16> %v16, ptr %v13, align 16
   %v17 = icmp slt i32 %v12, %v3
   br i1 %v17, label %b9, label %b6
 
 b10:                                              ; preds = %b10, %b8
   %v18 = phi i32 [ %v19, %b10 ], [ %v9, %b8 ]
   %v19 = add nsw i32 %v18, 1
-  %v20 = getelementptr i16, i16* %a1, i32 %v18
-  %v21 = load i16, i16* %v20, align 2
+  %v20 = getelementptr i16, ptr %a1, i32 %v18
+  %v21 = load i16, ptr %v20, align 2
   %v22 = add i16 %v21, %a2
-  store i16 %v22, i16* %v20, align 2
+  store i16 %v22, ptr %v20, align 2
   %v23 = icmp eq i32 %v19, %a0
   br i1 %v23, label %b1, label %b10
 }

diff  --git a/llvm/test/CodeGen/Hexagon/postinc-store.ll b/llvm/test/CodeGen/Hexagon/postinc-store.ll
index 2dabc7991e39b..bd8226cf5d374 100644
--- a/llvm/test/CodeGen/Hexagon/postinc-store.ll
+++ b/llvm/test/CodeGen/Hexagon/postinc-store.ll
@@ -3,22 +3,22 @@
 ; Check that post-increment store instructions are being generated.
 ; CHECK: memw(r{{[0-9]+}}++#4) = r{{[0-9]+}}
 
-define i32 @f0(i32* nocapture %a0, i16* nocapture %a1, i32 %a2) #0 {
+define i32 @f0(ptr nocapture %a0, ptr nocapture %a1, i32 %a2) #0 {
 b0:
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ %v10, %b1 ], [ 10, %b0 ]
-  %v1 = phi i32* [ %a0, %b0 ], [ %v8, %b1 ]
-  %v2 = phi i16* [ %a1, %b0 ], [ %v9, %b1 ]
-  %v3 = load i32, i32* %v1, align 4
-  %v4 = load i16, i16* %v2, align 2
+  %v1 = phi ptr [ %a0, %b0 ], [ %v8, %b1 ]
+  %v2 = phi ptr [ %a1, %b0 ], [ %v9, %b1 ]
+  %v3 = load i32, ptr %v1, align 4
+  %v4 = load i16, ptr %v2, align 2
   %v5 = sext i16 %v4 to i32
   %v6 = mul i32 %v3, 2
   %v7 = add i32 %v6, %v5
-  store i32 %v7, i32* %v1, align 4
-  %v8 = getelementptr i32, i32* %v1, i32 1
-  %v9 = getelementptr i16, i16* %v2, i32 1
+  store i32 %v7, ptr %v1, align 4
+  %v8 = getelementptr i32, ptr %v1, i32 1
+  %v9 = getelementptr i16, ptr %v2, i32 1
   %v10 = add i32 %v0, -1
   %v11 = icmp eq i32 %v10, 0
   br i1 %v11, label %b2, label %b1

diff  --git a/llvm/test/CodeGen/Hexagon/pred-absolute-store.ll b/llvm/test/CodeGen/Hexagon/pred-absolute-store.ll
index 1ed6bb2aacb77..38169d7037912 100644
--- a/llvm/test/CodeGen/Hexagon/pred-absolute-store.ll
+++ b/llvm/test/CodeGen/Hexagon/pred-absolute-store.ll
@@ -10,7 +10,7 @@ entry:
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
-  store i32 %a, i32* @gvar, align 4
+  store i32 %a, ptr @gvar, align 4
   br label %if.end
 
 if.end:

diff  --git a/llvm/test/CodeGen/Hexagon/pred-gp.ll b/llvm/test/CodeGen/Hexagon/pred-gp.ll
index 4d50abf628387..a36bba79506e8 100644
--- a/llvm/test/CodeGen/Hexagon/pred-gp.ll
+++ b/llvm/test/CodeGen/Hexagon/pred-gp.ll
@@ -14,12 +14,12 @@ b0:
   br i1 %v0, label %b2, label %b1
 
 b1:                                               ; preds = %b0
-  %v1 = load i32, i32* @g1, align 4
+  %v1 = load i32, ptr @g1, align 4
   br label %b3
 
 b2:                                               ; preds = %b0
-  %v2 = load i32, i32* @g0, align 4
-  store i32 %v2, i32* @g1, align 4
+  %v2 = load i32, ptr @g0, align 4
+  store i32 %v2, ptr @g1, align 4
   br label %b3
 
 b3:                                               ; preds = %b2, %b1

diff  --git a/llvm/test/CodeGen/Hexagon/pred-instrs.ll b/llvm/test/CodeGen/Hexagon/pred-instrs.ll
index 27986f872d978..0e62e0d820331 100644
--- a/llvm/test/CodeGen/Hexagon/pred-instrs.ll
+++ b/llvm/test/CodeGen/Hexagon/pred-instrs.ll
@@ -24,8 +24,8 @@ b2:                                               ; preds = %b0
 
 b3:                                               ; preds = %b2, %b1
   %v5 = phi i32 [ %v4, %b2 ], [ %v2, %b1 ]
-  store i32 %v5, i32* @g0, align 4
-  %v6 = load i32, i32* @g1, align 4
+  store i32 %v5, ptr @g0, align 4
+  %v6 = load i32, ptr @g1, align 4
   ret i32 %v6
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/pred-taken-jump.ll b/llvm/test/CodeGen/Hexagon/pred-taken-jump.ll
index 573f2478f518b..71c28204658dc 100644
--- a/llvm/test/CodeGen/Hexagon/pred-taken-jump.ll
+++ b/llvm/test/CodeGen/Hexagon/pred-taken-jump.ll
@@ -6,16 +6,16 @@
 
 ; CHECK-NOT: if ({{!?}}p{{[0-3]}}) jump:t
 
-%s.0 = type { %s.0*, i8 }
+%s.0 = type { ptr, i8 }
 
-define i32 @f0(%s.0** nocapture %a0, i32 %a1) #0 {
+define i32 @f0(ptr nocapture %a0, i32 %a1) #0 {
 b0:
   %v0 = and i32 %a1, 63
   %v1 = icmp eq i32 %v0, %a1
   br i1 %v1, label %b1, label %b7
 
 b1:                                               ; preds = %b0
-  %v2 = tail call i8* @f1()
+  %v2 = tail call ptr @f1()
   br label %b2
 
 b2:                                               ; preds = %b4, %b1
@@ -26,8 +26,8 @@ b2:                                               ; preds = %b4, %b1
   br i1 %v6, label %b3, label %b5
 
 b3:                                               ; preds = %b2
-  %v7 = tail call %s.0* @f2(i8* undef, i8* %v2)
-  %v8 = icmp eq %s.0* %v7, null
+  %v7 = tail call ptr @f2(ptr undef, ptr %v2)
+  %v8 = icmp eq ptr %v7, null
   br i1 %v8, label %b7, label %b4
 
 b4:                                               ; preds = %b3
@@ -45,8 +45,8 @@ b7:                                               ; preds = %b6, %b5, %b3, %b0
   ret i32 %v10
 }
 
-declare i8* @f1()
+declare ptr @f1()
 
-declare %s.0* @f2(i8*, i8*)
+declare ptr @f2(ptr, ptr)
 
 attributes #0 = { nounwind "target-cpu"="hexagonv55" }

diff  --git a/llvm/test/CodeGen/Hexagon/predicate-logical.ll b/llvm/test/CodeGen/Hexagon/predicate-logical.ll
index e3ba4d8643db3..89e84a4da2dd4 100644
--- a/llvm/test/CodeGen/Hexagon/predicate-logical.ll
+++ b/llvm/test/CodeGen/Hexagon/predicate-logical.ll
@@ -3,20 +3,20 @@
 
 target triple = "hexagon"
 
-define i32 @foo(i64* nocapture %p, i64* nocapture %q) nounwind readonly {
+define i32 @foo(ptr nocapture %p, ptr nocapture %q) nounwind readonly {
 entry:
-  %incdec.ptr = getelementptr inbounds i64, i64* %p, i32 1
-  %0 = load i64, i64* %p, align 8, !tbaa !0
-  %incdec.ptr1 = getelementptr inbounds i64, i64* %q, i32 1
-  %1 = load i64, i64* %q, align 8, !tbaa !0
+  %incdec.ptr = getelementptr inbounds i64, ptr %p, i32 1
+  %0 = load i64, ptr %p, align 8, !tbaa !0
+  %incdec.ptr1 = getelementptr inbounds i64, ptr %q, i32 1
+  %1 = load i64, ptr %q, align 8, !tbaa !0
   %2 = tail call i32 @llvm.hexagon.A2.vcmpwgtu(i64 %0, i64 %1)
-  %incdec.ptr2 = getelementptr inbounds i64, i64* %p, i32 2
-  %3 = load i64, i64* %incdec.ptr, align 8, !tbaa !0
-  %incdec.ptr3 = getelementptr inbounds i64, i64* %q, i32 2
-  %4 = load i64, i64* %incdec.ptr1, align 8, !tbaa !0
+  %incdec.ptr2 = getelementptr inbounds i64, ptr %p, i32 2
+  %3 = load i64, ptr %incdec.ptr, align 8, !tbaa !0
+  %incdec.ptr3 = getelementptr inbounds i64, ptr %q, i32 2
+  %4 = load i64, ptr %incdec.ptr1, align 8, !tbaa !0
   %5 = tail call i32 @llvm.hexagon.A2.vcmpwgtu(i64 %3, i64 %4)
-  %6 = load i64, i64* %incdec.ptr2, align 8, !tbaa !0
-  %7 = load i64, i64* %incdec.ptr3, align 8, !tbaa !0
+  %6 = load i64, ptr %incdec.ptr2, align 8, !tbaa !0
+  %7 = load i64, ptr %incdec.ptr3, align 8, !tbaa !0
   %8 = tail call i32 @llvm.hexagon.A2.vcmpwgtu(i64 %6, i64 %7)
   %and = and i32 %5, %2
   %or = or i32 %8, %and

diff  --git a/llvm/test/CodeGen/Hexagon/predicate-rcmp.ll b/llvm/test/CodeGen/Hexagon/predicate-rcmp.ll
index 78991e0dbe705..775327b89de5d 100644
--- a/llvm/test/CodeGen/Hexagon/predicate-rcmp.ll
+++ b/llvm/test/CodeGen/Hexagon/predicate-rcmp.ll
@@ -14,6 +14,6 @@ entry:
   %a102 = zext i1 %tobool250 to i8
   %detected.0 = xor i8 %a102, 1
   %conv253 = zext i8 %detected.0 to i32
-  store i32 %conv253, i32* @var, align 4
+  store i32 %conv253, ptr @var, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/predtfrs.ll b/llvm/test/CodeGen/Hexagon/predtfrs.ll
index 890b84b091b15..086d771471d49 100644
--- a/llvm/test/CodeGen/Hexagon/predtfrs.ll
+++ b/llvm/test/CodeGen/Hexagon/predtfrs.ll
@@ -11,21 +11,19 @@
 @g0 = common global i16 0, align 2
 
 ; Function Attrs: nounwind
-define void @f0(%s.0* nocapture %a0, %s.1* nocapture %a1, %s.1* nocapture %a2) #0 {
+define void @f0(ptr nocapture %a0, ptr nocapture %a1, ptr nocapture %a2) #0 {
 b0:
-  %v0 = load i16, i16* @g0, align 2, !tbaa !0
+  %v0 = load i16, ptr @g0, align 2, !tbaa !0
   %v1 = icmp eq i16 %v0, 3
   %v2 = select i1 %v1, i32 -1, i32 34
-  %v3 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 0
-  %v4 = load i32, i32* %v3, align 4
+  %v4 = load i32, ptr %a0, align 4
   %v5 = zext i32 %v4 to i64
-  %v6 = getelementptr inbounds %s.0, %s.0* %a0, i32 1, i32 0
-  %v7 = load i32, i32* %v6, align 4
+  %v6 = getelementptr inbounds %s.0, ptr %a0, i32 1, i32 0
+  %v7 = load i32, ptr %v6, align 4
   %v8 = zext i32 %v7 to i64
   %v9 = shl nuw i64 %v8, 32
   %v10 = or i64 %v9, %v5
-  %v11 = getelementptr inbounds %s.1, %s.1* %a1, i32 0, i32 0
-  %v12 = load i64, i64* %v11, align 8, !tbaa !4
+  %v12 = load i64, ptr %a1, align 8, !tbaa !4
   %v13 = tail call i64 @llvm.hexagon.M2.vrcmpyr.s0(i64 %v10, i64 %v12)
   %v14 = tail call i64 @llvm.hexagon.S2.asr.i.p(i64 %v13, i32 14)
   %v15 = lshr i64 %v14, 32
@@ -34,8 +32,8 @@ b0:
   %v18 = trunc i64 %v14 to i32
   %v19 = tail call i32 @llvm.hexagon.C2.mux(i32 %v17, i32 %v2, i32 %v18)
   %v20 = zext i32 %v19 to i64
-  %v21 = getelementptr inbounds %s.1, %s.1* %a2, i32 2, i32 0
-  store i64 %v20, i64* %v21, align 8
+  %v21 = getelementptr inbounds %s.1, ptr %a2, i32 2, i32 0
+  store i64 %v20, ptr %v21, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/prefetch-intr.ll b/llvm/test/CodeGen/Hexagon/prefetch-intr.ll
index b510553b1fb86..b41b2dc67cfef 100644
--- a/llvm/test/CodeGen/Hexagon/prefetch-intr.ll
+++ b/llvm/test/CodeGen/Hexagon/prefetch-intr.ll
@@ -7,14 +7,13 @@ target triple = "hexagon"
 define i32 @f0() #0 {
 b0:
   %v0 = alloca i32, align 4
-  store i32 0, i32* %v0, align 4, !tbaa !0
-  %v1 = bitcast i32* %v0 to i8*
-  call void @llvm.hexagon.prefetch(i8* %v1)
+  store i32 0, ptr %v0, align 4, !tbaa !0
+  call void @llvm.hexagon.prefetch(ptr %v0)
   ret i32 0
 }
 
 ; Function Attrs: nounwind
-declare void @llvm.hexagon.prefetch(i8*) #1
+declare void @llvm.hexagon.prefetch(ptr) #1
 
 attributes #0 = { nounwind "target-cpu"="hexagonv60" }
 attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/prefetch-shuffler-ice.ll b/llvm/test/CodeGen/Hexagon/prefetch-shuffler-ice.ll
index bd2ddec14fcaa..3140c95e3ae97 100644
--- a/llvm/test/CodeGen/Hexagon/prefetch-shuffler-ice.ll
+++ b/llvm/test/CodeGen/Hexagon/prefetch-shuffler-ice.ll
@@ -5,15 +5,15 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind optsize
-define void @f0(i32* nocapture %a0, i8* %a1) #0 {
+define void @f0(ptr nocapture %a0, ptr %a1) #0 {
 b0:
-  call void @llvm.hexagon.prefetch(i8* %a1)
-  store i32 0, i32* %a0, align 4, !tbaa !0
+  call void @llvm.hexagon.prefetch(ptr %a1)
+  store i32 0, ptr %a0, align 4, !tbaa !0
   ret void
 }
 
 ; Function Attrs: nounwind
-declare void @llvm.hexagon.prefetch(i8*) #1
+declare void @llvm.hexagon.prefetch(ptr) #1
 
 attributes #0 = { nounwind optsize "target-cpu"="hexagonv55" }
 attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/prob-types.ll b/llvm/test/CodeGen/Hexagon/prob-types.ll
index 80bb4e8d3088a..129baffa4a644 100644
--- a/llvm/test/CodeGen/Hexagon/prob-types.ll
+++ b/llvm/test/CodeGen/Hexagon/prob-types.ll
@@ -41,17 +41,17 @@ declare <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32>) #0
 declare <32 x i32> @llvm.hexagon.V6.vaddw.128B(<32 x i32>, <32 x i32>) #0
 
 ; Function Attrs: nounwind
-define hidden void @f0(<32 x i32>* %a0, <32 x i32>* %a1, i32 %a2, <32 x i32> %a3, <32 x i32> %a4, <32 x i32> %a5, i32 %a6, <32 x i32> %a7) #1 {
+define hidden void @f0(ptr %a0, ptr %a1, i32 %a2, <32 x i32> %a3, <32 x i32> %a4, <32 x i32> %a5, i32 %a6, <32 x i32> %a7) #1 {
 b0:
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
-  %v0 = phi <32 x i32>* [ %v38, %b1 ], [ %a0, %b0 ]
-  %v1 = phi <32 x i32>* [ %v4, %b1 ], [ %a1, %b0 ]
+  %v0 = phi ptr [ %v38, %b1 ], [ %a0, %b0 ]
+  %v1 = phi ptr [ %v4, %b1 ], [ %a1, %b0 ]
   %v2 = phi i32 [ %v39, %b1 ], [ %a2, %b0 ]
   %v3 = phi <32 x i32> [ %v34, %b1 ], [ %a3, %b0 ]
-  %v4 = getelementptr inbounds <32 x i32>, <32 x i32>* %v1, i32 1
-  %v5 = load <32 x i32>, <32 x i32>* %v1, align 128, !tbaa !0
+  %v4 = getelementptr inbounds <32 x i32>, ptr %v1, i32 1
+  %v5 = load <32 x i32>, ptr %v1, align 128, !tbaa !0
   %v6 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.128B(<32 x i32> %v5, i32 16843009) #2
   %v7 = tail call <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32> %v6, <32 x i32> %a4, i32 2) #2
   %v8 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v6, <32 x i32> %v7) #2
@@ -81,14 +81,14 @@ b1:                                               ; preds = %b1, %b0
   %v32 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.128B(<32 x i32> %v22, <32 x i32> %v31) #2
   %v33 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v26) #2
   %v34 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.128B(<32 x i32> %v22, <32 x i32> %v33) #2
-  %v35 = getelementptr inbounds <32 x i32>, <32 x i32>* %v0, i32 1
-  store <32 x i32> %v28, <32 x i32>* %v0, align 128, !tbaa !0
-  %v36 = getelementptr inbounds <32 x i32>, <32 x i32>* %v0, i32 2
-  store <32 x i32> %v30, <32 x i32>* %v35, align 128, !tbaa !0
-  %v37 = getelementptr inbounds <32 x i32>, <32 x i32>* %v0, i32 3
-  store <32 x i32> %v32, <32 x i32>* %v36, align 128, !tbaa !0
-  %v38 = getelementptr inbounds <32 x i32>, <32 x i32>* %v0, i32 4
-  store <32 x i32> %v34, <32 x i32>* %v37, align 128, !tbaa !0
+  %v35 = getelementptr inbounds <32 x i32>, ptr %v0, i32 1
+  store <32 x i32> %v28, ptr %v0, align 128, !tbaa !0
+  %v36 = getelementptr inbounds <32 x i32>, ptr %v0, i32 2
+  store <32 x i32> %v30, ptr %v35, align 128, !tbaa !0
+  %v37 = getelementptr inbounds <32 x i32>, ptr %v0, i32 3
+  store <32 x i32> %v32, ptr %v36, align 128, !tbaa !0
+  %v38 = getelementptr inbounds <32 x i32>, ptr %v0, i32 4
+  store <32 x i32> %v34, ptr %v37, align 128, !tbaa !0
   %v39 = add nsw i32 %v2, 128
   %v40 = icmp slt i32 %v39, %a6
   br i1 %v40, label %b1, label %b2

diff  --git a/llvm/test/CodeGen/Hexagon/prof-early-if.ll b/llvm/test/CodeGen/Hexagon/prof-early-if.ll
index b0f21110b7dee..5f0c9c8f10a58 100644
--- a/llvm/test/CodeGen/Hexagon/prof-early-if.ll
+++ b/llvm/test/CodeGen/Hexagon/prof-early-if.ll
@@ -20,15 +20,15 @@ declare i64 @llvm.hexagon.A2.vaddws(i64, i64) #0
 declare i64 @llvm.hexagon.A2.vsubws(i64, i64) #0
 declare i32 @llvm.hexagon.A4.modwrapu(i32, i32) #0
 
-define void @f0(i32 %a0, i64* %a1) #1 {
+define void @f0(i32 %a0, ptr %a1) #1 {
 b0:
   br label %b1
 
 b1:                                               ; preds = %b5, %b0
   %v0 = phi i32 [ 0, %b0 ], [ %v26, %b5 ]
   %v1 = phi i32 [ 0, %b0 ], [ %v25, %b5 ]
-  %v2 = load i32, i32* @g1, align 4
-  %v3 = load i32, i32* @g2, align 8
+  %v2 = load i32, ptr @g1, align 4
+  %v3 = load i32, ptr @g2, align 8
   %v4 = and i32 %v3, %v2
   br label %b2
 
@@ -38,21 +38,21 @@ b2:                                               ; preds = %b4, %b1
   %v7 = phi i32 [ %v9, %b4 ], [ %v4, %b1 ]
   %v8 = tail call i32 @llvm.hexagon.S2.cl0(i32 %v7)
   %v9 = tail call i32 @llvm.hexagon.S2.setbit.r(i32 %v7, i32 %v8)
-  %v10 = getelementptr [10 x %s.0], [10 x %s.0]* inttoptr (i32 -121502345 to [10 x %s.0]*), i32 0, i32 %v1
-  %v11 = getelementptr %s.0, %s.0* %v10, i32 0, i32 12, i32 %v8
-  %v12 = load i32, i32* %v11, align 4
+  %v10 = getelementptr [10 x %s.0], ptr inttoptr (i32 -121502345 to ptr), i32 0, i32 %v1
+  %v11 = getelementptr %s.0, ptr %v10, i32 0, i32 12, i32 %v8
+  %v12 = load i32, ptr %v11, align 4
   %v13 = tail call i64 @llvm.hexagon.M2.vmpy2s.s0(i32 %v12, i32 %v12)
-  %v14 = getelementptr %s.0, %s.0* %v10, i32 0, i32 13, i32 %v8
-  %v15 = load i32, i32* %v14, align 4
+  %v14 = getelementptr %s.0, ptr %v10, i32 0, i32 13, i32 %v8
+  %v15 = load i32, ptr %v14, align 4
   %v16 = tail call i64 @llvm.hexagon.M2.vmac2s.s0(i64 %v13, i32 %v15, i32 %v15)
-  %v17 = load i8, i8* @g3, align 1
+  %v17 = load i8, ptr @g3, align 1
   %v18 = and i8 %v17, 1
   %v19 = icmp eq i8 %v18, 0
   br i1 %v19, label %b3, label %b4, !prof !0
 
 b3:                                               ; preds = %b2
   %v20 = tail call i64 @llvm.hexagon.A2.vaddws(i64 %v5, i64 %v16)
-  store i64 %v20, i64* %a1, align 8
+  store i64 %v20, ptr %a1, align 8
   br label %b4
 
 b4:                                               ; preds = %b3, %b2
@@ -69,7 +69,7 @@ b5:                                               ; preds = %b4
   br i1 %v27, label %b6, label %b1, !prof !1
 
 b6:                                               ; preds = %b5
-  store i64 %v16, i64* @g0, align 8
+  store i64 %v16, ptr @g0, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/propagate-vcombine.ll b/llvm/test/CodeGen/Hexagon/propagate-vcombine.ll
index 989322a0fea09..37983f6ce6df8 100644
--- a/llvm/test/CodeGen/Hexagon/propagate-vcombine.ll
+++ b/llvm/test/CodeGen/Hexagon/propagate-vcombine.ll
@@ -7,13 +7,13 @@
 ; CHECK-NOT: vcombine
 
 define void @danny() #0 {
-  %t0 = load <16 x i32>, <16 x i32>* @v0, align 64
-  %t1 = load <16 x i32>, <16 x i32>* @v1, align 64
+  %t0 = load <16 x i32>, ptr @v0, align 64
+  %t1 = load <16 x i32>, ptr @v1, align 64
   %t2 = call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %t0, <16 x i32> %t1)
   %t3 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %t2)
   %t4 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %t2)
-  store <16 x i32> %t3, <16 x i32>* @v0, align 64
-  store <16 x i32> %t4, <16 x i32>* @v1, align 64
+  store <16 x i32> %t3, ptr @v0, align 64
+  store <16 x i32> %t4, ptr @v1, align 64
   ret void
 }
 
@@ -24,13 +24,13 @@ define void @danny() #0 {
 ; CHECK-NOT: vcombine
 
 define void @sammy() #1 {
-  %t0 = load <32 x i32>, <32 x i32>* @w0, align 128
-  %t1 = load <32 x i32>, <32 x i32>* @w1, align 128
+  %t0 = load <32 x i32>, ptr @w0, align 128
+  %t1 = load <32 x i32>, ptr @w1, align 128
   %t2 = call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> %t0, <32 x i32> %t1)
   %t3 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %t2)
   %t4 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %t2)
-  store <32 x i32> %t3, <32 x i32>* @w0, align 128
-  store <32 x i32> %t4, <32 x i32>* @w1, align 128
+  store <32 x i32> %t3, ptr @w0, align 128
+  store <32 x i32> %t4, ptr @w1, align 128
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/ps_call_nr.ll b/llvm/test/CodeGen/Hexagon/ps_call_nr.ll
index 50a5536ef20dc..31163e8dea5a6 100644
--- a/llvm/test/CodeGen/Hexagon/ps_call_nr.ll
+++ b/llvm/test/CodeGen/Hexagon/ps_call_nr.ll
@@ -9,13 +9,13 @@
 ; CHECK: {
 ; CHECK: call
 
-%s.0 = type <{ i8*, i8*, i16, i8, i8, i8 }>
+%s.0 = type <{ ptr, ptr, i16, i8, i8, i8 }>
 
 @g0 = external constant %s.0, section ".rodata.trace", align 1
 
 define void @f0() local_unnamed_addr {
 b0:
-  %v0 = load i32, i32* undef, align 4
+  %v0 = load i32, ptr undef, align 4
   %v1 = trunc i32 %v0 to i2
   switch i2 %v1, label %b4 [
     i2 1, label %b1
@@ -28,12 +28,12 @@ b1:                                               ; preds = %b0
   unreachable
 
 b2:                                               ; preds = %b0, %b0
-  %v2 = load i32, i32* undef, align 4
+  %v2 = load i32, ptr undef, align 4
   %v3 = lshr i32 %v2, 14
   %v4 = and i32 %v3, 2047
   %v5 = lshr i32 %v2, 3
   %v6 = and i32 %v5, 2047
-  tail call void @f1(%s.0* nonnull @g0, i32 %v6, i32 %v4, i32 0, i32 0)
+  tail call void @f1(ptr nonnull @g0, i32 %v6, i32 %v4, i32 0, i32 0)
   unreachable
 
 b3:                                               ; preds = %b0
@@ -43,4 +43,4 @@ b4:                                               ; preds = %b0
   unreachable
 }
 
-declare void @f1(%s.0*, i32, i32, i32, i32) local_unnamed_addr
+declare void @f1(ptr, i32, i32, i32, i32) local_unnamed_addr

diff  --git a/llvm/test/CodeGen/Hexagon/rdf-copy-undef.ll b/llvm/test/CodeGen/Hexagon/rdf-copy-undef.ll
index c2b98fc21ca77..0a0f7348813dd 100644
--- a/llvm/test/CodeGen/Hexagon/rdf-copy-undef.ll
+++ b/llvm/test/CodeGen/Hexagon/rdf-copy-undef.ll
@@ -11,21 +11,21 @@ target triple = "hexagon"
 @g0 = external unnamed_addr global i1, align 4
 
 ; Function Attrs: nounwind
-declare i8* @llvm.stacksave() #0
+declare ptr @llvm.stacksave() #0
 
 ; Function Attrs: nounwind
-declare void @llvm.stackrestore(i8*) #0
+declare void @llvm.stackrestore(ptr) #0
 
 ; Function Attrs: norecurse nounwind
-declare fastcc void @f0(i16 signext, i16 signext, i16 signext, i16* nocapture readonly, i16 signext, i16* nocapture) unnamed_addr #1
+declare fastcc void @f0(i16 signext, i16 signext, i16 signext, ptr nocapture readonly, i16 signext, ptr nocapture) unnamed_addr #1
 
 ; Function Attrs: norecurse nounwind
 declare fastcc signext i16 @f1(i16 signext, i16 signext) unnamed_addr #1
 
 ; Function Attrs: norecurse nounwind
-define fastcc i32 @f2(i16* nocapture readonly %a0, i16 signext %a1, i16 signext %a2, i16* nocapture readonly %a3, i16 signext %a4, i16* nocapture readonly %a51, i16* nocapture %a6) unnamed_addr #1 {
+define fastcc i32 @f2(ptr nocapture readonly %a0, i16 signext %a1, i16 signext %a2, ptr nocapture readonly %a3, i16 signext %a4, ptr nocapture readonly %a51, ptr nocapture %a6) unnamed_addr #1 {
 b0:
-  %v0 = tail call i8* @llvm.stacksave()
+  %v0 = tail call ptr @llvm.stacksave()
   %v1 = tail call fastcc signext i16 @f1(i16 signext %a2, i16 signext %a1)
   br i1 undef, label %b7, label %b1
 
@@ -62,17 +62,17 @@ b8:                                               ; preds = %b7, %b3, %b2
 b9:                                               ; preds = %b8, %b7
   %v8 = phi i16 [ 0, %b8 ], [ %v6, %b7 ]
   %v9 = phi i32 [ %v7, %b8 ], [ 0, %b7 ]
-  %v10 = load i16, i16* undef, align 2, !tbaa !4
+  %v10 = load i16, ptr undef, align 2, !tbaa !4
   %v11 = sext i16 %v10 to i32
   %v12 = zext i16 %v10 to i32
   br i1 undef, label %b10, label %b11
 
 b10:                                              ; preds = %b9
-  store i1 true, i1* @g0, align 4
+  store i1 true, ptr @g0, align 4
   br label %b11
 
 b11:                                              ; preds = %b10, %b9
-  %v13 = load i16, i16* undef, align 2, !tbaa !4
+  %v13 = load i16, ptr undef, align 2, !tbaa !4
   %v14 = sext i16 %v13 to i32
   %v15 = shl nuw i32 %v12, 16
   %v16 = and i32 %v9, 65535
@@ -91,7 +91,7 @@ b11:                                              ; preds = %b10, %b9
   br i1 %v28, label %b12, label %b13
 
 b12:                                              ; preds = %b11
-  store i1 true, i1* @g0, align 4
+  store i1 true, ptr @g0, align 4
   br label %b13
 
 b13:                                              ; preds = %b12, %b11
@@ -113,7 +113,7 @@ b17:                                              ; preds = %b15
   br i1 %v31, label %b18, label %b19
 
 b18:                                              ; preds = %b17
-  store i1 true, i1* @g0, align 4
+  store i1 true, ptr @g0, align 4
   br label %b20
 
 b19:                                              ; preds = %b17, %b16
@@ -125,7 +125,7 @@ b20:                                              ; preds = %b19, %b18
   br i1 %v33, label %b21, label %b22
 
 b21:                                              ; preds = %b20
-  store i1 true, i1* @g0, align 4
+  store i1 true, ptr @g0, align 4
   br label %b23
 
 b22:                                              ; preds = %b20
@@ -135,12 +135,12 @@ b23:                                              ; preds = %b22, %b21
   %v34 = add nsw i32 %v32, 32768
   %v35 = lshr i32 %v34, 16
   %v36 = trunc i32 %v35 to i16
-  store i16 %v36, i16* undef, align 2, !tbaa !4
+  store i16 %v36, ptr undef, align 2, !tbaa !4
   br i1 undef, label %b24, label %b15
 
 b24:                                              ; preds = %b23, %b13
-  call fastcc void @f0(i16 signext undef, i16 signext %a1, i16 signext %a2, i16* %a3, i16 signext %a4, i16* %a6)
-  call void @llvm.stackrestore(i8* %v0)
+  call fastcc void @f0(i16 signext undef, i16 signext %a1, i16 signext %a2, ptr %a3, i16 signext %a4, ptr %a6)
+  call void @llvm.stackrestore(ptr %v0)
   ret i32 undef
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/rdf-copy-undef2.ll b/llvm/test/CodeGen/Hexagon/rdf-copy-undef2.ll
index 28bf4c67cd750..0d2b398b720bb 100644
--- a/llvm/test/CodeGen/Hexagon/rdf-copy-undef2.ll
+++ b/llvm/test/CodeGen/Hexagon/rdf-copy-undef2.ll
@@ -3,17 +3,17 @@
 
 target triple = "hexagon"
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #0
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #0
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #0
 declare signext i16 @cat(i16 signext) #1
-declare void @danny(i16 signext, i16 signext, i16 signext, i16* nocapture readonly, i16 signext, i16* nocapture) #1
-declare void @sammy(i16* nocapture readonly, i16* nocapture readonly, i16* nocapture readonly, i32* nocapture, i16* nocapture, i16 signext, i16 signext, i16 signext) #1
-declare i8* @llvm.stacksave() #2
-declare void @llvm.stackrestore(i8*) #2
+declare void @danny(i16 signext, i16 signext, i16 signext, ptr nocapture readonly, i16 signext, ptr nocapture) #1
+declare void @sammy(ptr nocapture readonly, ptr nocapture readonly, ptr nocapture readonly, ptr nocapture, ptr nocapture, i16 signext, i16 signext, i16 signext) #1
+declare ptr @llvm.stacksave() #2
+declare void @llvm.stackrestore(ptr) #2
 
-define i32 @fred(i16 signext %p0, i16 signext %p1, i16* nocapture readonly %p2, i16 signext %p3, i16* nocapture readonly %p4, i16* nocapture %p5) #1 {
+define i32 @fred(i16 signext %p0, i16 signext %p1, ptr nocapture readonly %p2, i16 signext %p3, ptr nocapture readonly %p4, ptr nocapture %p5) #1 {
 entry:
-  %0 = tail call i8* @llvm.stacksave()
+  %0 = tail call ptr @llvm.stacksave()
   %vla = alloca i16, i32 undef, align 8
   %call17 = call signext i16 @cat(i16 signext 1) #1
   br i1 undef, label %for.cond23.preheader, label %for.end47
@@ -25,27 +25,27 @@ for.cond23.preheader:                             ; preds = %for.end40, %entry
 for.body27:                                       ; preds = %for.body27, %for.cond23.preheader
   %indvars.iv = phi i32 [ %indvars.iv.next, %for.body27 ], [ 0, %for.cond23.preheader ]
   %call30 = call signext i16 @cat(i16 signext 7) #1
-  %arrayidx32 = getelementptr inbounds i16, i16* %vla, i32 %indvars.iv
-  store i16 %call30, i16* %arrayidx32, align 2
-  %arrayidx37 = getelementptr inbounds i16, i16* undef, i32 %indvars.iv
+  %arrayidx32 = getelementptr inbounds i16, ptr %vla, i32 %indvars.iv
+  store i16 %call30, ptr %arrayidx32, align 2
+  %arrayidx37 = getelementptr inbounds i16, ptr undef, i32 %indvars.iv
   %indvars.iv.next = add nuw nsw i32 %indvars.iv, 1
   %exitcond = icmp eq i16 undef, %p3
   br i1 %exitcond, label %for.end40, label %for.body27
 
 for.end40:                                        ; preds = %for.body27, %for.cond23.preheader
-  call void @sammy(i16* nonnull undef, i16* undef, i16* %p4, i32* null, i16* undef, i16 signext undef, i16 signext undef, i16 signext undef) #1
+  call void @sammy(ptr nonnull undef, ptr undef, ptr %p4, ptr null, ptr undef, i16 signext undef, i16 signext undef, i16 signext undef) #1
   %inc46 = add nuw nsw i16 %i.190, 1
   %exitcond94 = icmp eq i16 %inc46, %call17
   br i1 %exitcond94, label %for.end47.loopexit, label %for.cond23.preheader
 
 for.end47.loopexit:                               ; preds = %for.end40
-  %.pre = load i16, i16* undef, align 2
+  %.pre = load i16, ptr undef, align 2
   br label %for.end47
 
 for.end47:                                        ; preds = %for.end47.loopexit, %entry
   %1 = phi i16 [ %.pre, %for.end47.loopexit ], [ 0, %entry ]
-  call void @danny(i16 signext %1, i16 signext %p0, i16 signext %p1, i16* %p2, i16 signext %p3, i16* %p5) #1
-  call void @llvm.stackrestore(i8* %0)
+  call void @danny(i16 signext %1, i16 signext %p0, i16 signext %p1, ptr %p2, i16 signext %p3, ptr %p5) #1
+  call void @llvm.stackrestore(ptr %0)
   ret i32 undef
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/rdf-copy.ll b/llvm/test/CodeGen/Hexagon/rdf-copy.ll
index 0a6a43a1cb05b..5d0f22343dbf5 100644
--- a/llvm/test/CodeGen/Hexagon/rdf-copy.ll
+++ b/llvm/test/CodeGen/Hexagon/rdf-copy.ll
@@ -23,30 +23,29 @@ target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:
 target triple = "hexagon"
 
 %union.t = type { %struct.t, [64 x i8] }
-%struct.t = type { [12 x i8], %struct.r*, double }
+%struct.t = type { [12 x i8], ptr, double }
 %struct.r = type opaque
 
-define %union.t* @foo(%union.t* %chain) nounwind readonly {
+define ptr @foo(ptr %chain) nounwind readonly {
 entry:
-  %tobool = icmp eq %union.t* %chain, null
+  %tobool = icmp eq ptr %chain, null
   br i1 %tobool, label %if.end, label %while.cond.preheader
 
 while.cond.preheader:                             ; preds = %entry
   br label %while.cond
 
 while.cond:                                       ; preds = %while.cond.preheader, %while.cond
-  %chain.addr.0 = phi %union.t* [ %0, %while.cond ], [ %chain, %while.cond.preheader ]
-  %chain1 = bitcast %union.t* %chain.addr.0 to %union.t**
-  %0 = load %union.t*, %union.t** %chain1, align 4, !tbaa !0
-  %tobool2 = icmp eq %union.t* %0, null
+  %chain.addr.0 = phi ptr [ %0, %while.cond ], [ %chain, %while.cond.preheader ]
+  %0 = load ptr, ptr %chain.addr.0, align 4, !tbaa !0
+  %tobool2 = icmp eq ptr %0, null
   br i1 %tobool2, label %if.end.loopexit, label %while.cond
 
 if.end.loopexit:                                  ; preds = %while.cond
   br label %if.end
 
 if.end:                                           ; preds = %if.end.loopexit, %entry
-  %chain.addr.1 = phi %union.t* [ null, %entry ], [ %chain.addr.0, %if.end.loopexit ]
-  ret %union.t* %chain.addr.1
+  %chain.addr.1 = phi ptr [ null, %entry ], [ %chain.addr.0, %if.end.loopexit ]
+  ret ptr %chain.addr.1
 }
 
 !0 = !{!"any pointer", !1}

diff  --git a/llvm/test/CodeGen/Hexagon/rdf-dead-loop.ll b/llvm/test/CodeGen/Hexagon/rdf-dead-loop.ll
index 656c15a58d574..1bb64434f6e2e 100644
--- a/llvm/test/CodeGen/Hexagon/rdf-dead-loop.ll
+++ b/llvm/test/CodeGen/Hexagon/rdf-dead-loop.ll
@@ -8,14 +8,13 @@ entry:
 
 body:
   %ip_vec30 = phi <2 x i32> [ %ip_vec, %body ], [ zeroinitializer, %entry ]
-  %scevgep.phi = phi i32* [ %scevgep.inc, %body ], [ undef, %entry ]
+  %scevgep.phi = phi ptr [ %scevgep.inc, %body ], [ undef, %entry ]
   %polly.indvar = phi i32 [ %polly.indvar_next, %body ], [ 0, %entry ]
-  %vector_ptr = bitcast i32* %scevgep.phi to <2 x i32>*
-  %_p_vec_full = load <2 x i32>, <2 x i32>* %vector_ptr, align 8
+  %_p_vec_full = load <2 x i32>, ptr %scevgep.phi, align 8
   %ip_vec = add <2 x i32> %_p_vec_full, %ip_vec30
   %polly.indvar_next = add nsw i32 %polly.indvar, 2
   %polly.loop_cond = icmp slt i32 %polly.indvar, 4
-  %scevgep.inc = getelementptr i32, i32* %scevgep.phi, i32 2
+  %scevgep.inc = getelementptr i32, ptr %scevgep.phi, i32 2
   br i1 %polly.loop_cond, label %body, label %exit
 
 exit:

diff  --git a/llvm/test/CodeGen/Hexagon/rdf-def-mask.ll b/llvm/test/CodeGen/Hexagon/rdf-def-mask.ll
index 91aec7750dbc9..cc57096a4580e 100644
--- a/llvm/test/CodeGen/Hexagon/rdf-def-mask.ll
+++ b/llvm/test/CodeGen/Hexagon/rdf-def-mask.ll
@@ -21,13 +21,13 @@ b3:                                               ; preds = %b1
   %v7 = tail call i32 @llvm.hexagon.S2.asl.r.r(i32 %a0, i32 %v6)
   %v8 = add nsw i32 %v7, -8
   %v9 = tail call i32 @llvm.hexagon.S2.asl.r.r(i32 %a0, i32 %v5)
-  %v10 = getelementptr inbounds [9 x i16], [9 x i16]* @g0, i32 0, i32 %v8
-  %v11 = load i16, i16* %v10, align 2
+  %v10 = getelementptr inbounds [9 x i16], ptr @g0, i32 0, i32 %v8
+  %v11 = load i16, ptr %v10, align 2
   %v12 = sext i16 %v11 to i32
   %v13 = shl nsw i32 %v12, 16
   %v14 = add nsw i32 %v7, -7
-  %v15 = getelementptr inbounds [9 x i16], [9 x i16]* @g0, i32 0, i32 %v14
-  %v16 = load i16, i16* %v15, align 2
+  %v15 = getelementptr inbounds [9 x i16], ptr @g0, i32 0, i32 %v14
+  %v16 = load i16, ptr %v15, align 2
   %v17 = sub i16 %v11, %v16
   %v18 = and i32 %v9, 65535
   %v19 = zext i16 %v17 to i32

diff  --git a/llvm/test/CodeGen/Hexagon/rdf-extra-livein.ll b/llvm/test/CodeGen/Hexagon/rdf-extra-livein.ll
index 372e45dda1891..3352f6f50593f 100644
--- a/llvm/test/CodeGen/Hexagon/rdf-extra-livein.ll
+++ b/llvm/test/CodeGen/Hexagon/rdf-extra-livein.ll
@@ -8,7 +8,7 @@ target triple = "hexagon"
 
 @.str.13 = external unnamed_addr constant [60 x i8], align 1
 
-declare void @printf(i8* nocapture readonly, ...) local_unnamed_addr #0
+declare void @printf(ptr nocapture readonly, ...) local_unnamed_addr #0
 
 declare void @danny() local_unnamed_addr #0
 declare zeroext i8 @sammy() local_unnamed_addr #0
@@ -58,7 +58,7 @@ if.end88:                                         ; preds = %if.then81, %sw.epil
   %mul92 = fmul float undef, %div89
   %div93 = fdiv float %mul92, 1.000000e+06
   %conv107 = fpext float %div93 to double
-  call void (i8*, ...) @printf(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @.str.13, i32 0, i32 0), double %conv107, double undef, i64 undef, i32 undef) #0
+  call void (ptr, ...) @printf(ptr @.str.13, double %conv107, double undef, i64 undef, i32 undef) #0
   br i1 undef, label %if.end88.do.body_crit_edge, label %if.then124
 
 if.end88.do.body_crit_edge:                       ; preds = %if.end88

diff  --git a/llvm/test/CodeGen/Hexagon/rdf-filter-defs.ll b/llvm/test/CodeGen/Hexagon/rdf-filter-defs.ll
index 5c55c500634de..68ae19acb8d65 100644
--- a/llvm/test/CodeGen/Hexagon/rdf-filter-defs.ll
+++ b/llvm/test/CodeGen/Hexagon/rdf-filter-defs.ll
@@ -8,27 +8,26 @@ target triple = "hexagon"
 %type.0 = type { %type.1, %type.3, i32, i32 }
 %type.1 = type { %type.2 }
 %type.2 = type { i8 }
-%type.3 = type { i8*, [12 x i8] }
+%type.3 = type { ptr, [12 x i8] }
 %type.4 = type { i8 }
 
-define weak_odr dereferenceable(28) %type.0* @fred(%type.0* %p0, i32 %p1, %type.0* dereferenceable(28) %p2, i32 %p3, i32 %p4) local_unnamed_addr align 2 {
+define weak_odr dereferenceable(28) ptr @fred(ptr %p0, i32 %p1, ptr dereferenceable(28) %p2, i32 %p3, i32 %p4) local_unnamed_addr align 2 {
 b0:
-  %t0 = getelementptr inbounds %type.0, %type.0* %p0, i32 0, i32 2
-  %t1 = load i32, i32* %t0, align 4
+  %t0 = getelementptr inbounds %type.0, ptr %p0, i32 0, i32 2
+  %t1 = load i32, ptr %t0, align 4
   %t2 = icmp ult i32 %t1, %p1
-  %t3 = getelementptr inbounds %type.0, %type.0* %p2, i32 0, i32 2
+  %t3 = getelementptr inbounds %type.0, ptr %p2, i32 0, i32 2
   br i1 %t2, label %b2, label %b1
 
 b1:
-  %t4 = load i32, i32* %t3, align 4
+  %t4 = load i32, ptr %t3, align 4
   %t5 = icmp ult i32 %t4, %p3
   br i1 %t5, label %b2, label %b3
 
 b2:
-  %t6 = bitcast %type.0* %p0 to %type.4*
-  tail call void @blah(%type.4* %t6)
-  %t7 = load i32, i32* %t3, align 4
-  %t8 = load i32, i32* %t0, align 4
+  tail call void @blah(ptr %p0)
+  %t7 = load i32, ptr %t3, align 4
+  %t8 = load i32, ptr %t0, align 4
   br label %b3
 
 b3:
@@ -42,8 +41,7 @@ b3:
   br i1 %t15, label %b5, label %b4
 
 b4:
-  %t16 = bitcast %type.0* %p0 to %type.4*
-  tail call void @danny(%type.4* %t16)
+  tail call void @danny(ptr %p0)
   br label %b5
 
 b5:
@@ -51,26 +49,25 @@ b5:
   br i1 %t17, label %b33, label %b6
 
 b6:
-  %t18 = load i32, i32* %t0, align 4
+  %t18 = load i32, ptr %t0, align 4
   %t19 = add i32 %t18, %t13
   %t20 = icmp eq i32 %t19, -1
   br i1 %t20, label %b7, label %b8
 
 b7:
-  %t21 = bitcast %type.0* %p0 to %type.4*
-  tail call void @danny(%type.4* %t21)
+  tail call void @danny(ptr %p0)
   br label %b8
 
 b8:
-  %t22 = getelementptr inbounds %type.0, %type.0* %p0, i32 0, i32 3
-  %t23 = load i32, i32* %t22, align 4
+  %t22 = getelementptr inbounds %type.0, ptr %p0, i32 0, i32 3
+  %t23 = load i32, ptr %t22, align 4
   %t24 = icmp ult i32 %t23, %t19
   br i1 %t24, label %b9, label %b10
 
 b9:
-  %t25 = load i32, i32* %t0, align 4
-  tail call void @sammy(%type.0* nonnull %p0, i32 %t19, i32 %t25)
-  %t26 = load i32, i32* %t22, align 4
+  %t25 = load i32, ptr %t0, align 4
+  tail call void @sammy(ptr nonnull %p0, i32 %t19, i32 %t25)
+  %t26 = load i32, ptr %t22, align 4
   br label %b15
 
 b10:
@@ -79,136 +76,123 @@ b10:
 
 b11:
   %t28 = icmp ugt i32 %t23, 15
-  %t29 = getelementptr inbounds %type.0, %type.0* %p0, i32 0, i32 1
+  %t29 = getelementptr inbounds %type.0, ptr %p0, i32 0, i32 1
   br i1 %t28, label %b12, label %b13
 
 b12:
-  %t30 = getelementptr inbounds %type.3, %type.3* %t29, i32 0, i32 0
-  %t31 = load i8*, i8** %t30, align 4
+  %t31 = load ptr, ptr %t29, align 4
   br label %b14
 
 b13:
-  %t32 = bitcast %type.3* %t29 to i8*
   br label %b14
 
 b14:
-  %t33 = phi i8* [ %t31, %b12 ], [ %t32, %b13 ]
-  store i32 0, i32* %t0, align 4
+  %t33 = phi ptr [ %t31, %b12 ], [ %t29, %b13 ]
+  store i32 0, ptr %t0, align 4
   br label %b31
 
 b15:
   %t34 = phi i32 [ %t26, %b9 ], [ %t23, %b10 ]
   %t35 = icmp ugt i32 %t34, 15
-  %t36 = getelementptr inbounds %type.0, %type.0* %p0, i32 0, i32 1
+  %t36 = getelementptr inbounds %type.0, ptr %p0, i32 0, i32 1
   br i1 %t35, label %b16, label %b17
 
 b16:
-  %t37 = getelementptr inbounds %type.3, %type.3* %t36, i32 0, i32 0
-  %t38 = load i8*, i8** %t37, align 4
+  %t38 = load ptr, ptr %t36, align 4
   br label %b18
 
 b17:
-  %t39 = bitcast %type.3* %t36 to i8*
-  %t40 = bitcast %type.3* %t36 to i8*
   br label %b18
 
 b18:
-  %t41 = phi i8* [ %t38, %b16 ], [ %t39, %b17 ]
-  %t42 = phi i8* [ %t38, %b16 ], [ %t40, %b17 ]
-  %t43 = getelementptr inbounds i8, i8* %t41, i32 %p1
-  %t44 = getelementptr inbounds i8, i8* %t43, i32 %t13
-  %t45 = getelementptr inbounds i8, i8* %t42, i32 %p1
-  %t46 = load i32, i32* %t0, align 4
+  %t41 = phi ptr [ %t38, %b16 ], [ %t36, %b17 ]
+  %t42 = phi ptr [ %t38, %b16 ], [ %t36, %b17 ]
+  %t43 = getelementptr inbounds i8, ptr %t41, i32 %p1
+  %t44 = getelementptr inbounds i8, ptr %t43, i32 %t13
+  %t45 = getelementptr inbounds i8, ptr %t42, i32 %p1
+  %t46 = load i32, ptr %t0, align 4
   %t47 = sub i32 %t46, %p1
-  tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %t44, i8* %t45, i32 %t47, i1 false) #1
-  %t48 = icmp eq %type.0* %p0, %p2
-  %t49 = load i32, i32* %t22, align 4
+  tail call void @llvm.memmove.p0.p0.i32(ptr %t44, ptr %t45, i32 %t47, i1 false) #1
+  %t48 = icmp eq ptr %p0, %p2
+  %t49 = load i32, ptr %t22, align 4
   %t50 = icmp ugt i32 %t49, 15
   br i1 %t50, label %b19, label %b20
 
 b19:
-  %t51 = getelementptr inbounds %type.3, %type.3* %t36, i32 0, i32 0
-  %t52 = load i8*, i8** %t51, align 4
+  %t52 = load ptr, ptr %t36, align 4
   br label %b21
 
 b20:
-  %t53 = bitcast %type.3* %t36 to i8*
   br label %b21
 
 b21:
-  %t54 = phi i8* [ %t52, %b19 ], [ %t53, %b20 ]
-  %t55 = getelementptr inbounds i8, i8* %t54, i32 %p1
+  %t54 = phi ptr [ %t52, %b19 ], [ %t36, %b20 ]
+  %t55 = getelementptr inbounds i8, ptr %t54, i32 %p1
   br i1 %t48, label %b22, label %b26
 
 b22:
   br i1 %t50, label %b23, label %b24
 
 b23:
-  %t56 = getelementptr inbounds %type.3, %type.3* %t36, i32 0, i32 0
-  %t57 = load i8*, i8** %t56, align 4
+  %t57 = load ptr, ptr %t36, align 4
   br label %b25
 
 b24:
-  %t58 = bitcast %type.3* %t36 to i8*
   br label %b25
 
 b25:
-  %t59 = phi i8* [ %t57, %b23 ], [ %t58, %b24 ]
+  %t59 = phi ptr [ %t57, %b23 ], [ %t36, %b24 ]
   %t60 = icmp ult i32 %p1, %p3
   %t61 = select i1 %t60, i32 %t13, i32 0
   %t62 = add i32 %t61, %p3
-  %t63 = getelementptr inbounds i8, i8* %t59, i32 %t62
-  tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %t55, i8* %t63, i32 %t13, i1 false) #1
+  %t63 = getelementptr inbounds i8, ptr %t59, i32 %t62
+  tail call void @llvm.memmove.p0.p0.i32(ptr %t55, ptr %t63, i32 %t13, i1 false) #1
   br label %b27
 
 b26:
-  %t64 = getelementptr inbounds %type.0, %type.0* %p2, i32 0, i32 3
-  %t65 = load i32, i32* %t64, align 4
+  %t64 = getelementptr inbounds %type.0, ptr %p2, i32 0, i32 3
+  %t65 = load i32, ptr %t64, align 4
   %t66 = icmp ugt i32 %t65, 15
-  %t67 = getelementptr inbounds %type.0, %type.0* %p2, i32 0, i32 1
-  %t68 = getelementptr inbounds %type.3, %type.3* %t67, i32 0, i32 0
-  %t69 = load i8*, i8** %t68, align 4
-  %t70 = bitcast %type.3* %t67 to i8*
-  %t71 = select i1 %t66, i8* %t69, i8* %t70
-  %t72 = getelementptr inbounds i8, i8* %t71, i32 %p3
-  tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %t55, i8* %t72, i32 %t13, i1 false) #1
+  %t67 = getelementptr inbounds %type.0, ptr %p2, i32 0, i32 1
+  %t69 = load ptr, ptr %t67, align 4
+  %t71 = select i1 %t66, ptr %t69, ptr %t67
+  %t72 = getelementptr inbounds i8, ptr %t71, i32 %p3
+  tail call void @llvm.memcpy.p0.p0.i32(ptr %t55, ptr %t72, i32 %t13, i1 false) #1
   br label %b27
 
 b27:
-  %t73 = load i32, i32* %t22, align 4
+  %t73 = load i32, ptr %t22, align 4
   %t74 = icmp ugt i32 %t73, 15
   br i1 %t74, label %b28, label %b29
 
 b28:
-  %t75 = getelementptr inbounds %type.3, %type.3* %t36, i32 0, i32 0
-  %t76 = load i8*, i8** %t75, align 4
+  %t76 = load ptr, ptr %t36, align 4
   br label %b30
 
 b29:
-  %t77 = bitcast %type.3* %t36 to i8*
   br label %b30
 
 b30:
-  %t78 = phi i8* [ %t76, %b28 ], [ %t77, %b29 ]
-  store i32 %t19, i32* %t0, align 4
-  %t79 = getelementptr inbounds i8, i8* %t78, i32 %t19
+  %t78 = phi ptr [ %t76, %b28 ], [ %t36, %b29 ]
+  store i32 %t19, ptr %t0, align 4
+  %t79 = getelementptr inbounds i8, ptr %t78, i32 %t19
   br label %b31
 
 b31:
-  %t80 = phi i8* [ %t33, %b14 ], [ %t79, %b30 ]
-  store i8 0, i8* %t80, align 1
+  %t80 = phi ptr [ %t33, %b14 ], [ %t79, %b30 ]
+  store i8 0, ptr %t80, align 1
   br label %b33
 
 b33:
-  ret %type.0* %p0
+  ret ptr %p0
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #0
-declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1) #0
+declare void @llvm.memmove.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32, i1) #0
 
-declare void @blah(%type.4*) local_unnamed_addr
-declare void @danny(%type.4*) local_unnamed_addr
-declare void @sammy(%type.0*, i32, i32) local_unnamed_addr align 2
+declare void @blah(ptr) local_unnamed_addr
+declare void @danny(ptr) local_unnamed_addr
+declare void @sammy(ptr, i32, i32) local_unnamed_addr align 2
 
 attributes #0 = { argmemonly nounwind }
 attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/rdf-ignore-undef.ll b/llvm/test/CodeGen/Hexagon/rdf-ignore-undef.ll
index d52676b0e87e4..65f67cc03a39b 100644
--- a/llvm/test/CodeGen/Hexagon/rdf-ignore-undef.ll
+++ b/llvm/test/CodeGen/Hexagon/rdf-ignore-undef.ll
@@ -4,13 +4,13 @@
 
 target triple = "hexagon"
 
-%struct.1 = type { i16, i8, i32, i8*, i8*, i8*, i8*, i8*, i8*, i32* }
+%struct.1 = type { i16, i8, i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
 %struct.0 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32 }
 
-declare void @foo(i8*, %struct.0*) local_unnamed_addr #0
-declare void @bar(%struct.1*, %struct.0* readonly) local_unnamed_addr #0
+declare void @foo(ptr, ptr) local_unnamed_addr #0
+declare void @bar(ptr, ptr readonly) local_unnamed_addr #0
 
-define i32 @fred(i32 %argc, i8** nocapture readonly %argv) local_unnamed_addr #0 {
+define i32 @fred(i32 %argc, ptr nocapture readonly %argv) local_unnamed_addr #0 {
 entry:
   br label %do.body
 
@@ -19,11 +19,11 @@ do.body:                                          ; preds = %if.end88.do.body_cr
   br i1 %cond, label %if.end49, label %if.then124
 
 if.end49:                                         ; preds = %do.body
-  call void @foo(i8* nonnull undef, %struct.0* nonnull undef) #0
+  call void @foo(ptr nonnull undef, ptr nonnull undef) #0
   br i1 undef, label %if.end55, label %if.then53
 
 if.then53:                                        ; preds = %if.end49
-  call void @bar(%struct.1* null, %struct.0* nonnull undef)
+  call void @bar(ptr null, ptr nonnull undef)
   br label %if.end55
 
 if.end55:                                         ; preds = %if.then53, %if.end49
@@ -42,7 +42,7 @@ if.then81:                                        ; preds = %sw.epilog79
   br label %if.end88
 
 if.end88:                                         ; preds = %if.then81, %sw.epilog79
-  store float 0.000000e+00, float* undef, align 4
+  store float 0.000000e+00, ptr undef, align 4
   br i1 undef, label %if.end88.do.body_crit_edge, label %if.then124
 
 if.end88.do.body_crit_edge:                       ; preds = %if.end88

diff  --git a/llvm/test/CodeGen/Hexagon/rdf-inline-asm-fixed.ll b/llvm/test/CodeGen/Hexagon/rdf-inline-asm-fixed.ll
index f42ecf7b7756d..557523172a10a 100644
--- a/llvm/test/CodeGen/Hexagon/rdf-inline-asm-fixed.ll
+++ b/llvm/test/CodeGen/Hexagon/rdf-inline-asm-fixed.ll
@@ -12,19 +12,18 @@ target triple = "hexagon"
 define i32 @foo(i32 %status) #0 {
 entry:
   %arg1 = alloca i32, align 4
-  %0 = bitcast i32* %arg1 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #2
-  store i32 %status, i32* %arg1, align 4, !tbaa !1
-  %1 = call i32 asm sideeffect "r0 = #$1\0Ar1 = $2\0Ar2 = $4\0Atrap0 (#0)\0A$0 = r0", "=r,i,r,*m,r,~{r0},~{r1},~{r2}"(i32 24, i32* nonnull %arg1, i32* elementtype(i32) nonnull %arg1, i32 %status) #2, !srcloc !5
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #2
-  ret i32 %1
+  call void @llvm.lifetime.start.p0(i64 4, ptr %arg1) #2
+  store i32 %status, ptr %arg1, align 4, !tbaa !1
+  %0 = call i32 asm sideeffect "r0 = #$1\0Ar1 = $2\0Ar2 = $4\0Atrap0 (#0)\0A$0 = r0", "=r,i,r,*m,r,~{r0},~{r1},~{r2}"(i32 24, ptr nonnull %arg1, ptr elementtype(i32) nonnull %arg1, i32 %status) #2, !srcloc !5
+  call void @llvm.lifetime.end.p0(i64 4, ptr %arg1) #2
+  ret i32 %0
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
 
 attributes #0 = { nounwind "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv5" "target-features"="-hvx" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/rdf-inline-asm.ll b/llvm/test/CodeGen/Hexagon/rdf-inline-asm.ll
index fcba80d2cbcd2..5f2a88ca575ad 100644
--- a/llvm/test/CodeGen/Hexagon/rdf-inline-asm.ll
+++ b/llvm/test/CodeGen/Hexagon/rdf-inline-asm.ll
@@ -4,7 +4,7 @@
 target datalayout = "e-m:e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a:0-n16:32"
 target triple = "hexagon"
 
- at x = common global i32* null, align 4
+ at x = common global ptr null, align 4
 
 ; Function Attrs: nounwind
 define i32 @inotify_init() #0 {
@@ -15,8 +15,8 @@ entry:
 
 if.then:                                          ; preds = %entry
   %sub = sub nsw i32 0, %0
-  %1 = load i32*, i32** @x, align 4, !tbaa !2
-  store i32 %sub, i32* %1, align 4, !tbaa !6
+  %1 = load ptr, ptr @x, align 4, !tbaa !2
+  store i32 %sub, ptr %1, align 4, !tbaa !6
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry

diff  --git a/llvm/test/CodeGen/Hexagon/rdf-kill-last-op.ll b/llvm/test/CodeGen/Hexagon/rdf-kill-last-op.ll
index b07fd7e492bd4..56c02be9a033c 100644
--- a/llvm/test/CodeGen/Hexagon/rdf-kill-last-op.ll
+++ b/llvm/test/CodeGen/Hexagon/rdf-kill-last-op.ll
@@ -4,7 +4,7 @@
 target triple = "hexagon"
 
 %s.0 = type { %s.1 }
-%s.1 = type { i32, i8* }
+%s.1 = type { i32, ptr }
 
 @g0 = external unnamed_addr constant [6 x [2 x i32]], align 8
 @g1 = external constant %s.0, align 4
@@ -90,9 +90,9 @@ b21:                                              ; preds = %b35, %b19
   br i1 %v6, label %b35, label %b22
 
 b22:                                              ; preds = %b21
-  %v7 = load i32, i32* undef, align 4, !tbaa !0
-  %v8 = load i32, i32* undef, align 4, !tbaa !4
-  %v9 = load i32, i32* undef, align 4, !tbaa !4
+  %v7 = load i32, ptr undef, align 4, !tbaa !0
+  %v8 = load i32, ptr undef, align 4, !tbaa !4
+  %v9 = load i32, ptr undef, align 4, !tbaa !4
   %v10 = icmp ne i32 %v8, 0
   %v11 = and i1 %v10, undef
   %v12 = and i1 undef, %v11
@@ -109,15 +109,15 @@ b24:                                              ; preds = %b23, %b22
   %v17 = icmp ugt i16 %v16, undef
   %v18 = zext i1 %v17 to i32
   %v19 = add nsw i32 %v18, %v0
-  %v20 = load i8, i8* undef, align 4, !tbaa !6
+  %v20 = load i8, ptr undef, align 4, !tbaa !6
   %v21 = zext i8 %v20 to i32
   %v22 = sub nsw i32 6, %v21
   %v23 = add nsw i32 %v22, -1
   br i1 false, label %b39, label %b25, !prof !19
 
 b25:                                              ; preds = %b24
-  %v24 = getelementptr inbounds [6 x [2 x i32]], [6 x [2 x i32]]* @g0, i32 0, i32 %v21, i32 0
-  %v25 = load i32, i32* %v24, align 8, !tbaa !0
+  %v24 = getelementptr inbounds [6 x [2 x i32]], ptr @g0, i32 0, i32 %v21, i32 0
+  %v25 = load i32, ptr %v24, align 8, !tbaa !0
   %v26 = icmp eq i32 undef, %v25
   br i1 %v26, label %b26, label %b27
 
@@ -125,8 +125,8 @@ b26:                                              ; preds = %b25
   br i1 undef, label %b32, label %b27
 
 b27:                                              ; preds = %b26, %b25
-  %v27 = getelementptr inbounds [6 x [2 x i32]], [6 x [2 x i32]]* @g0, i32 0, i32 %v23, i32 0
-  %v28 = load i32, i32* %v27, align 8, !tbaa !0
+  %v27 = getelementptr inbounds [6 x [2 x i32]], ptr @g0, i32 0, i32 %v23, i32 0
+  %v28 = load i32, ptr %v27, align 8, !tbaa !0
   %v29 = icmp eq i32 undef, %v28
   br i1 %v29, label %b28, label %b29
 
@@ -134,8 +134,8 @@ b28:                                              ; preds = %b27
   br i1 undef, label %b32, label %b29
 
 b29:                                              ; preds = %b28, %b27
-  %v30 = load i32, i32* undef, align 4, !tbaa !4
-  %v31 = load i32, i32* undef, align 4, !tbaa !4
+  %v30 = load i32, ptr undef, align 4, !tbaa !4
+  %v31 = load i32, ptr undef, align 4, !tbaa !4
   %v32 = icmp ne i32 %v30, 0
   %v33 = and i1 %v32, undef
   %v34 = and i1 undef, %v33
@@ -156,7 +156,7 @@ b32:                                              ; preds = %b31, %b28, %b26
   br i1 undef, label %b33, label %b34
 
 b33:                                              ; preds = %b32
-  call void (%s.0*, i32, ...) @f1(%s.0* nonnull @g1, i32 6, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 %v7) #0
+  call void (ptr, i32, ...) @f1(ptr nonnull @g1, i32 6, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 %v7) #0
   br label %b34
 
 b34:                                              ; preds = %b33, %b32
@@ -187,7 +187,7 @@ b40:                                              ; preds = %b6
 }
 
 ; Function Attrs: nounwind
-declare void @f1(%s.0*, i32, ...) #0
+declare void @f1(ptr, i32, ...) #0
 
 attributes #0 = { nounwind "target-cpu"="hexagonv55" }
 

diff  --git a/llvm/test/CodeGen/Hexagon/rdf-multiple-phis-up.ll b/llvm/test/CodeGen/Hexagon/rdf-multiple-phis-up.ll
index d23846ac6ed41..e1a6eee4d844d 100644
--- a/llvm/test/CodeGen/Hexagon/rdf-multiple-phis-up.ll
+++ b/llvm/test/CodeGen/Hexagon/rdf-multiple-phis-up.ll
@@ -6,34 +6,33 @@
 
 target triple = "hexagon"
 
-%struct.0 = type { i8*, i8*, [2 x i8*], i32, i32, i8*, i32, i32, i32, i32, i32, [2 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
+%struct.0 = type { ptr, ptr, [2 x ptr], i32, i32, ptr, i32, i32, i32, i32, i32, [2 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
 
-define i32 @fred(i8* %p0) local_unnamed_addr #0 {
+define i32 @fred(ptr %p0) local_unnamed_addr #0 {
 entry:
-  %0 = bitcast i8* %p0 to %struct.0*
   br i1 undef, label %if.then21, label %for.body.i
 
 if.then21:                                        ; preds = %entry
-  %.pr = load i32, i32* undef, align 4
+  %.pr = load i32, ptr undef, align 4
   switch i32 %.pr, label %cleanup [
     i32 1, label %for.body.i
     i32 3, label %if.then60
   ]
 
 for.body.i:                                       ; preds = %for.body.i, %if.then21, %entry
-  %1 = load i8, i8* undef, align 1
-  %cmp7.i = icmp ugt i8 %1, -17
+  %0 = load i8, ptr undef, align 1
+  %cmp7.i = icmp ugt i8 %0, -17
   br i1 %cmp7.i, label %cleanup, label %for.body.i
 
 if.then60:                                        ; preds = %if.then21
-  %call61 = call i32 @foo(%struct.0* nonnull %0) #0
+  %call61 = call i32 @foo(ptr nonnull %p0) #0
   br label %cleanup
 
 cleanup:                                          ; preds = %if.then60, %for.body.i, %if.then21
   ret i32 undef
 }
 
-declare i32 @foo(%struct.0*) local_unnamed_addr #0
+declare i32 @foo(ptr) local_unnamed_addr #0
 
 
 attributes #0 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/rdf-phi-shadows.ll b/llvm/test/CodeGen/Hexagon/rdf-phi-shadows.ll
index f26ab9b0cef2c..462df4aff0f9b 100644
--- a/llvm/test/CodeGen/Hexagon/rdf-phi-shadows.ll
+++ b/llvm/test/CodeGen/Hexagon/rdf-phi-shadows.ll
@@ -3,16 +3,16 @@
 ; CHECK: call printf
 target triple = "hexagon"
 
-%struct.1 = type { i16, i8, i32, i8*, i8*, i8*, i8*, i8*, i8*, i32* }
+%struct.1 = type { i16, i8, i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
 %struct.0 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32 }
 
-declare void @foo(%struct.1*, %struct.0* readonly) local_unnamed_addr #0
+declare void @foo(ptr, ptr readonly) local_unnamed_addr #0
 declare zeroext i8 @bar() local_unnamed_addr #0
-declare i32 @printf(i8* nocapture readonly, ...) local_unnamed_addr #0
+declare i32 @printf(ptr nocapture readonly, ...) local_unnamed_addr #0
 
 @.str = private unnamed_addr constant [5 x i8] c"blah\00", align 1
 
-define i32 @main(i32 %argc, i8** nocapture readonly %argv) local_unnamed_addr #0 {
+define i32 @main(i32 %argc, ptr nocapture readonly %argv) local_unnamed_addr #0 {
 entry:
   %t0 = alloca %struct.0, align 4
   br label %do.body
@@ -25,7 +25,7 @@ if.end49:                                         ; preds = %do.body
   br i1 undef, label %if.end55, label %if.then53
 
 if.then53:                                        ; preds = %if.end49
-  call void @foo(%struct.1* null, %struct.0* nonnull %t0)
+  call void @foo(ptr null, ptr nonnull %t0)
   br label %if.end55
 
 if.end55:                                         ; preds = %if.then53, %if.end49
@@ -50,7 +50,7 @@ if.end88:                                         ; preds = %if.then81, %sw.epil
   %div89 = fdiv float 1.000000e+00, %t1
   %.sroa.speculated = select i1 undef, float 0.000000e+00, float undef
   %conv108 = fpext float %.sroa.speculated to double
-  %call113 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), double undef, double %conv108, i64 undef, i32 undef) #0
+  %call113 = call i32 (ptr, ...) @printf(ptr @.str, double undef, double %conv108, i64 undef, i32 undef) #0
   br i1 undef, label %if.end88.do.body_crit_edge, label %if.then124
 
 if.end88.do.body_crit_edge:                       ; preds = %if.end88

diff  --git a/llvm/test/CodeGen/Hexagon/rdf-phi-up.ll b/llvm/test/CodeGen/Hexagon/rdf-phi-up.ll
index d4e7264712385..d8739e960e8d6 100644
--- a/llvm/test/CodeGen/Hexagon/rdf-phi-up.ll
+++ b/llvm/test/CodeGen/Hexagon/rdf-phi-up.ll
@@ -5,47 +5,45 @@
 
 target triple = "hexagon"
 
-%struct.0 = type { i32, i16, i8* }
+%struct.0 = type { i32, i16, ptr }
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
 
-define i32 @fred(i8* readonly %p0, i32* %p1) local_unnamed_addr #0 {
+define i32 @fred(ptr readonly %p0, ptr %p1) local_unnamed_addr #0 {
 entry:
   %v0 = alloca i16, align 2
-  %v1 = icmp eq i8* %p0, null
+  %v1 = icmp eq ptr %p0, null
   br i1 %v1, label %if.then, label %lor.lhs.false
 
 lor.lhs.false:                                    ; preds = %entry
-  %v2 = bitcast i8* %p0 to %struct.0**
-  %v3 = load %struct.0*, %struct.0** %v2, align 4
-  %v4 = icmp eq %struct.0* %v3, null
+  %v3 = load ptr, ptr %p0, align 4
+  %v4 = icmp eq ptr %v3, null
   br i1 %v4, label %if.then, label %if.else
 
 if.then:                                          ; preds = %lor.lhs.false, %ent
-  %v5 = icmp eq i32* %p1, null
+  %v5 = icmp eq ptr %p1, null
   br i1 %v5, label %cleanup, label %if.then3
 
 if.then3:                                         ; preds = %if.then
-  store i32 0, i32* %p1, align 4
+  store i32 0, ptr %p1, align 4
   br label %cleanup
 
 if.else:                                          ; preds = %lor.lhs.false
-  %v6 = bitcast i16* %v0 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %v6) #0
-  store i16 0, i16* %v0, align 2
-  %v7 = call i32 @foo(%struct.0* nonnull %v3, i16* nonnull %v0) #0
-  %v8 = icmp eq i32* %p1, null
+  call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %v0) #0
+  store i16 0, ptr %v0, align 2
+  %v7 = call i32 @foo(ptr nonnull %v3, ptr nonnull %v0) #0
+  %v8 = icmp eq ptr %p1, null
   br i1 %v8, label %if.end7, label %if.then6
 
 if.then6:                                         ; preds = %if.else
-  %v9 = load i16, i16* %v0, align 2
+  %v9 = load i16, ptr %v0, align 2
   %v10 = zext i16 %v9 to i32
-  store i32 %v10, i32* %p1, align 4
+  store i32 %v10, ptr %p1, align 4
   br label %if.end7
 
 if.end7:                                          ; preds = %if.else, %if.then6
-  call void @llvm.lifetime.end.p0i8(i64 2, i8* nonnull %v6) #0
+  call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %v0) #0
   br label %cleanup
 
 cleanup:                                          ; preds = %if.then3, %if.then,
@@ -53,7 +51,7 @@ cleanup:                                          ; preds = %if.then3, %if.then,
   ret i32 %v11
 }
 
-declare i32 @foo(%struct.0*, i16*) local_unnamed_addr #0
+declare i32 @foo(ptr, ptr) local_unnamed_addr #0
 
 attributes #0 = { nounwind }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/redundant-branching2.ll b/llvm/test/CodeGen/Hexagon/redundant-branching2.ll
index e9305e9fd274a..30f69f370a0ca 100644
--- a/llvm/test/CodeGen/Hexagon/redundant-branching2.ll
+++ b/llvm/test/CodeGen/Hexagon/redundant-branching2.ll
@@ -13,7 +13,7 @@ target triple = "hexagon-unknown--elf"
 declare void @f0() #0
 
 ; Function Attrs: nounwind
-define void @f1(i8* %a0, i32 %a1, i8* %a2, i32* %a3, i32 %a4) #0 {
+define void @f1(ptr %a0, i32 %a1, ptr %a2, ptr %a3, i32 %a4) #0 {
 b0:
   br i1 undef, label %b8, label %b1
 
@@ -22,26 +22,26 @@ b1:                                               ; preds = %b0
   br i1 false, label %b8, label %b2
 
 b2:                                               ; preds = %b1
-  %v0 = getelementptr inbounds i8, i8* %a0, i32 undef
+  %v0 = getelementptr inbounds i8, ptr %a0, i32 undef
   %v1 = sub i32 0, %a1
   %v2 = icmp eq i32 %a1, %a4
   br label %b3
 
 b3:                                               ; preds = %b6, %b2
-  %v3 = phi i8* [ %a2, %b2 ], [ %v17, %b6 ]
-  %v4 = phi i8* [ %v0, %b2 ], [ null, %b6 ]
+  %v3 = phi ptr [ %a2, %b2 ], [ %v17, %b6 ]
+  %v4 = phi ptr [ %v0, %b2 ], [ null, %b6 ]
   %v5 = phi i32 [ 1, %b2 ], [ 0, %b6 ]
   br i1 %v2, label %b4, label %b5
 
 b4:                                               ; preds = %b3
-  %v6 = load i8, i8* %v3, align 1
+  %v6 = load i8, ptr %v3, align 1
   br label %b6
 
 b5:                                               ; preds = %b3
-  %v7 = load i8, i8* %v4, align 1
+  %v7 = load i8, ptr %v4, align 1
   %v8 = zext i8 %v7 to i32
-  %v9 = getelementptr inbounds i8, i8* %v4, i32 %v1
-  %v10 = load i8, i8* %v9, align 1
+  %v9 = getelementptr inbounds i8, ptr %v4, i32 %v1
+  %v10 = load i8, ptr %v9, align 1
   %v11 = zext i8 %v10 to i32
   %v12 = sub nsw i32 %v8, %v11
   br label %b6
@@ -51,7 +51,7 @@ b6:                                               ; preds = %b5, %b4
   %v14 = phi i32 [ %v12, %b5 ], [ 0, %b4 ]
   %v15 = zext i8 %v13 to i32
   %v16 = mul nsw i32 %v14, %v14
-  %v17 = getelementptr inbounds i8, i8* %v3, i32 1
+  %v17 = getelementptr inbounds i8, ptr %v3, i32 1
   %v18 = sub nsw i32 0, %v15
   %v19 = mul nsw i32 %v18, %v18
   %v20 = add nuw i32 %v16, 0
@@ -60,7 +60,7 @@ b6:                                               ; preds = %b5, %b4
   %v23 = lshr i32 %v22, 1
   %v24 = add nuw nsw i32 %v23, %v19
   %v25 = add nsw i32 %v24, 0
-  store i32 %v25, i32* %a3, align 4
+  store i32 %v25, ptr %a3, align 4
   %v26 = icmp eq i32 %v5, %a4
   br i1 %v26, label %b7, label %b3
 

diff  --git a/llvm/test/CodeGen/Hexagon/reg-eq-cmp.ll b/llvm/test/CodeGen/Hexagon/reg-eq-cmp.ll
index b25e7926234cb..49a70f0843f20 100644
--- a/llvm/test/CodeGen/Hexagon/reg-eq-cmp.ll
+++ b/llvm/test/CodeGen/Hexagon/reg-eq-cmp.ll
@@ -10,13 +10,13 @@
 ; CHECK: r{{[0-9]+}} = cmp.eq(r{{[0-9]+}},#65)
 define i32 @f0() #0 {
 b0:
-  %v0 = load i8, i8* @g0, align 1, !tbaa !0
+  %v0 = load i8, ptr @g0, align 1, !tbaa !0
   %v1 = icmp eq i8 %v0, 65
   %v2 = zext i1 %v1 to i32
-  %v3 = load i32, i32* @g1, align 4, !tbaa !3
+  %v3 = load i32, ptr @g1, align 4, !tbaa !3
   %v4 = or i32 %v2, %v3
-  store i32 %v4, i32* @g1, align 4, !tbaa !3
-  store i8 66, i8* @g2, align 1, !tbaa !0
+  store i32 %v4, ptr @g1, align 4, !tbaa !3
+  store i8 66, ptr @g2, align 1, !tbaa !0
   ret i32 undef
 }
 
@@ -24,14 +24,14 @@ b0:
 ; CHECK: r{{[0-9]+}} = cmp.eq(r{{[0-9]+}},r{{[0-9]+}})
 define i32 @f1() #0 {
 b0:
-  %v0 = load i8, i8* @g0, align 1, !tbaa !0
-  %v1 = load i8, i8* @g3, align 1, !tbaa !0
+  %v0 = load i8, ptr @g0, align 1, !tbaa !0
+  %v1 = load i8, ptr @g3, align 1, !tbaa !0
   %v2 = icmp eq i8 %v0, %v1
   %v3 = zext i1 %v2 to i32
-  %v4 = load i32, i32* @g1, align 4, !tbaa !3
+  %v4 = load i32, ptr @g1, align 4, !tbaa !3
   %v5 = or i32 %v3, %v4
-  store i32 %v5, i32* @g1, align 4, !tbaa !3
-  store i8 66, i8* @g2, align 1, !tbaa !0
+  store i32 %v5, ptr @g1, align 4, !tbaa !3
+  store i8 66, ptr @g2, align 1, !tbaa !0
   ret i32 undef
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/reg-scav-imp-use-dbl-vec.ll b/llvm/test/CodeGen/Hexagon/reg-scav-imp-use-dbl-vec.ll
index 7801045352289..920e7a4871505 100644
--- a/llvm/test/CodeGen/Hexagon/reg-scav-imp-use-dbl-vec.ll
+++ b/llvm/test/CodeGen/Hexagon/reg-scav-imp-use-dbl-vec.ll
@@ -6,10 +6,10 @@
 
 target triple = "hexagon-unknown--elf"
 
-%s.0 = type { i64, i8*, [4 x i32], [4 x i32], [4 x i32], i32, i8, i8, [6 x i8] }
+%s.0 = type { i64, ptr, [4 x i32], [4 x i32], [4 x i32], i32, i8, i8, [6 x i8] }
 
 ; Function Attrs: nounwind
-declare noalias i8* @f0() local_unnamed_addr #0
+declare noalias ptr @f0() local_unnamed_addr #0
 
 ; Function Attrs: nounwind
 declare void @f1() local_unnamed_addr #0
@@ -54,12 +54,11 @@ declare <32 x i32> @llvm.hexagon.V6.vaslw.acc.128B(<32 x i32>, <32 x i32>, i32)
 declare <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32>, <32 x i32>, i32) #1
 
 ; Function Attrs: noreturn nounwind
-define void @f2(%s.0* noalias nocapture readonly %a01, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6) local_unnamed_addr #2 {
+define void @f2(ptr noalias nocapture readonly %a01, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6) local_unnamed_addr #2 {
 b0:
-  %v0 = getelementptr inbounds %s.0, %s.0* %a01, i32 0, i32 1
-  %v1 = bitcast i8** %v0 to i16**
-  %v2 = load i16*, i16** %v1, align 4
-  %v3 = tail call i8* @f0()
+  %v0 = getelementptr inbounds %s.0, ptr %a01, i32 0, i32 1
+  %v2 = load ptr, ptr %v0, align 4
+  %v3 = tail call ptr @f0()
   %v4 = icmp sgt i32 %a1, 0
   %v5 = select i1 %v4, i32 0, i32 %a1
   %v6 = or i32 %v5, 1
@@ -78,8 +77,8 @@ b2:                                               ; preds = %b0
   %v13 = ashr i32 %a4, 6
   %v14 = ashr i32 %a2, 6
   %v15 = icmp ult i32 %v10, 128
-  %v16 = tail call i8* @f0()
-  %v17 = icmp eq i8* %v16, null
+  %v16 = tail call ptr @f0()
+  %v17 = icmp eq ptr %v16, null
   br i1 %v17, label %b6, label %b3, !prof !2
 
 b3:                                               ; preds = %b2
@@ -100,7 +99,7 @@ b6:                                               ; preds = %b5, %b2
   unreachable
 
 b7:                                               ; preds = %b8, %b3
-  %v22 = phi i8* [ %v16, %b3 ], [ %v28, %b8 ]
+  %v22 = phi ptr [ %v16, %b3 ], [ %v28, %b8 ]
   %v23 = phi i32 [ 1, %b3 ], [ %v27, %b8 ]
   %v24 = sub i32 %v23, %a3
   %v25 = mul i32 %v24, %v12
@@ -109,8 +108,8 @@ b7:                                               ; preds = %b8, %b3
 
 b8:                                               ; preds = %b13, %b7
   %v27 = add nuw nsw i32 %v23, 1
-  %v28 = tail call i8* @f0()
-  %v29 = icmp eq i8* %v28, null
+  %v28 = tail call ptr @f0()
+  %v29 = icmp eq ptr %v28, null
   br i1 %v29, label %b5, label %b7, !prof !2
 
 b9:                                               ; preds = %b7
@@ -121,7 +120,6 @@ b9:                                               ; preds = %b7
   %v34 = tail call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 undef) #3
   %v35 = tail call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 8) #3
   %v36 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> %v35, <32 x i32> %v35)
-  %v37 = bitcast i8* %v22 to i16*
   br i1 %v15, label %b13, label %b10
 
 b10:                                              ; preds = %b9
@@ -143,24 +141,20 @@ b11:                                              ; preds = %b11, %b10
   %v50 = phi i32 [ %v125, %b11 ], [ undef, %b10 ]
   %v51 = add i32 %v49, %v33
   %v52 = shl nsw i32 %v51, 6
-  %v53 = getelementptr inbounds i16, i16* %v2, i32 %v52
-  %v54 = bitcast i16* %v53 to <32 x i32>*
-  %v55 = load <32 x i32>, <32 x i32>* %v54, align 128, !tbaa !3
+  %v53 = getelementptr inbounds i16, ptr %v2, i32 %v52
+  %v55 = load <32 x i32>, ptr %v53, align 128, !tbaa !3
   %v56 = add i32 %v49, %v32
   %v57 = shl nsw i32 %v56, 6
-  %v58 = getelementptr inbounds i16, i16* %v2, i32 %v57
-  %v59 = bitcast i16* %v58 to <32 x i32>*
-  %v60 = load <32 x i32>, <32 x i32>* %v59, align 128, !tbaa !3
+  %v58 = getelementptr inbounds i16, ptr %v2, i32 %v57
+  %v60 = load <32 x i32>, ptr %v58, align 128, !tbaa !3
   %v61 = add i32 %v31, %v49
   %v62 = shl nsw i32 %v61, 6
-  %v63 = getelementptr inbounds i16, i16* %v2, i32 %v62
-  %v64 = bitcast i16* %v63 to <32 x i32>*
-  %v65 = load <32 x i32>, <32 x i32>* %v64, align 128, !tbaa !3
+  %v63 = getelementptr inbounds i16, ptr %v2, i32 %v62
+  %v65 = load <32 x i32>, ptr %v63, align 128, !tbaa !3
   %v66 = add i32 %v49, %v30
   %v67 = shl nsw i32 %v66, 6
-  %v68 = getelementptr inbounds i16, i16* %v2, i32 %v67
-  %v69 = bitcast i16* %v68 to <32 x i32>*
-  %v70 = load <32 x i32>, <32 x i32>* %v69, align 128, !tbaa !3
+  %v68 = getelementptr inbounds i16, ptr %v2, i32 %v67
+  %v70 = load <32 x i32>, ptr %v68, align 128, !tbaa !3
   %v71 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v55, <32 x i32> undef, i32 92)
   %v72 = tail call <32 x i32> @llvm.hexagon.V6.vasrh.128B(<32 x i32> %v71, i32 1) #3
   %v73 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v72, <32 x i32> %v34) #3
@@ -193,29 +187,24 @@ b11:                                              ; preds = %b11, %b10
   %v100 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v99) #3
   %v101 = tail call <32 x i32> @llvm.hexagon.V6.vshufeh.128B(<32 x i32> undef, <32 x i32> %v100) #3
   %v102 = shl nsw i32 %v49, 6
-  %v103 = getelementptr inbounds i16, i16* %v37, i32 %v102
-  %v104 = bitcast i16* %v103 to <32 x i32>*
-  store <32 x i32> %v101, <32 x i32>* %v104, align 128, !tbaa !6
+  %v103 = getelementptr inbounds i16, ptr %v22, i32 %v102
+  store <32 x i32> %v101, ptr %v103, align 128, !tbaa !6
   %v105 = or i32 %v49, 1
   %v106 = add i32 %v105, %v32
   %v107 = shl nsw i32 %v106, 6
-  %v108 = getelementptr inbounds i16, i16* %v2, i32 %v107
-  %v109 = bitcast i16* %v108 to <32 x i32>*
-  %v110 = load <32 x i32>, <32 x i32>* %v109, align 128, !tbaa !3
+  %v108 = getelementptr inbounds i16, ptr %v2, i32 %v107
+  %v110 = load <32 x i32>, ptr %v108, align 128, !tbaa !3
   %v111 = add i32 %v105, %v30
   %v112 = shl nsw i32 %v111, 6
-  %v113 = getelementptr inbounds i16, i16* %v2, i32 %v112
-  %v114 = bitcast i16* %v113 to <32 x i32>*
-  %v115 = load <32 x i32>, <32 x i32>* %v114, align 128, !tbaa !3
+  %v113 = getelementptr inbounds i16, ptr %v2, i32 %v112
+  %v115 = load <32 x i32>, ptr %v113, align 128, !tbaa !3
   %v116 = add i32 %v105, %v26
   %v117 = shl nsw i32 %v116, 6
-  %v118 = getelementptr inbounds i16, i16* %v2, i32 %v117
-  %v119 = bitcast i16* %v118 to <32 x i32>*
-  %v120 = load <32 x i32>, <32 x i32>* %v119, align 128, !tbaa !3
+  %v118 = getelementptr inbounds i16, ptr %v2, i32 %v117
+  %v120 = load <32 x i32>, ptr %v118, align 128, !tbaa !3
   %v121 = shl nsw i32 %v105, 6
-  %v122 = getelementptr inbounds i16, i16* %v37, i32 %v121
-  %v123 = bitcast i16* %v122 to <32 x i32>*
-  store <32 x i32> %v45, <32 x i32>* %v123, align 128, !tbaa !6
+  %v122 = getelementptr inbounds i16, ptr %v22, i32 %v121
+  store <32 x i32> %v45, ptr %v122, align 128, !tbaa !6
   %v124 = add nuw nsw i32 %v49, 2
   %v125 = add i32 %v50, -2
   %v126 = icmp eq i32 %v125, 0
@@ -228,19 +217,16 @@ b13:                                              ; preds = %b12, %b9
   %v127 = phi i32 [ 0, %b9 ], [ %v124, %b12 ]
   %v128 = add i32 %v127, %v33
   %v129 = shl nsw i32 %v128, 6
-  %v130 = getelementptr inbounds i16, i16* %v2, i32 %v129
-  %v131 = bitcast i16* %v130 to <32 x i32>*
-  %v132 = load <32 x i32>, <32 x i32>* %v131, align 128, !tbaa !3
+  %v130 = getelementptr inbounds i16, ptr %v2, i32 %v129
+  %v132 = load <32 x i32>, ptr %v130, align 128, !tbaa !3
   %v133 = add i32 %v127, %v30
   %v134 = shl nsw i32 %v133, 6
-  %v135 = getelementptr inbounds i16, i16* %v2, i32 %v134
-  %v136 = bitcast i16* %v135 to <32 x i32>*
-  %v137 = load <32 x i32>, <32 x i32>* %v136, align 128, !tbaa !3
+  %v135 = getelementptr inbounds i16, ptr %v2, i32 %v134
+  %v137 = load <32 x i32>, ptr %v135, align 128, !tbaa !3
   %v138 = add i32 %v127, %v26
   %v139 = shl nsw i32 %v138, 6
-  %v140 = getelementptr inbounds i16, i16* %v2, i32 %v139
-  %v141 = bitcast i16* %v140 to <32 x i32>*
-  %v142 = load <32 x i32>, <32 x i32>* %v141, align 128, !tbaa !3
+  %v140 = getelementptr inbounds i16, ptr %v2, i32 %v139
+  %v142 = load <32 x i32>, ptr %v140, align 128, !tbaa !3
   %v143 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v132, <32 x i32> undef, i32 92)
   %v144 = tail call <32 x i32> @llvm.hexagon.V6.vasrh.128B(<32 x i32> %v143, i32 1) #3
   %v145 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v144, <32 x i32> %v34) #3
@@ -263,9 +249,8 @@ b13:                                              ; preds = %b12, %b9
   %v162 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> %v161, <32 x i32> undef)
   %v163 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v162) #3
   %v164 = tail call <32 x i32> @llvm.hexagon.V6.vshufeh.128B(<32 x i32> %v163, <32 x i32> undef) #3
-  %v165 = getelementptr inbounds i16, i16* %v37, i32 undef
-  %v166 = bitcast i16* %v165 to <32 x i32>*
-  store <32 x i32> %v164, <32 x i32>* %v166, align 128, !tbaa !6
+  %v165 = getelementptr inbounds i16, ptr %v22, i32 undef
+  store <32 x i32> %v164, ptr %v165, align 128, !tbaa !6
   br label %b8
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/reg-scavengebug-2.ll b/llvm/test/CodeGen/Hexagon/reg-scavengebug-2.ll
index 4b2dd36e6c14f..150d7e271818a 100644
--- a/llvm/test/CodeGen/Hexagon/reg-scavengebug-2.ll
+++ b/llvm/test/CodeGen/Hexagon/reg-scavengebug-2.ll
@@ -4,25 +4,23 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @f0(i16* nocapture %a0) #0 {
+define void @f0(ptr nocapture %a0) #0 {
 b0:
   br i1 undef, label %b1, label %b5
 
 b1:                                               ; preds = %b0
-  %v0 = bitcast i16* %a0 to <16 x i32>*
   br label %b2
 
 b2:                                               ; preds = %b4, %b1
   %v1 = phi i32 [ 0, %b1 ], [ %v50, %b4 ]
-  %v2 = phi <16 x i32>* [ %v0, %b1 ], [ undef, %b4 ]
+  %v2 = phi ptr [ %a0, %b1 ], [ undef, %b4 ]
   br label %b3
 
 b3:                                               ; preds = %b3, %b2
   %v3 = phi i32 [ -4, %b2 ], [ %v40, %b3 ]
   %v4 = add i32 0, -64
-  %v5 = getelementptr inbounds i8, i8* null, i32 %v4
-  %v6 = bitcast i8* %v5 to <16 x i32>*
-  %v7 = load <16 x i32>, <16 x i32>* %v6, align 64, !tbaa !0
+  %v5 = getelementptr inbounds i8, ptr null, i32 %v4
+  %v7 = load <16 x i32>, ptr %v5, align 64, !tbaa !0
   %v8 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> undef, <16 x i32> %v7, i32 4)
   %v9 = tail call <16 x i32> @llvm.hexagon.V6.vabs
diff ub(<16 x i32> %v8, <16 x i32> zeroinitializer)
   %v10 = tail call <64 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v9, <16 x i32> undef)
@@ -66,10 +64,10 @@ b4:                                               ; preds = %b3
   %v45 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> zeroinitializer, <16 x i32> %v44, i32 -2)
   %v46 = tail call <32 x i32> @llvm.hexagon.V6.vunpackub(<16 x i32> %v42)
   %v47 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v45)
-  store <16 x i32> %v47, <16 x i32>* %v2, align 64, !tbaa !0
-  %v48 = getelementptr inbounds <16 x i32>, <16 x i32>* null, i32 1
+  store <16 x i32> %v47, ptr %v2, align 64, !tbaa !0
+  %v48 = getelementptr inbounds <16 x i32>, ptr null, i32 1
   %v49 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v46)
-  store <16 x i32> %v49, <16 x i32>* %v48, align 64, !tbaa !0
+  store <16 x i32> %v49, ptr %v48, align 64, !tbaa !0
   %v50 = add nsw i32 %v1, 1
   %v51 = icmp slt i32 %v50, 0
   br i1 %v51, label %b2, label %b5

diff  --git a/llvm/test/CodeGen/Hexagon/reg-scavengebug-3.ll b/llvm/test/CodeGen/Hexagon/reg-scavengebug-3.ll
index 91fb350daab59..62b5cdb36a18a 100644
--- a/llvm/test/CodeGen/Hexagon/reg-scavengebug-3.ll
+++ b/llvm/test/CodeGen/Hexagon/reg-scavengebug-3.ll
@@ -19,13 +19,13 @@ target triple = "hexagon"
 @VectorPairResult = external global <32 x i32>, align 128
 
 ; Function Attrs: nounwind
-declare void @print_vector(i32, i8*) #0
+declare void @print_vector(i32, ptr) #0
 
 ; Function Attrs: nounwind
-declare i32 @printf(i8*, ...) #0
+declare i32 @printf(ptr, ...) #0
 
 ; Function Attrs: nounwind
-declare void @print_vecpred(i32, i8*) #0
+declare void @print_vecpred(i32, ptr) #0
 
 ; Function Attrs: nounwind readnone
 declare <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1>, i32) #1
@@ -48,29 +48,29 @@ declare <16 x i32> @llvm.hexagon.V6.vsubhnq(<64 x i1>, <16 x i32>, <16 x i32>) #
 ; Function Attrs: nounwind
 define i32 @main() #0 {
 entry:
-  %0 = load <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
-  %1 = load <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
-  call void @print_vecpred(i32 64, i8* bitcast (<16 x i32>* @Q6VecPredResult to i8*))
-  %2 = load <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
-  %call50 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([57 x i8], [57 x i8]* @.str52, i32 0, i32 0)) #3
-  %3 = load <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
-  %call52 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([59 x i8], [59 x i8]* @.str54, i32 0, i32 0)) #3
-  %4 = load <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
-  %call300 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str290, i32 0, i32 0)) #3
-  %5 = load <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %6 = load <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
-  %call1373 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([20 x i8], [20 x i8]* @.str1, i32 0, i32 0), i8* getelementptr inbounds ([43 x i8], [43 x i8]* @.str2, i32 0, i32 0), i8* getelementptr inbounds ([60 x i8], [60 x i8]* @.str243, i32 0, i32 0)) #3
+  %0 = load <16 x i32>, ptr @vecpreds, align 64
+  %1 = load <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
+  call void @print_vecpred(i32 64, ptr @Q6VecPredResult)
+  %2 = load <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
+  %call50 = call i32 (ptr, ...) @printf(ptr @.str52) #3
+  %3 = load <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
+  %call52 = call i32 (ptr, ...) @printf(ptr @.str54) #3
+  %4 = load <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
+  %call300 = call i32 (ptr, ...) @printf(ptr @.str290) #3
+  %5 = load <16 x i32>, ptr @vectors, align 64
+  %6 = load <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
+  %call1373 = call i32 (ptr, ...) @printf(ptr @.str1, ptr @.str2, ptr @.str243) #3
   %7 = call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1)
-  %call1381 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([20 x i8], [20 x i8]* @.str1, i32 0, i32 0), i8* getelementptr inbounds ([43 x i8], [43 x i8]* @.str2, i32 0, i32 0), i8* getelementptr inbounds ([77 x i8], [77 x i8]* @.str251, i32 0, i32 0)) #3
+  %call1381 = call i32 (ptr, ...) @printf(ptr @.str1, ptr @.str2, ptr @.str251) #3
   %8 = call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1)
   %9 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %8, i32 16843009)
-  call void @print_vector(i32 64, i8* bitcast (<16 x i32>* @VectorResult to i8*))
+  call void @print_vector(i32 64, ptr @VectorResult)
   %10 = call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1)
   %11 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %10, i32 16843009)
   %12 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %11, i32 -1)
   %13 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %12, i32 -1)
   %14 = call <16 x i32> @llvm.hexagon.V6.vsubhnq(<64 x i1> %13, <16 x i32> undef, <16 x i32> undef)
-  store <16 x i32> %14, <16 x i32>* @VectorResult, align 64
+  store <16 x i32> %14, ptr @VectorResult, align 64
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/reg-scavengebug-4.ll b/llvm/test/CodeGen/Hexagon/reg-scavengebug-4.ll
index 138b7321086b5..b0211888e703e 100644
--- a/llvm/test/CodeGen/Hexagon/reg-scavengebug-4.ll
+++ b/llvm/test/CodeGen/Hexagon/reg-scavengebug-4.ll
@@ -6,17 +6,17 @@
 ; and requires another register to compute the location on the stack.
 
 ; Function Attrs: nounwind
-define void @f0(i8* nocapture readonly %a0, i32 %a1, i32 %a2, i8* nocapture readonly %a3, i8* nocapture readonly %a4, i8* nocapture %a5) #0 {
+define void @f0(ptr nocapture readonly %a0, i32 %a1, i32 %a2, ptr nocapture readonly %a3, ptr nocapture readonly %a4, ptr nocapture %a5) #0 {
 b0:
   %v0 = tail call <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32> zeroinitializer)
   br i1 undef, label %b1, label %b5
 
 b1:                                               ; preds = %b0
-  %v1 = getelementptr inbounds i8, i8* %a3, i32 31
+  %v1 = getelementptr inbounds i8, ptr %a3, i32 31
   br label %b2
 
 b2:                                               ; preds = %b4, %b1
-  %v2 = phi <16 x i32>* [ undef, %b1 ], [ %v102, %b4 ]
+  %v2 = phi ptr [ undef, %b1 ], [ %v102, %b4 ]
   %v3 = phi i32 [ %a2, %b1 ], [ undef, %b4 ]
   %v4 = tail call <32 x i32> @llvm.hexagon.V6.vmpyh(<16 x i32> undef, i32 undef)
   br label %b3
@@ -25,7 +25,7 @@ b3:                                               ; preds = %b3, %b2
   %v5 = phi <32 x i32> [ %v4, %b2 ], [ %v72, %b3 ]
   %v6 = phi <32 x i32> [ zeroinitializer, %b2 ], [ %v71, %b3 ]
   %v7 = phi i32 [ -4, %b2 ], [ %v73, %b3 ]
-  %v8 = load <16 x i32>, <16 x i32>* undef, align 64
+  %v8 = load <16 x i32>, ptr undef, align 64
   %v9 = mul nsw i32 %v7, 9
   %v10 = tail call <16 x i32> @llvm.hexagon.V6.vlalignb(<16 x i32> %v8, <16 x i32> undef, i32 4)
   %v11 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> undef, <16 x i32> %v8, i32 4)
@@ -44,8 +44,8 @@ b3:                                               ; preds = %b3, %b2
   %v24 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v23, <16 x i32> %v13, <16 x i32> undef, i32 6)
   %v25 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v24, <16 x i32> %v13, <16 x i32> undef, i32 7)
   %v26 = add nsw i32 %v9, 36
-  %v27 = getelementptr inbounds i8, i8* %a3, i32 %v26
-  %v28 = load i8, i8* %v27, align 1
+  %v27 = getelementptr inbounds i8, ptr %a3, i32 %v26
+  %v28 = load i8, ptr %v27, align 1
   %v29 = zext i8 %v28 to i32
   %v30 = tail call i32 @llvm.hexagon.S2.vsplatrb(i32 %v29)
   %v31 = tail call <32 x i32> @llvm.hexagon.V6.vmpyub(<16 x i32> %v21, i32 %v30)
@@ -63,8 +63,8 @@ b3:                                               ; preds = %b3, %b2
   %v43 = tail call <32 x i32> @llvm.hexagon.V6.vadduhw(<16 x i32> %v41, <16 x i32> %v42)
   %v44 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32> %v5, <32 x i32> %v43)
   %v45 = add nsw i32 %v9, 37
-  %v46 = getelementptr inbounds i8, i8* %a3, i32 %v45
-  %v47 = load i8, i8* %v46, align 1
+  %v46 = getelementptr inbounds i8, ptr %a3, i32 %v45
+  %v47 = load i8, ptr %v46, align 1
   %v48 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v38, <32 x i32> undef, i32 16843009)
   %v49 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32> %v44, <32 x i32> undef)
   %v50 = tail call <16 x i32> @llvm.hexagon.V6.vlalignb(<16 x i32> %v8, <16 x i32> undef, i32 2)
@@ -78,7 +78,7 @@ b3:                                               ; preds = %b3, %b2
   %v58 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> undef, <16 x i32> %v8, i32 1)
   %v59 = tail call <16 x i32> @llvm.hexagon.V6.vabs
diff ub(<16 x i32> %v58, <16 x i32> undef)
   %v60 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> undef, <16 x i32> %v59, <16 x i32> undef, i32 7)
-  %v61 = load i8, i8* undef, align 1
+  %v61 = load i8, ptr undef, align 1
   %v62 = zext i8 %v61 to i32
   %v63 = tail call i32 @llvm.hexagon.S2.vsplatrb(i32 %v62)
   %v64 = tail call <32 x i32> @llvm.hexagon.V6.vmpyub(<16 x i32> undef, i32 %v63)
@@ -97,7 +97,7 @@ b3:                                               ; preds = %b3, %b2
 b4:                                               ; preds = %b3
   %v75 = phi <32 x i32> [ %v72, %b3 ]
   %v76 = phi <32 x i32> [ %v71, %b3 ]
-  %v77 = load i8, i8* %v1, align 1
+  %v77 = load i8, ptr %v1, align 1
   %v78 = zext i8 %v77 to i32
   %v79 = tail call i32 @llvm.hexagon.S2.vsplatrb(i32 %v78)
   %v80 = tail call <32 x i32> @llvm.hexagon.V6.vmpyub(<16 x i32> undef, i32 %v79)
@@ -122,8 +122,8 @@ b4:                                               ; preds = %b3
   %v99 = tail call <16 x i32> @llvm.hexagon.V6.vaddwnq(<64 x i1> undef, <16 x i32> undef, <16 x i32> undef)
   %v100 = tail call <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32> %v99, <16 x i32> %v98)
   %v101 = tail call <16 x i32> @llvm.hexagon.V6.vshuffeb(<16 x i32> %v100, <16 x i32> undef)
-  %v102 = getelementptr inbounds <16 x i32>, <16 x i32>* %v2, i32 1
-  store <16 x i32> %v101, <16 x i32>* %v2, align 64
+  %v102 = getelementptr inbounds <16 x i32>, ptr %v2, i32 1
+  store <16 x i32> %v101, ptr %v2, align 64
   %v103 = icmp sgt i32 %v3, 64
   br i1 %v103, label %b2, label %b5
 

diff  --git a/llvm/test/CodeGen/Hexagon/reg-scavengebug-5.ll b/llvm/test/CodeGen/Hexagon/reg-scavengebug-5.ll
index ce7b5f4fe94a3..9ad00cd83ad81 100644
--- a/llvm/test/CodeGen/Hexagon/reg-scavengebug-5.ll
+++ b/llvm/test/CodeGen/Hexagon/reg-scavengebug-5.ll
@@ -7,32 +7,30 @@
 ; the code changed when a spill is inserted, was not always returning true.
 
 ; Function Attrs: nounwind
-define void @f0(i8* noalias nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, i8* noalias nocapture %a4, i32 %a5) #0 {
+define void @f0(ptr noalias nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, ptr noalias nocapture %a4, i32 %a5) #0 {
 b0:
   %v0 = sub i32 0, %a1
-  %v1 = getelementptr inbounds i8, i8* %a0, i32 %v0
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 %a1
+  %v1 = getelementptr inbounds i8, ptr %a0, i32 %v0
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 %a1
   %v3 = mul nsw i32 %a1, 2
-  %v4 = getelementptr inbounds i8, i8* %a0, i32 %v3
-  %v5 = bitcast i8* %a4 to <16 x i32>*
-  %v6 = getelementptr inbounds i8, i8* %a4, i32 %a5
-  %v7 = bitcast i8* %v6 to <16 x i32>*
+  %v4 = getelementptr inbounds i8, ptr %a0, i32 %v3
+  %v6 = getelementptr inbounds i8, ptr %a4, i32 %a5
   %v8 = tail call <16 x i32> @llvm.hexagon.V6.vd0()
-  %v9 = load <16 x i32>, <16 x i32>* undef, align 64
+  %v9 = load <16 x i32>, ptr undef, align 64
   %v10 = or i64 undef, 0
   %v11 = trunc i64 %v10 to i32
-  %v12 = load i8, i8* undef, align 1
+  %v12 = load i8, ptr undef, align 1
   %v13 = zext i8 %v12 to i64
   %v14 = shl nuw nsw i64 %v13, 8
   %v15 = or i64 0, %v14
   %v16 = trunc i64 %v15 to i32
-  %v17 = load i8, i8* undef, align 1
+  %v17 = load i8, ptr undef, align 1
   %v18 = zext i8 %v17 to i64
   %v19 = or i64 0, %v18
   %v20 = or i64 %v19, 0
   %v21 = or i64 %v20, 0
   %v22 = trunc i64 %v21 to i32
-  %v23 = load i8, i8* undef, align 1
+  %v23 = load i8, ptr undef, align 1
   %v24 = zext i8 %v23 to i64
   %v25 = shl nuw nsw i64 %v24, 8
   %v26 = or i64 undef, %v25
@@ -41,16 +39,12 @@ b0:
   br i1 %v28, label %b1, label %b6
 
 b1:                                               ; preds = %b0
-  %v29 = getelementptr inbounds i8, i8* %v4, i32 64
-  %v30 = bitcast i8* %v29 to <16 x i32>*
-  %v31 = getelementptr inbounds i8, i8* %v2, i32 64
-  %v32 = bitcast i8* %v31 to <16 x i32>*
-  %v33 = getelementptr inbounds i8, i8* %a0, i32 64
-  %v34 = bitcast i8* %v33 to <16 x i32>*
-  %v35 = getelementptr inbounds i8, i8* %v1, i32 64
-  %v36 = bitcast i8* %v35 to <16 x i32>*
+  %v29 = getelementptr inbounds i8, ptr %v4, i32 64
+  %v31 = getelementptr inbounds i8, ptr %v2, i32 64
+  %v33 = getelementptr inbounds i8, ptr %a0, i32 64
+  %v35 = getelementptr inbounds i8, ptr %v1, i32 64
   %v37 = add i32 0, 64
-  %v38 = getelementptr i8, i8* %a4, i32 %v37
+  %v38 = getelementptr i8, ptr %a4, i32 %v37
   %v39 = add i32 %a2, -65
   %v40 = lshr i32 %v39, 6
   %v41 = add nuw nsw i32 %v40, 1
@@ -64,19 +58,19 @@ b2:                                               ; preds = %b2, %b1
   %v46 = phi <16 x i32> [ %v113, %b2 ], [ undef, %b1 ]
   %v47 = phi <16 x i32> [ %v102, %b2 ], [ %v8, %b1 ]
   %v48 = phi <16 x i32> [ %v118, %b2 ], [ undef, %b1 ]
-  %v49 = phi <16 x i32>* [ %v112, %b2 ], [ %v36, %b1 ]
-  %v50 = phi <16 x i32>* [ %v114, %b2 ], [ %v34, %b1 ]
-  %v51 = phi <16 x i32>* [ %v116, %b2 ], [ %v32, %b1 ]
-  %v52 = phi <16 x i32>* [ undef, %b2 ], [ %v30, %b1 ]
-  %v53 = phi <16 x i32>* [ %v139, %b2 ], [ %v5, %b1 ]
-  %v54 = phi <16 x i32>* [ %v143, %b2 ], [ %v7, %b1 ]
+  %v49 = phi ptr [ %v112, %b2 ], [ %v35, %b1 ]
+  %v50 = phi ptr [ %v114, %b2 ], [ %v33, %b1 ]
+  %v51 = phi ptr [ %v116, %b2 ], [ %v31, %b1 ]
+  %v52 = phi ptr [ undef, %b2 ], [ %v29, %b1 ]
+  %v53 = phi ptr [ %v139, %b2 ], [ %a4, %b1 ]
+  %v54 = phi ptr [ %v143, %b2 ], [ %v6, %b1 ]
   %v55 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v46, <16 x i32> %v45, i32 1)
   %v56 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> undef, <16 x i32> %v47, i32 1)
-  %v57 = getelementptr inbounds <16 x i32>, <16 x i32>* %v49, i32 1
-  %v58 = load <16 x i32>, <16 x i32>* %v49, align 64
-  %v59 = getelementptr inbounds <16 x i32>, <16 x i32>* %v50, i32 1
-  %v60 = load <16 x i32>, <16 x i32>* %v50, align 64
-  %v61 = load <16 x i32>, <16 x i32>* %v51, align 64
+  %v57 = getelementptr inbounds <16 x i32>, ptr %v49, i32 1
+  %v58 = load <16 x i32>, ptr %v49, align 64
+  %v59 = getelementptr inbounds <16 x i32>, ptr %v50, i32 1
+  %v60 = load <16 x i32>, ptr %v50, align 64
+  %v61 = load <16 x i32>, ptr %v51, align 64
   %v62 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v58, <16 x i32> %v46, i32 1)
   %v63 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v60, <16 x i32> undef, i32 1)
   %v64 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v61, <16 x i32> undef, i32 1)
@@ -93,18 +87,18 @@ b2:                                               ; preds = %b2, %b1
   %v75 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> zeroinitializer, <16 x i32> %v65, i32 %v27)
   %v76 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v74)
   %v77 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %v76, <16 x i32> undef, i32 %a3)
-  %v78 = getelementptr inbounds <16 x i32>, <16 x i32>* %v53, i32 1
-  store <16 x i32> %v77, <16 x i32>* %v53, align 64
+  %v78 = getelementptr inbounds <16 x i32>, ptr %v53, i32 1
+  store <16 x i32> %v77, ptr %v53, align 64
   %v79 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v75)
   %v80 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %v79, <16 x i32> undef, i32 %a3)
-  %v81 = getelementptr inbounds <16 x i32>, <16 x i32>* %v54, i32 1
-  store <16 x i32> %v80, <16 x i32>* %v54, align 64
-  %v82 = getelementptr inbounds <16 x i32>, <16 x i32>* %v49, i32 2
-  %v83 = load <16 x i32>, <16 x i32>* %v57, align 64
-  %v84 = getelementptr inbounds <16 x i32>, <16 x i32>* %v50, i32 2
-  %v85 = load <16 x i32>, <16 x i32>* %v59, align 64
-  %v86 = load <16 x i32>, <16 x i32>* undef, align 64
-  %v87 = load <16 x i32>, <16 x i32>* null, align 64
+  %v81 = getelementptr inbounds <16 x i32>, ptr %v54, i32 1
+  store <16 x i32> %v80, ptr %v54, align 64
+  %v82 = getelementptr inbounds <16 x i32>, ptr %v49, i32 2
+  %v83 = load <16 x i32>, ptr %v57, align 64
+  %v84 = getelementptr inbounds <16 x i32>, ptr %v50, i32 2
+  %v85 = load <16 x i32>, ptr %v59, align 64
+  %v86 = load <16 x i32>, ptr undef, align 64
+  %v87 = load <16 x i32>, ptr null, align 64
   %v88 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v83, <16 x i32> %v58, i32 1)
   %v89 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v85, <16 x i32> %v60, i32 1)
   %v90 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v86, <16 x i32> %v61, i32 1)
@@ -116,30 +110,30 @@ b2:                                               ; preds = %b2, %b1
   %v96 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v95, <16 x i32> %v90, i32 %v27)
   %v97 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v96)
   %v98 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %v97, <16 x i32> undef, i32 %a3)
-  store <16 x i32> %v98, <16 x i32>* %v78, align 64
-  %v99 = getelementptr inbounds <16 x i32>, <16 x i32>* %v54, i32 2
-  store <16 x i32> undef, <16 x i32>* %v81, align 64
-  %v100 = getelementptr inbounds <16 x i32>, <16 x i32>* %v49, i32 3
-  %v101 = load <16 x i32>, <16 x i32>* %v82, align 64
-  %v102 = load <16 x i32>, <16 x i32>* %v84, align 64
-  %v103 = getelementptr inbounds <16 x i32>, <16 x i32>* %v51, i32 3
-  %v104 = load <16 x i32>, <16 x i32>* null, align 64
-  %v105 = getelementptr inbounds <16 x i32>, <16 x i32>* %v52, i32 3
-  %v106 = load <16 x i32>, <16 x i32>* undef, align 64
+  store <16 x i32> %v98, ptr %v78, align 64
+  %v99 = getelementptr inbounds <16 x i32>, ptr %v54, i32 2
+  store <16 x i32> undef, ptr %v81, align 64
+  %v100 = getelementptr inbounds <16 x i32>, ptr %v49, i32 3
+  %v101 = load <16 x i32>, ptr %v82, align 64
+  %v102 = load <16 x i32>, ptr %v84, align 64
+  %v103 = getelementptr inbounds <16 x i32>, ptr %v51, i32 3
+  %v104 = load <16 x i32>, ptr null, align 64
+  %v105 = getelementptr inbounds <16 x i32>, ptr %v52, i32 3
+  %v106 = load <16 x i32>, ptr undef, align 64
   %v107 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> undef, <16 x i32> undef, i32 %a3)
-  store <16 x i32> %v107, <16 x i32>* undef, align 64
+  store <16 x i32> %v107, ptr undef, align 64
   %v108 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> undef, <16 x i32> undef, i32 %a3)
-  %v109 = getelementptr inbounds <16 x i32>, <16 x i32>* %v54, i32 3
-  store <16 x i32> %v108, <16 x i32>* %v99, align 64
+  %v109 = getelementptr inbounds <16 x i32>, ptr %v54, i32 3
+  store <16 x i32> %v108, ptr %v99, align 64
   %v110 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v104, <16 x i32> %v86, i32 1)
   %v111 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v106, <16 x i32> %v87, i32 1)
-  %v112 = getelementptr inbounds <16 x i32>, <16 x i32>* %v49, i32 4
-  %v113 = load <16 x i32>, <16 x i32>* %v100, align 64
-  %v114 = getelementptr inbounds <16 x i32>, <16 x i32>* %v50, i32 4
-  %v115 = load <16 x i32>, <16 x i32>* undef, align 64
-  %v116 = getelementptr inbounds <16 x i32>, <16 x i32>* %v51, i32 4
-  %v117 = load <16 x i32>, <16 x i32>* %v103, align 64
-  %v118 = load <16 x i32>, <16 x i32>* %v105, align 64
+  %v112 = getelementptr inbounds <16 x i32>, ptr %v49, i32 4
+  %v113 = load <16 x i32>, ptr %v100, align 64
+  %v114 = getelementptr inbounds <16 x i32>, ptr %v50, i32 4
+  %v115 = load <16 x i32>, ptr undef, align 64
+  %v116 = getelementptr inbounds <16 x i32>, ptr %v51, i32 4
+  %v117 = load <16 x i32>, ptr %v103, align 64
+  %v118 = load <16 x i32>, ptr %v105, align 64
   %v119 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v113, <16 x i32> %v101, i32 1)
   %v120 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v115, <16 x i32> %v102, i32 1)
   %v121 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v117, <16 x i32> %v104, i32 1)
@@ -160,32 +154,32 @@ b2:                                               ; preds = %b2, %b1
   %v136 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v134, <16 x i32> %v122, i32 %v27)
   %v137 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v135)
   %v138 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %v137, <16 x i32> undef, i32 %a3)
-  %v139 = getelementptr inbounds <16 x i32>, <16 x i32>* %v53, i32 4
-  store <16 x i32> %v138, <16 x i32>* undef, align 64
+  %v139 = getelementptr inbounds <16 x i32>, ptr %v53, i32 4
+  store <16 x i32> %v138, ptr undef, align 64
   %v140 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v136)
   %v141 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v136)
   %v142 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %v140, <16 x i32> %v141, i32 %a3)
-  %v143 = getelementptr inbounds <16 x i32>, <16 x i32>* %v54, i32 4
-  store <16 x i32> %v142, <16 x i32>* %v109, align 64
+  %v143 = getelementptr inbounds <16 x i32>, ptr %v54, i32 4
+  store <16 x i32> %v142, ptr %v109, align 64
   %v144 = add nsw i32 %v44, -256
   %v145 = icmp sgt i32 %v144, 256
   br i1 %v145, label %b2, label %b3
 
 b3:                                               ; preds = %b2
-  %v146 = phi <16 x i32>* [ %v116, %b2 ]
-  %v147 = phi <16 x i32>* [ %v114, %b2 ]
-  %v148 = phi <16 x i32>* [ %v112, %b2 ]
+  %v146 = phi ptr [ %v116, %b2 ]
+  %v147 = phi ptr [ %v114, %b2 ]
+  %v148 = phi ptr [ %v112, %b2 ]
   br i1 %v43, label %b5, label %b4
 
 b4:                                               ; preds = %b3, %b1
   %v149 = phi <16 x i32> [ %v9, %b1 ], [ undef, %b3 ]
-  %v150 = phi <16 x i32>* [ %v36, %b1 ], [ %v148, %b3 ]
-  %v151 = phi <16 x i32>* [ %v34, %b1 ], [ %v147, %b3 ]
-  %v152 = phi <16 x i32>* [ %v32, %b1 ], [ %v146, %b3 ]
-  %v153 = phi <16 x i32>* [ %v5, %b1 ], [ undef, %b3 ]
-  %v154 = load <16 x i32>, <16 x i32>* %v150, align 64
-  %v155 = load <16 x i32>, <16 x i32>* %v151, align 64
-  %v156 = load <16 x i32>, <16 x i32>* %v152, align 64
+  %v150 = phi ptr [ %v35, %b1 ], [ %v148, %b3 ]
+  %v151 = phi ptr [ %v33, %b1 ], [ %v147, %b3 ]
+  %v152 = phi ptr [ %v31, %b1 ], [ %v146, %b3 ]
+  %v153 = phi ptr [ %a4, %b1 ], [ undef, %b3 ]
+  %v154 = load <16 x i32>, ptr %v150, align 64
+  %v155 = load <16 x i32>, ptr %v151, align 64
+  %v156 = load <16 x i32>, ptr %v152, align 64
   %v157 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v154, <16 x i32> undef, i32 1)
   %v158 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v155, <16 x i32> undef, i32 1)
   %v159 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v156, <16 x i32> %v149, i32 1)
@@ -195,17 +189,16 @@ b4:                                               ; preds = %b3, %b1
   %v163 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v162, <16 x i32> %v159, i32 %v27)
   %v164 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v163)
   %v165 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %v164, <16 x i32> undef, i32 %a3)
-  store <16 x i32> %v165, <16 x i32>* %v153, align 64
+  store <16 x i32> %v165, ptr %v153, align 64
   unreachable
 
 b5:                                               ; preds = %b3
-  %v166 = bitcast i8* %v38 to <16 x i32>*
   br label %b6
 
 b6:                                               ; preds = %b5, %b0
   %v167 = phi <16 x i32> [ %v8, %b0 ], [ undef, %b5 ]
-  %v168 = phi <16 x i32>* [ %v5, %b0 ], [ %v166, %b5 ]
-  %v169 = phi <16 x i32>* [ %v7, %b0 ], [ undef, %b5 ]
+  %v168 = phi ptr [ %a4, %b0 ], [ %v38, %b5 ]
+  %v169 = phi ptr [ %v6, %b0 ], [ undef, %b5 ]
   %v170 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> undef, <16 x i32> %v167, i32 1)
   %v171 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> undef, <16 x i32> undef, i32 1)
   %v172 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> undef, <16 x i32> %v170)
@@ -219,10 +212,10 @@ b6:                                               ; preds = %b5, %b0
   %v180 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v178, <16 x i32> undef, i32 %v27)
   %v181 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v179)
   %v182 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> undef, <16 x i32> %v181, i32 %a3)
-  store <16 x i32> %v182, <16 x i32>* %v168, align 64
+  store <16 x i32> %v182, ptr %v168, align 64
   %v183 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v180)
   %v184 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> undef, <16 x i32> %v183, i32 %a3)
-  store <16 x i32> %v184, <16 x i32>* %v169, align 64
+  store <16 x i32> %v184, ptr %v169, align 64
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/reg-scavengebug.ll b/llvm/test/CodeGen/Hexagon/reg-scavengebug.ll
index b712d1556cea1..710e3b44028c7 100644
--- a/llvm/test/CodeGen/Hexagon/reg-scavengebug.ll
+++ b/llvm/test/CodeGen/Hexagon/reg-scavengebug.ll
@@ -19,35 +19,30 @@ declare <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32>, <16 x i32>, i32) #0
 declare <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32>, <16 x i32>) #0
 
 ; Function Attrs: nounwind
-define void @f0(i16* noalias nocapture %a0, i32* noalias nocapture readonly %a1, i32 %a2, i8* noalias nocapture readonly %a3, i1 %cond) #1 {
+define void @f0(ptr noalias nocapture %a0, ptr noalias nocapture readonly %a1, i32 %a2, ptr noalias nocapture readonly %a3, i1 %cond) #1 {
 b0:
   %v0 = add nsw i32 %a2, 63
   %v1 = ashr i32 %v0, 6
-  %v2 = bitcast i16* %a0 to <16 x i32>*
-  %v3 = bitcast i8* %a3 to <16 x i32>*
-  %v4 = getelementptr inbounds i32, i32* %a1, i32 32
-  %v5 = bitcast i32* %v4 to <16 x i32>*
-  %v6 = load <16 x i32>, <16 x i32>* %v5, align 64, !tbaa !0
+  %v4 = getelementptr inbounds i32, ptr %a1, i32 32
+  %v6 = load <16 x i32>, ptr %v4, align 64, !tbaa !0
   %v7 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 32768)
   %v8 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 2147450879)
   %v9 = icmp sgt i32 %v1, 0
   br i1 %v9, label %b1, label %b4
 
 b1:                                               ; preds = %b0
-  %v10 = bitcast i32* %a1 to <16 x i32>*
-  %v11 = load <16 x i32>, <16 x i32>* %v10, align 64, !tbaa !0
+  %v11 = load <16 x i32>, ptr %a1, align 64, !tbaa !0
   %v12 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v6, <16 x i32> %v11, i32 2)
-  %v13 = getelementptr inbounds i32, i32* %a1, i32 48
+  %v13 = getelementptr inbounds i32, ptr %a1, i32 48
   %v14 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v12, <16 x i32> undef)
-  %v15 = bitcast i32* %v13 to <16 x i32>*
   br i1 %cond, label %b2, label %b3
 
 b2:                                               ; preds = %b1
-  %v16 = getelementptr inbounds <16 x i32>, <16 x i32>* %v15, i32 1
-  %v17 = load <16 x i32>, <16 x i32>* %v16, align 64, !tbaa !0
+  %v16 = getelementptr inbounds <16 x i32>, ptr %v13, i32 1
+  %v17 = load <16 x i32>, ptr %v16, align 64, !tbaa !0
   %v18 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v17, <16 x i32> %v6, i32 4)
-  %v19 = load <16 x i32>, <16 x i32>* %v15, align 64, !tbaa !0
-  %v20 = getelementptr inbounds <16 x i32>, <16 x i32>* %v15, i32 2
+  %v19 = load <16 x i32>, ptr %v13, align 64, !tbaa !0
+  %v20 = getelementptr inbounds <16 x i32>, ptr %v13, i32 2
   %v21 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v18, <16 x i32> %v19)
   %v22 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v21, <16 x i32> %v14, i32 4)
   %v23 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v21, <16 x i32> %v14, i32 8)
@@ -60,15 +55,15 @@ b2:                                               ; preds = %b1
   %v30 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v27, <16 x i32> %v28)
   %v31 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v29, i32 53019433)
   %v32 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v30, i32 53019433)
-  %v33 = load <16 x i32>, <16 x i32>* %v3, align 64, !tbaa !0
+  %v33 = load <16 x i32>, ptr %a3, align 64, !tbaa !0
   %v34 = tail call <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32> %v33)
   %v35 = tail call <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32> %v34, <16 x i32> %v34)
   %v36 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> %v32, <16 x i32> %v31)
-  store <16 x i32> %v36, <16 x i32>* %v2, align 64, !tbaa !0
-  %v37 = getelementptr inbounds <16 x i32>, <16 x i32>* %v15, i32 3
-  %v38 = load <16 x i32>, <16 x i32>* %v37, align 64, !tbaa !0
-  %v39 = load <16 x i32>, <16 x i32>* %v20, align 64, !tbaa !0
-  %v40 = getelementptr inbounds <16 x i32>, <16 x i32>* %v15, i32 4
+  store <16 x i32> %v36, ptr %a0, align 64, !tbaa !0
+  %v37 = getelementptr inbounds <16 x i32>, ptr %v13, i32 3
+  %v38 = load <16 x i32>, ptr %v37, align 64, !tbaa !0
+  %v39 = load <16 x i32>, ptr %v20, align 64, !tbaa !0
+  %v40 = getelementptr inbounds <16 x i32>, ptr %v13, i32 4
   %v41 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> undef, <16 x i32> %v39)
   %v42 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v41, <16 x i32> %v21, i32 4)
   %v43 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v41, <16 x i32> %v21, i32 8)
@@ -85,12 +80,12 @@ b2:                                               ; preds = %b1
   %v54 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v35)
   %v55 = tail call <16 x i32> @llvm.hexagon.V6.vsubuhsat(<16 x i32> %v53, <16 x i32> %v54)
   %v56 = tail call <16 x i32> @llvm.hexagon.V6.vminuh(<16 x i32> %v55, <16 x i32> %v8)
-  %v57 = getelementptr inbounds <16 x i32>, <16 x i32>* %v2, i32 undef
-  store <16 x i32> %v56, <16 x i32>* %v57, align 64, !tbaa !0
-  %v58 = getelementptr <16 x i32>, <16 x i32>* %v2, i32 2
-  %v59 = getelementptr inbounds <16 x i32>, <16 x i32>* %v15, i32 5
+  %v57 = getelementptr inbounds <16 x i32>, ptr %a0, i32 undef
+  store <16 x i32> %v56, ptr %v57, align 64, !tbaa !0
+  %v58 = getelementptr <16 x i32>, ptr %a0, i32 2
+  %v59 = getelementptr inbounds <16 x i32>, ptr %v13, i32 5
   %v60 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> zeroinitializer, <16 x i32> %v38, i32 4)
-  %v61 = load <16 x i32>, <16 x i32>* %v40, align 64, !tbaa !0
+  %v61 = load <16 x i32>, ptr %v40, align 64, !tbaa !0
   %v62 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v60, <16 x i32> %v61)
   %v63 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v62, <16 x i32> %v41, i32 4)
   %v64 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v62, <16 x i32> %v41, i32 8)
@@ -99,22 +94,22 @@ b2:                                               ; preds = %b1
   %v67 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v66, <16 x i32> %v64)
   %v68 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v67, <16 x i32> %v65)
   %v69 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v61, <16 x i32> %v39, i32 16)
-  %v70 = getelementptr inbounds <16 x i32>, <16 x i32>* %v15, i32 1
-  %v71 = load <16 x i32>, <16 x i32>* %v70, align 64, !tbaa !0
+  %v70 = getelementptr inbounds <16 x i32>, ptr %v13, i32 1
+  %v71 = load <16 x i32>, ptr %v70, align 64, !tbaa !0
   %v72 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v68, <16 x i32> %v71)
   %v73 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v68, <16 x i32> %v69)
   %v74 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v72, i32 53019433)
   %v75 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v73, i32 53019433)
   %v76 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> %v75, <16 x i32> %v74)
-  store <16 x i32> %v76, <16 x i32>* %v58, align 64, !tbaa !0
-  %v77 = getelementptr inbounds <16 x i32>, <16 x i32>* %v15, i32 7
-  %v78 = load <16 x i32>, <16 x i32>* %v77, align 64, !tbaa !0
+  store <16 x i32> %v76, ptr %v58, align 64, !tbaa !0
+  %v77 = getelementptr inbounds <16 x i32>, ptr %v13, i32 7
+  %v78 = load <16 x i32>, ptr %v77, align 64, !tbaa !0
   %v79 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> undef, <16 x i32> undef)
-  %v80 = getelementptr <16 x i32>, <16 x i32>* %v2, i32 4
-  %v81 = getelementptr inbounds <16 x i32>, <16 x i32>* %v15, i32 9
-  %v82 = load <16 x i32>, <16 x i32>* %v81, align 64, !tbaa !0
+  %v80 = getelementptr <16 x i32>, ptr %a0, i32 4
+  %v81 = getelementptr inbounds <16 x i32>, ptr %v13, i32 9
+  %v82 = load <16 x i32>, ptr %v81, align 64, !tbaa !0
   %v83 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v82, <16 x i32> %v78, i32 4)
-  %v84 = getelementptr inbounds <16 x i32>, <16 x i32>* %v15, i32 10
+  %v84 = getelementptr inbounds <16 x i32>, ptr %v13, i32 10
   %v85 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v83, <16 x i32> undef)
   %v86 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v85, <16 x i32> %v79, i32 4)
   %v87 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v85, <16 x i32> %v79, i32 8)
@@ -129,11 +124,11 @@ b2:                                               ; preds = %b1
   %v96 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v94, i32 53019433)
   %v97 = tail call <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32> undef, <16 x i32> undef)
   %v98 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> %v96, <16 x i32> %v95)
-  store <16 x i32> %v98, <16 x i32>* %v80, align 64, !tbaa !0
-  %v99 = getelementptr inbounds <16 x i32>, <16 x i32>* %v15, i32 11
-  %v100 = load <16 x i32>, <16 x i32>* %v99, align 64, !tbaa !0
+  store <16 x i32> %v98, ptr %v80, align 64, !tbaa !0
+  %v99 = getelementptr inbounds <16 x i32>, ptr %v13, i32 11
+  %v100 = load <16 x i32>, ptr %v99, align 64, !tbaa !0
   %v101 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v100, <16 x i32> %v82, i32 4)
-  %v102 = load <16 x i32>, <16 x i32>* %v84, align 64, !tbaa !0
+  %v102 = load <16 x i32>, ptr %v84, align 64, !tbaa !0
   %v103 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v101, <16 x i32> %v102)
   %v104 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v103, <16 x i32> %v85, i32 4)
   %v105 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v103, <16 x i32> %v85, i32 8)
@@ -150,11 +145,11 @@ b2:                                               ; preds = %b1
   %v116 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v97)
   %v117 = tail call <16 x i32> @llvm.hexagon.V6.vsubuhsat(<16 x i32> %v115, <16 x i32> %v116)
   %v118 = tail call <16 x i32> @llvm.hexagon.V6.vminuh(<16 x i32> %v117, <16 x i32> %v8)
-  %v119 = getelementptr inbounds <16 x i32>, <16 x i32>* %v2, i32 undef
-  store <16 x i32> %v118, <16 x i32>* %v119, align 64, !tbaa !0
-  %v120 = getelementptr <16 x i32>, <16 x i32>* %v2, i32 6
+  %v119 = getelementptr inbounds <16 x i32>, ptr %a0, i32 undef
+  store <16 x i32> %v118, ptr %v119, align 64, !tbaa !0
+  %v120 = getelementptr <16 x i32>, ptr %a0, i32 6
   %v121 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> undef, <16 x i32> undef)
-  store <16 x i32> %v121, <16 x i32>* %v120, align 64, !tbaa !0
+  store <16 x i32> %v121, ptr %v120, align 64, !tbaa !0
   ret void
 
 b3:                                               ; preds = %b1

diff  --git a/llvm/test/CodeGen/Hexagon/reg-scavenger-valid-slot.ll b/llvm/test/CodeGen/Hexagon/reg-scavenger-valid-slot.ll
index 8ffa4659a9dd1..57b0c8d03a40b 100644
--- a/llvm/test/CodeGen/Hexagon/reg-scavenger-valid-slot.ll
+++ b/llvm/test/CodeGen/Hexagon/reg-scavenger-valid-slot.ll
@@ -17,7 +17,7 @@
 
 target triple = "hexagon"
 
-define void @foo(<16 x i32>* nocapture readnone %p) #0 {
+define void @foo(ptr nocapture readnone %p) #0 {
 entry:
   %0 = tail call { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } asm "nop", "=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r"() #1
   %asmresult = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %0, 0

diff  --git a/llvm/test/CodeGen/Hexagon/reg_seq.ll b/llvm/test/CodeGen/Hexagon/reg_seq.ll
index ca2bc0aab1b22..c447e426db134 100644
--- a/llvm/test/CodeGen/Hexagon/reg_seq.ll
+++ b/llvm/test/CodeGen/Hexagon/reg_seq.ll
@@ -4,27 +4,24 @@
 ; CHECK-NOT: combine(#0
 
 ; Function Attrs: nounwind
-define void @f0(i16* nocapture readonly %a0, i16* nocapture readonly %a1, i16* nocapture %a2, i16* nocapture readonly %a3, i32 %a4) #0 {
+define void @f0(ptr nocapture readonly %a0, ptr nocapture readonly %a1, ptr nocapture %a2, ptr nocapture readonly %a3, i32 %a4) #0 {
 b0:
   %v0 = lshr i32 %a4, 1
   %v1 = icmp eq i32 %v0, 0
   br i1 %v1, label %b3, label %b1
 
 b1:                                               ; preds = %b0
-  %v2 = bitcast i16* %a2 to i64*
-  %v3 = bitcast i16* %a1 to i64*
-  %v4 = bitcast i16* %a0 to i64*
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
   %v5 = phi i32 [ 0, %b1 ], [ %v71, %b2 ]
-  %v6 = phi i64* [ %v4, %b1 ], [ %v9, %b2 ]
-  %v7 = phi i64* [ %v3, %b1 ], [ %v11, %b2 ]
-  %v8 = phi i64* [ %v2, %b1 ], [ %v70, %b2 ]
-  %v9 = getelementptr inbounds i64, i64* %v6, i32 1
-  %v10 = load i64, i64* %v6, align 8, !tbaa !0
-  %v11 = getelementptr inbounds i64, i64* %v7, i32 1
-  %v12 = load i64, i64* %v7, align 8, !tbaa !0
+  %v6 = phi ptr [ %a0, %b1 ], [ %v9, %b2 ]
+  %v7 = phi ptr [ %a1, %b1 ], [ %v11, %b2 ]
+  %v8 = phi ptr [ %a2, %b1 ], [ %v70, %b2 ]
+  %v9 = getelementptr inbounds i64, ptr %v6, i32 1
+  %v10 = load i64, ptr %v6, align 8, !tbaa !0
+  %v11 = getelementptr inbounds i64, ptr %v7, i32 1
+  %v12 = load i64, ptr %v7, align 8, !tbaa !0
   %v13 = trunc i64 %v10 to i32
   %v14 = lshr i64 %v10, 32
   %v15 = tail call i64 @llvm.hexagon.S2.vzxthw(i32 %v13)
@@ -33,18 +30,18 @@ b2:                                               ; preds = %b2, %b1
   %v18 = tail call i64 @llvm.hexagon.S2.vzxthw(i32 %v16)
   %v19 = trunc i64 %v15 to i32
   %v20 = lshr i64 %v15, 32
-  %v21 = getelementptr inbounds i16, i16* %a3, i32 %v19
-  %v22 = load i16, i16* %v21, align 2, !tbaa !3
+  %v21 = getelementptr inbounds i16, ptr %a3, i32 %v19
+  %v22 = load i16, ptr %v21, align 2, !tbaa !3
   %v23 = trunc i64 %v20 to i32
-  %v24 = getelementptr inbounds i16, i16* %a3, i32 %v23
-  %v25 = load i16, i16* %v24, align 2, !tbaa !3
+  %v24 = getelementptr inbounds i16, ptr %a3, i32 %v23
+  %v25 = load i16, ptr %v24, align 2, !tbaa !3
   %v26 = trunc i64 %v18 to i32
   %v27 = lshr i64 %v18, 32
-  %v28 = getelementptr inbounds i16, i16* %a3, i32 %v26
-  %v29 = load i16, i16* %v28, align 2, !tbaa !3
+  %v28 = getelementptr inbounds i16, ptr %a3, i32 %v26
+  %v29 = load i16, ptr %v28, align 2, !tbaa !3
   %v30 = trunc i64 %v27 to i32
-  %v31 = getelementptr inbounds i16, i16* %a3, i32 %v30
-  %v32 = load i16, i16* %v31, align 2, !tbaa !3
+  %v31 = getelementptr inbounds i16, ptr %a3, i32 %v30
+  %v32 = load i16, ptr %v31, align 2, !tbaa !3
   %v33 = zext i16 %v32 to i64
   %v34 = shl nuw nsw i64 %v33, 32
   %v35 = zext i16 %v29 to i64
@@ -54,26 +51,26 @@ b2:                                               ; preds = %b2, %b1
   %v39 = zext i16 %v22 to i64
   %v40 = or i64 %v39, %v38
   %v41 = tail call i64 @llvm.hexagon.S2.vtrunewh(i64 %v36, i64 %v40)
-  %v42 = getelementptr inbounds i64, i64* %v8, i32 1
-  store i64 %v41, i64* %v8, align 8, !tbaa !0
+  %v42 = getelementptr inbounds i64, ptr %v8, i32 1
+  store i64 %v41, ptr %v8, align 8, !tbaa !0
   %v43 = trunc i64 %v14 to i32
   %v44 = tail call i64 @llvm.hexagon.S2.vzxthw(i32 %v43)
   %v45 = trunc i64 %v17 to i32
   %v46 = tail call i64 @llvm.hexagon.S2.vzxthw(i32 %v45)
   %v47 = trunc i64 %v44 to i32
   %v48 = lshr i64 %v44, 32
-  %v49 = getelementptr inbounds i16, i16* %a3, i32 %v47
-  %v50 = load i16, i16* %v49, align 2, !tbaa !3
+  %v49 = getelementptr inbounds i16, ptr %a3, i32 %v47
+  %v50 = load i16, ptr %v49, align 2, !tbaa !3
   %v51 = trunc i64 %v48 to i32
-  %v52 = getelementptr inbounds i16, i16* %a3, i32 %v51
-  %v53 = load i16, i16* %v52, align 2, !tbaa !3
+  %v52 = getelementptr inbounds i16, ptr %a3, i32 %v51
+  %v53 = load i16, ptr %v52, align 2, !tbaa !3
   %v54 = trunc i64 %v46 to i32
   %v55 = lshr i64 %v46, 32
-  %v56 = getelementptr inbounds i16, i16* %a3, i32 %v54
-  %v57 = load i16, i16* %v56, align 2, !tbaa !3
+  %v56 = getelementptr inbounds i16, ptr %a3, i32 %v54
+  %v57 = load i16, ptr %v56, align 2, !tbaa !3
   %v58 = trunc i64 %v55 to i32
-  %v59 = getelementptr inbounds i16, i16* %a3, i32 %v58
-  %v60 = load i16, i16* %v59, align 2, !tbaa !3
+  %v59 = getelementptr inbounds i16, ptr %a3, i32 %v58
+  %v60 = load i16, ptr %v59, align 2, !tbaa !3
   %v61 = zext i16 %v60 to i64
   %v62 = shl nuw nsw i64 %v61, 32
   %v63 = zext i16 %v57 to i64
@@ -83,8 +80,8 @@ b2:                                               ; preds = %b2, %b1
   %v67 = zext i16 %v50 to i64
   %v68 = or i64 %v67, %v66
   %v69 = tail call i64 @llvm.hexagon.S2.vtrunewh(i64 %v64, i64 %v68)
-  %v70 = getelementptr inbounds i64, i64* %v8, i32 2
-  store i64 %v69, i64* %v42, align 8, !tbaa !0
+  %v70 = getelementptr inbounds i64, ptr %v8, i32 2
+  store i64 %v69, ptr %v42, align 8, !tbaa !0
   %v71 = add nsw i32 %v5, 1
   %v72 = icmp ult i32 %v71, %v0
   br i1 %v72, label %b2, label %b3

diff  --git a/llvm/test/CodeGen/Hexagon/regalloc-block-overlap.ll b/llvm/test/CodeGen/Hexagon/regalloc-block-overlap.ll
index 90b37f2a0d728..8632ddf44b6f7 100644
--- a/llvm/test/CodeGen/Hexagon/regalloc-block-overlap.ll
+++ b/llvm/test/CodeGen/Hexagon/regalloc-block-overlap.ll
@@ -16,7 +16,7 @@ declare <32 x i32> @llvm.hexagon.V6.vasrwhsat.128B(<32 x i32>, <32 x i32>, i32)
 declare <64 x i32> @llvm.hexagon.V6.vlutvwh.128B(<32 x i32>, <32 x i32>, i32) #1
 declare <64 x i32> @llvm.hexagon.V6.vlutvwh.oracc.128B(<64 x i32>, <32 x i32>, <32 x i32>, i32) #1
 
-define hidden void @fred(<32 x i32>* %a0, i32 %a1, i1 %cond) #0 {
+define hidden void @fred(ptr %a0, i32 %a1, i1 %cond) #0 {
 b0:
   %v1 = ashr i32 %a1, 7
   %v2 = shl nsw i32 %v1, 7
@@ -135,7 +135,7 @@ b42:                                              ; preds = %b40
   %v52 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> %v51, <64 x i32> undef) #2
   %v53 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v52) #2
   %v54 = tail call <32 x i32> @llvm.hexagon.V6.vasrwhsat.128B(<32 x i32> %v53, <32 x i32> undef, i32 15) #2
-  store <32 x i32> %v54, <32 x i32>* %a0, align 128
+  store <32 x i32> %v54, ptr %a0, align 128
   br label %b39
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/registerscav-missing-spill-slot.ll b/llvm/test/CodeGen/Hexagon/registerscav-missing-spill-slot.ll
index 73adae92142ab..1a8e06259abb7 100644
--- a/llvm/test/CodeGen/Hexagon/registerscav-missing-spill-slot.ll
+++ b/llvm/test/CodeGen/Hexagon/registerscav-missing-spill-slot.ll
@@ -4,12 +4,12 @@
 
 target triple = "hexagon-unknown-linux-gnu"
 
-%s.0 = type { double, double, double, double, double, double, i32, double, double, double, double, i8*, i8, [9 x i8], double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, [200 x i8*], [32 x i8*], [32 x i8], i32 }
+%s.0 = type { double, double, double, double, double, double, i32, double, double, double, double, ptr, i8, [9 x i8], double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, [200 x ptr], [32 x ptr], [32 x i8], i32 }
 
 ; Function Attrs: nounwind
 define void @f0() #0 {
 b0:
-  %v0 = call i8* @f2()
+  %v0 = call ptr @f2()
   br i1 undef, label %b1, label %b2
 
 b1:                                               ; preds = %b0
@@ -28,7 +28,7 @@ b5:                                               ; preds = %b4
   unreachable
 
 b6:                                               ; preds = %b4
-  %v1 = call i32 bitcast (i32 (...)* @f1 to i32 ()*)() #0
+  %v1 = call i32 @f1() #0
   br i1 undef, label %b7, label %b20
 
 b7:                                               ; preds = %b6
@@ -60,11 +60,10 @@ b13:                                              ; preds = %b7
   unreachable
 
 b14:                                              ; preds = %b7
-  %v2 = call %s.0* bitcast (%s.0* (...)* @f3 to %s.0* (i32)*)(i32 0) #0
+  %v2 = call ptr @f3(i32 0) #0
   br label %b15
 
 b15:                                              ; preds = %b15, %b14
-  %v3 = bitcast i8* undef to double*
   %v4 = fadd double undef, undef
   br i1 undef, label %b16, label %b15
 
@@ -75,18 +74,15 @@ b16:                                              ; preds = %b15
   ]
 
 b17:                                              ; preds = %b16
-  %v5 = getelementptr i8, i8* %v0, i32 0
-  %v6 = bitcast i8* %v5 to double*
   %v7 = or i32 0, 16
-  %v8 = getelementptr i8, i8* %v0, i32 %v7
-  %v9 = bitcast i8* %v8 to double*
-  %v10 = load double, double* undef, align 8, !tbaa !0
+  %v8 = getelementptr i8, ptr %v0, i32 %v7
+  %v10 = load double, ptr undef, align 8, !tbaa !0
   %v11 = fcmp olt double -1.000000e+11, %v10
   %v12 = select i1 %v11, double %v10, double -1.000000e+11
-  %v13 = load double, double* %v6, align 8, !tbaa !0
+  %v13 = load double, ptr %v0, align 8, !tbaa !0
   %v14 = fcmp olt double -1.000000e+11, %v13
   %v15 = select i1 %v14, double %v13, double -1.000000e+11
-  %v16 = load double, double* %v9, align 8, !tbaa !0
+  %v16 = load double, ptr %v8, align 8, !tbaa !0
   %v17 = fcmp olt double -1.000000e+11, %v16
   %v18 = select i1 %v17, double %v16, double -1.000000e+11
   %v19 = fcmp ogt double 1.000000e+11, %v13
@@ -101,11 +97,11 @@ b18:                                              ; preds = %b17, %b16
   %v25 = phi double [ %v20, %b17 ], [ 1.000000e+11, %b16 ]
   %v26 = phi double [ %v18, %b17 ], [ -1.000000e+11, %b16 ]
   %v27 = phi double [ %v22, %b17 ], [ 1.000000e+11, %b16 ]
-  %v28 = load double, double* undef, align 8, !tbaa !0
+  %v28 = load double, ptr undef, align 8, !tbaa !0
   %v29 = select i1 undef, double %v28, double %v23
-  %v30 = load double, double* null, align 8, !tbaa !0
+  %v30 = load double, ptr null, align 8, !tbaa !0
   %v31 = select i1 undef, double %v30, double %v24
-  %v32 = load double, double* undef, align 8, !tbaa !0
+  %v32 = load double, ptr undef, align 8, !tbaa !0
   %v33 = select i1 undef, double %v32, double %v26
   %v34 = select i1 undef, double %v30, double %v25
   %v35 = select i1 undef, double %v32, double %v27
@@ -118,41 +114,39 @@ b19:                                              ; preds = %b19, %b18, %b16
   %v39 = phi double [ %v82, %b19 ], [ 1.000000e+11, %b16 ], [ %v34, %b18 ]
   %v40 = phi double [ %v80, %b19 ], [ -1.000000e+11, %b16 ], [ %v33, %b18 ]
   %v41 = phi double [ %v84, %b19 ], [ 1.000000e+11, %b16 ], [ %v35, %b18 ]
-  %v42 = getelementptr i8, i8* %v0, i32 0
-  %v43 = bitcast i8* %v42 to double*
-  %v44 = load double, double* null, align 8, !tbaa !0
+  %v44 = load double, ptr null, align 8, !tbaa !0
   %v45 = select i1 undef, double %v44, double %v36
-  %v46 = load double, double* %v43, align 8, !tbaa !0
+  %v46 = load double, ptr %v0, align 8, !tbaa !0
   %v47 = select i1 undef, double %v46, double %v38
-  %v48 = load double, double* undef, align 8, !tbaa !0
+  %v48 = load double, ptr undef, align 8, !tbaa !0
   %v49 = select i1 undef, double %v48, double %v40
   %v50 = select i1 undef, double %v44, double %v37
   %v51 = fcmp ogt double %v39, %v46
   %v52 = select i1 %v51, double %v46, double %v39
   %v53 = select i1 undef, double %v48, double %v41
-  %v54 = load double, double* null, align 8, !tbaa !0
+  %v54 = load double, ptr null, align 8, !tbaa !0
   %v55 = select i1 undef, double %v54, double %v45
-  %v56 = load double, double* undef, align 8, !tbaa !0
+  %v56 = load double, ptr undef, align 8, !tbaa !0
   %v57 = select i1 undef, double %v56, double %v47
-  %v58 = load double, double* undef, align 8, !tbaa !0
+  %v58 = load double, ptr undef, align 8, !tbaa !0
   %v59 = select i1 undef, double %v58, double %v49
   %v60 = select i1 undef, double %v54, double %v50
   %v61 = select i1 undef, double %v56, double %v52
   %v62 = select i1 false, double %v58, double %v53
-  %v63 = load double, double* undef, align 8, !tbaa !0
+  %v63 = load double, ptr undef, align 8, !tbaa !0
   %v64 = select i1 undef, double %v63, double %v55
-  %v65 = load double, double* undef, align 8, !tbaa !0
+  %v65 = load double, ptr undef, align 8, !tbaa !0
   %v66 = select i1 undef, double %v65, double %v57
-  %v67 = load double, double* null, align 8, !tbaa !0
+  %v67 = load double, ptr null, align 8, !tbaa !0
   %v68 = select i1 undef, double %v67, double %v59
   %v69 = fcmp ogt double %v60, %v63
   %v70 = select i1 %v69, double %v63, double %v60
   %v71 = select i1 false, double %v65, double %v61
   %v72 = select i1 false, double %v67, double %v62
-  %v73 = load double, double* null, align 8, !tbaa !0
+  %v73 = load double, ptr null, align 8, !tbaa !0
   %v74 = fcmp olt double %v64, %v73
   %v75 = select i1 %v74, double %v73, double %v64
-  %v76 = load double, double* null, align 8, !tbaa !0
+  %v76 = load double, ptr null, align 8, !tbaa !0
   %v77 = fcmp olt double %v66, %v76
   %v78 = select i1 %v77, double %v76, double %v66
   %v79 = fcmp olt double %v68, 0.000000e+00
@@ -170,9 +164,9 @@ b20:                                              ; preds = %b19, %b18, %b6
 declare i32 @f1(...)
 
 ; Function Attrs: nounwind
-declare noalias i8* @f2() #0
+declare noalias ptr @f2() #0
 
-declare %s.0* @f3(...)
+declare ptr @f3(...)
 
 attributes #0 = { nounwind }
 

diff  --git a/llvm/test/CodeGen/Hexagon/registerscavenger-fail1.ll b/llvm/test/CodeGen/Hexagon/registerscavenger-fail1.ll
index 6c4c9fc1432e7..baafd3e9c6244 100644
--- a/llvm/test/CodeGen/Hexagon/registerscavenger-fail1.ll
+++ b/llvm/test/CodeGen/Hexagon/registerscavenger-fail1.ll
@@ -3,16 +3,16 @@
 
 target triple = "hexagon-unknown-linux-gnu"
 
-%s.0 = type { double, double, double, double, double, double, i32, double, double, double, double, i8*, i8, [9 x i8], double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, [200 x i8*], [32 x i8*], [32 x i8], i32 }
+%s.0 = type { double, double, double, double, double, double, i32, double, double, double, double, ptr, i8, [9 x i8], double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, [200 x ptr], [32 x ptr], [32 x i8], i32 }
 
 @g0 = external unnamed_addr constant [6 x i8], align 8
 
 ; Function Attrs: nounwind
 define i32 @f0(double %a0) #0 {
 b0:
-  %v0 = call double bitcast (double (...)* @f1 to double (i8*)*)(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @g0, i32 0, i32 0)) #0
-  %v1 = call i32 bitcast (i32 (...)* @f2 to i32 ()*)() #0
-  %v2 = call i8* @f3(i32 undef)
+  %v0 = call double @f1(ptr @g0) #0
+  %v1 = call i32 @f2() #0
+  %v2 = call ptr @f3(i32 undef)
   br i1 undef, label %b1, label %b2
 
 b1:                                               ; preds = %b0
@@ -32,7 +32,7 @@ b5:                                               ; preds = %b4
   ret i32 0
 
 b6:                                               ; preds = %b4
-  %v4 = call i32 bitcast (i32 (...)* @f2 to i32 ()*)() #0
+  %v4 = call i32 @f2() #0
   br i1 undef, label %b7, label %b24
 
 b7:                                               ; preds = %b6
@@ -65,7 +65,7 @@ b13:                                              ; preds = %b12, %b7
   unreachable
 
 b14:                                              ; preds = %b7
-  %v5 = call %s.0* bitcast (%s.0* (...)* @f4 to %s.0* (i32)*)(i32 0) #0
+  %v5 = call ptr @f4(i32 0) #0
   %v6 = icmp ult i32 %v4, 8
   br i1 %v6, label %b16, label %b15
 
@@ -94,18 +94,15 @@ b20:                                              ; preds = %b19
   br i1 %v12, label %b21, label %b22
 
 b21:                                              ; preds = %b20
-  %v13 = getelementptr i8, i8* %v2, i32 0
-  %v14 = bitcast i8* %v13 to double*
   %v15 = or i32 0, 16
-  %v16 = getelementptr i8, i8* %v2, i32 %v15
-  %v17 = bitcast i8* %v16 to double*
-  %v18 = load double, double* undef, align 8, !tbaa !0
+  %v16 = getelementptr i8, ptr %v2, i32 %v15
+  %v18 = load double, ptr undef, align 8, !tbaa !0
   %v19 = fcmp olt double -1.000000e+11, %v18
   %v20 = select i1 %v19, double %v18, double -1.000000e+11
-  %v21 = load double, double* %v14, align 8, !tbaa !0
+  %v21 = load double, ptr %v2, align 8, !tbaa !0
   %v22 = fcmp olt double -1.000000e+11, %v21
   %v23 = select i1 %v22, double %v21, double -1.000000e+11
-  %v24 = load double, double* %v17, align 8, !tbaa !0
+  %v24 = load double, ptr %v16, align 8, !tbaa !0
   %v25 = fcmp olt double -1.000000e+11, %v24
   %v26 = select i1 %v25, double %v24, double -1.000000e+11
   %v27 = fcmp ogt double 1.000000e+11, %v18
@@ -115,7 +112,7 @@ b21:                                              ; preds = %b20
   %v31 = fcmp ogt double 1.000000e+11, %v24
   %v32 = select i1 %v31, double %v24, double 1.000000e+11
   %v33 = add i32 0, 1
-  %v34 = getelementptr i8, i8* %v2, i32 32
+  %v34 = getelementptr i8, ptr %v2, i32 32
   br label %b22
 
 b22:                                              ; preds = %b21, %b20
@@ -125,19 +122,17 @@ b22:                                              ; preds = %b21, %b20
   %v38 = phi double [ %v30, %b21 ], [ 1.000000e+11, %b20 ]
   %v39 = phi double [ %v26, %b21 ], [ -1.000000e+11, %b20 ]
   %v40 = phi double [ %v32, %b21 ], [ 1.000000e+11, %b20 ]
-  %v41 = phi i8* [ %v34, %b21 ], [ %v2, %b20 ]
+  %v41 = phi ptr [ %v34, %b21 ], [ %v2, %b20 ]
   %v42 = phi i32 [ %v33, %b21 ], [ 0, %b20 ]
   %v43 = shl nsw i32 %v42, 5
-  %v44 = bitcast i8* %v41 to double*
   %v45 = or i32 %v43, 8
-  %v46 = getelementptr i8, i8* %v2, i32 %v45
-  %v47 = bitcast i8* %v46 to double*
-  %v48 = load double, double* %v44, align 8, !tbaa !0
+  %v46 = getelementptr i8, ptr %v2, i32 %v45
+  %v48 = load double, ptr %v41, align 8, !tbaa !0
   %v49 = select i1 undef, double %v48, double %v35
-  %v50 = load double, double* %v47, align 8, !tbaa !0
+  %v50 = load double, ptr %v46, align 8, !tbaa !0
   %v51 = fcmp olt double %v37, %v50
   %v52 = select i1 %v51, double %v50, double %v37
-  %v53 = load double, double* undef, align 8, !tbaa !0
+  %v53 = load double, ptr undef, align 8, !tbaa !0
   %v54 = fcmp olt double %v39, %v53
   %v55 = select i1 %v54, double %v53, double %v39
   %v56 = fcmp ogt double %v36, %v48
@@ -153,19 +148,18 @@ b23:                                              ; preds = %b23, %b22, %b19
   %v63 = phi double [ %v81, %b23 ], [ 1.000000e+11, %b19 ], [ %v59, %b22 ]
   %v64 = phi i32 [ %v82, %b23 ], [ 0, %b19 ], [ %v61, %b22 ]
   %v65 = shl i32 %v64, 5
-  %v66 = load double, double* undef, align 8, !tbaa !0
-  %v67 = load double, double* undef, align 8, !tbaa !0
+  %v66 = load double, ptr undef, align 8, !tbaa !0
+  %v67 = load double, ptr undef, align 8, !tbaa !0
   %v68 = select i1 undef, double %v66, double %v62
   %v69 = select i1 undef, double %v67, double %v63
-  %v70 = load double, double* undef, align 8, !tbaa !0
+  %v70 = load double, ptr undef, align 8, !tbaa !0
   %v71 = select i1 false, double 0.000000e+00, double %v68
   %v72 = select i1 undef, double %v70, double %v69
-  %v73 = bitcast i8* undef to double*
-  %v74 = load double, double* undef, align 8, !tbaa !0
+  %v74 = load double, ptr undef, align 8, !tbaa !0
   %v75 = fcmp ogt double %v71, 0.000000e+00
   %v76 = select i1 %v75, double 0.000000e+00, double %v71
   %v77 = select i1 undef, double %v74, double %v72
-  %v78 = load double, double* undef, align 8, !tbaa !0
+  %v78 = load double, ptr undef, align 8, !tbaa !0
   %v79 = select i1 undef, double %v78, double %v76
   %v80 = fcmp ogt double %v77, 0.000000e+00
   %v81 = select i1 %v80, double 0.000000e+00, double %v77
@@ -197,14 +191,14 @@ b25:                                              ; preds = %b24
   %v101 = fptosi double %v100 to i32
   %v102 = fadd double undef, 1.000000e+00
   %v103 = fptosi double %v102 to i32
-  %v104 = call i8* @f3(i32 undef)
+  %v104 = call ptr @f3(i32 undef)
   br i1 false, label %b26, label %b27
 
 b26:                                              ; preds = %b25
   unreachable
 
 b27:                                              ; preds = %b25, %b24
-  %v105 = phi i8* [ %v104, %b25 ], [ undef, %b24 ]
+  %v105 = phi ptr [ %v104, %b25 ], [ undef, %b24 ]
   %v106 = phi i32 [ %v103, %b25 ], [ %v95, %b24 ]
   %v107 = phi i32 [ %v101, %b25 ], [ %v91, %b24 ]
   %v108 = phi i32 [ %v98, %b25 ], [ undef, %b24 ]
@@ -244,9 +238,8 @@ b30:                                              ; preds = %b34, %b29
 
 b31:                                              ; preds = %b30
   %v129 = add i32 %v123, 0
-  %v130 = getelementptr i8, i8* %v105, i32 %v129
-  %v131 = bitcast i8* %v130 to double*
-  store double %v128, double* %v131, align 8, !tbaa !0
+  %v130 = getelementptr i8, ptr %v105, i32 %v129
+  store double %v128, ptr %v130, align 8, !tbaa !0
   br label %b32
 
 b32:                                              ; preds = %b31, %b30
@@ -257,14 +250,13 @@ b33:                                              ; preds = %b33, %b32, %b30
   %v133 = phi i32 [ %v142, %b33 ], [ 0, %b30 ], [ %v132, %b32 ]
   %v134 = mul i32 %v113, %v133
   %v135 = add i32 %v124, %v134
-  %v136 = getelementptr i8, i8* %v105, i32 %v135
-  %v137 = bitcast i8* %v136 to double*
+  %v136 = getelementptr i8, ptr %v105, i32 %v135
   %v138 = sitofp i32 %v133 to double
-  store double undef, double* %v137, align 8, !tbaa !0
+  store double undef, ptr %v136, align 8, !tbaa !0
   %v139 = fmul double undef, %v109
   %v140 = fadd double %v139, %v114
   %v141 = fadd double %v140, %v87
-  store double %v141, double* undef, align 8, !tbaa !0
+  store double %v141, ptr undef, align 8, !tbaa !0
   %v142 = add nsw i32 %v133, 4
   %v143 = icmp eq i32 %v142, %v106
   br i1 %v143, label %b34, label %b33
@@ -284,9 +276,9 @@ declare double @f1(...)
 declare i32 @f2(...)
 
 ; Function Attrs: nounwind
-declare noalias i8* @f3(i32) #0
+declare noalias ptr @f3(i32) #0
 
-declare %s.0* @f4(...)
+declare ptr @f4(...)
 
 attributes #0 = { nounwind }
 

diff  --git a/llvm/test/CodeGen/Hexagon/regp-underflow.ll b/llvm/test/CodeGen/Hexagon/regp-underflow.ll
index a880eab948e19..bc4fb0357b37f 100644
--- a/llvm/test/CodeGen/Hexagon/regp-underflow.ll
+++ b/llvm/test/CodeGen/Hexagon/regp-underflow.ll
@@ -13,72 +13,72 @@ target triple = "hexagon-unknown--elf"
 @g7 = private unnamed_addr constant [13 x i8] c"pong started\00"
 
 ; Function Attrs: nounwind
-define void @f0(i8* nocapture readnone %a0) #0 {
+define void @f0(ptr nocapture readnone %a0) #0 {
 b0:
-  tail call void @f1(i8* %a0, i32 0)
+  tail call void @f1(ptr %a0, i32 0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define internal void @f1(i8* nocapture readnone %a0, i32 %a1) #0 {
+define internal void @f1(ptr nocapture readnone %a0, i32 %a1) #0 {
 b0:
   %v0 = icmp eq i32 %a1, 1
   br i1 %v0, label %b2, label %b1
 
 b1:                                               ; preds = %b0
-  %v1 = tail call i32 @f3(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @g6, i32 0, i32 0))
-  store volatile i32 1, i32* @g0, align 4, !tbaa !0
+  %v1 = tail call i32 @f3(ptr @g6)
+  store volatile i32 1, ptr @g0, align 4, !tbaa !0
   br label %b3
 
 b2:                                               ; preds = %b0
-  %v2 = tail call i32 @f3(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @g7, i32 0, i32 0))
-  store volatile i32 1, i32* @g1, align 4, !tbaa !0
+  %v2 = tail call i32 @f3(ptr @g7)
+  store volatile i32 1, ptr @g1, align 4, !tbaa !0
   br label %b3
 
 b3:                                               ; preds = %b3, %b2, %b1
-  %v3 = load volatile i32, i32* @g2, align 4, !tbaa !0
+  %v3 = load volatile i32, ptr @g2, align 4, !tbaa !0
   %v4 = icmp eq i32 %v3, 0
   br i1 %v4, label %b3, label %b4
 
 b4:                                               ; preds = %b3
-  %v5 = select i1 %v0, i32* getelementptr inbounds ([100 x i32], [100 x i32]* @g5, i32 0, i32 0), i32* getelementptr inbounds ([100 x i32], [100 x i32]* @g4, i32 0, i32 0)
+  %v5 = select i1 %v0, ptr @g5, ptr @g4
   br label %b5
 
 b5:                                               ; preds = %b5, %b4
-  %v6 = phi i32* [ %v5, %b4 ], [ %v29, %b5 ]
+  %v6 = phi ptr [ %v5, %b4 ], [ %v29, %b5 ]
   %v7 = phi i32 [ 0, %b4 ], [ %v27, %b5 ]
-  %v8 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* elementtype(i32) @g3, i32* @g3, i32 1, i32* elementtype(i32) @g3), !srcloc !4
-  store i32 %v8, i32* %v6, align 4, !tbaa !0
-  %v9 = getelementptr i32, i32* %v6, i32 1
-  %v10 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* elementtype(i32) @g3, i32* @g3, i32 1, i32* elementtype(i32) @g3), !srcloc !4
-  store i32 %v10, i32* %v9, align 4, !tbaa !0
-  %v11 = getelementptr i32, i32* %v6, i32 2
-  %v12 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* elementtype(i32) @g3, i32* @g3, i32 1, i32* elementtype(i32) @g3), !srcloc !4
-  store i32 %v12, i32* %v11, align 4, !tbaa !0
-  %v13 = getelementptr i32, i32* %v6, i32 3
-  %v14 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* elementtype(i32) @g3, i32* @g3, i32 1, i32* elementtype(i32) @g3), !srcloc !4
-  store i32 %v14, i32* %v13, align 4, !tbaa !0
-  %v15 = getelementptr i32, i32* %v6, i32 4
-  %v16 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* elementtype(i32) @g3, i32* @g3, i32 1, i32* elementtype(i32) @g3), !srcloc !4
-  store i32 %v16, i32* %v15, align 4, !tbaa !0
-  %v17 = getelementptr i32, i32* %v6, i32 5
-  %v18 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* elementtype(i32) @g3, i32* @g3, i32 1, i32* elementtype(i32) @g3), !srcloc !4
-  store i32 %v18, i32* %v17, align 4, !tbaa !0
-  %v19 = getelementptr i32, i32* %v6, i32 6
-  %v20 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* elementtype(i32) @g3, i32* @g3, i32 1, i32* elementtype(i32) @g3), !srcloc !4
-  store i32 %v20, i32* %v19, align 4, !tbaa !0
-  %v21 = getelementptr i32, i32* %v6, i32 7
-  %v22 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* elementtype(i32) @g3, i32* @g3, i32 1, i32* elementtype(i32) @g3), !srcloc !4
-  store i32 %v22, i32* %v21, align 4, !tbaa !0
-  %v23 = getelementptr i32, i32* %v6, i32 8
-  %v24 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* elementtype(i32) @g3, i32* @g3, i32 1, i32* elementtype(i32) @g3), !srcloc !4
-  store i32 %v24, i32* %v23, align 4, !tbaa !0
-  %v25 = getelementptr i32, i32* %v6, i32 9
-  %v26 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* elementtype(i32) @g3, i32* @g3, i32 1, i32* elementtype(i32) @g3), !srcloc !4
-  store i32 %v26, i32* %v25, align 4, !tbaa !0
+  %v8 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(ptr elementtype(i32) @g3, ptr @g3, i32 1, ptr elementtype(i32) @g3), !srcloc !4
+  store i32 %v8, ptr %v6, align 4, !tbaa !0
+  %v9 = getelementptr i32, ptr %v6, i32 1
+  %v10 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(ptr elementtype(i32) @g3, ptr @g3, i32 1, ptr elementtype(i32) @g3), !srcloc !4
+  store i32 %v10, ptr %v9, align 4, !tbaa !0
+  %v11 = getelementptr i32, ptr %v6, i32 2
+  %v12 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(ptr elementtype(i32) @g3, ptr @g3, i32 1, ptr elementtype(i32) @g3), !srcloc !4
+  store i32 %v12, ptr %v11, align 4, !tbaa !0
+  %v13 = getelementptr i32, ptr %v6, i32 3
+  %v14 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(ptr elementtype(i32) @g3, ptr @g3, i32 1, ptr elementtype(i32) @g3), !srcloc !4
+  store i32 %v14, ptr %v13, align 4, !tbaa !0
+  %v15 = getelementptr i32, ptr %v6, i32 4
+  %v16 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(ptr elementtype(i32) @g3, ptr @g3, i32 1, ptr elementtype(i32) @g3), !srcloc !4
+  store i32 %v16, ptr %v15, align 4, !tbaa !0
+  %v17 = getelementptr i32, ptr %v6, i32 5
+  %v18 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(ptr elementtype(i32) @g3, ptr @g3, i32 1, ptr elementtype(i32) @g3), !srcloc !4
+  store i32 %v18, ptr %v17, align 4, !tbaa !0
+  %v19 = getelementptr i32, ptr %v6, i32 6
+  %v20 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(ptr elementtype(i32) @g3, ptr @g3, i32 1, ptr elementtype(i32) @g3), !srcloc !4
+  store i32 %v20, ptr %v19, align 4, !tbaa !0
+  %v21 = getelementptr i32, ptr %v6, i32 7
+  %v22 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(ptr elementtype(i32) @g3, ptr @g3, i32 1, ptr elementtype(i32) @g3), !srcloc !4
+  store i32 %v22, ptr %v21, align 4, !tbaa !0
+  %v23 = getelementptr i32, ptr %v6, i32 8
+  %v24 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(ptr elementtype(i32) @g3, ptr @g3, i32 1, ptr elementtype(i32) @g3), !srcloc !4
+  store i32 %v24, ptr %v23, align 4, !tbaa !0
+  %v25 = getelementptr i32, ptr %v6, i32 9
+  %v26 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(ptr elementtype(i32) @g3, ptr @g3, i32 1, ptr elementtype(i32) @g3), !srcloc !4
+  store i32 %v26, ptr %v25, align 4, !tbaa !0
   %v27 = add nsw i32 %v7, 10
   %v28 = icmp eq i32 %v27, 100
-  %v29 = getelementptr i32, i32* %v6, i32 10
+  %v29 = getelementptr i32, ptr %v6, i32 10
   br i1 %v28, label %b6, label %b5
 
 b6:                                               ; preds = %b5
@@ -90,7 +90,7 @@ b6:                                               ; preds = %b5
 declare void @f2(i32) #1
 
 ; Function Attrs: nounwind
-declare i32 @f3(i8* nocapture readonly) #1
+declare i32 @f3(ptr nocapture readonly) #1
 
 attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-fatures"="+hvx,+hvx-length64b" }
 attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/regscav-wrong-super-sub-regs.ll b/llvm/test/CodeGen/Hexagon/regscav-wrong-super-sub-regs.ll
index f320a766453ef..e01af104b4f66 100644
--- a/llvm/test/CodeGen/Hexagon/regscav-wrong-super-sub-regs.ll
+++ b/llvm/test/CodeGen/Hexagon/regscav-wrong-super-sub-regs.ll
@@ -4,55 +4,53 @@
 ; Test that register scvenging does not assert because of wrong
 ; bits being set for Kill and Def bit vectors in replaceSuperBySubRegs
 
-%s.0 = type { i32, i32*, [0 x i32], [0 x i32], [1 x i32] }
+%s.0 = type { i32, ptr, [0 x i32], [0 x i32], [1 x i32] }
 %s.1 = type { %s.2, %s.4, %s.5 }
 %s.2 = type { %s.3 }
 %s.3 = type { i32 }
 %s.4 = type { i32 }
-%s.5 = type { [0 x i32], [0 x i32 (i32*, i32*, i32*, i32*, i32*, i32, i32*)*] }
+%s.5 = type { [0 x i32], [0 x ptr] }
 
 @g0 = common global i32 0, align 4
 @g1 = common global %s.0 zeroinitializer, align 4
 @g2 = common global i32 0, align 4
 @g3 = common global i32 0, align 4
- at g4 = common global i32* null, align 4
+ at g4 = common global ptr null, align 4
 @g5 = common global i32 0, align 4
 @g6 = common global i32 0, align 4
 
 ; Function Attrs: nounwind
-define i32 @f0(%s.1* nocapture readonly %a0) #0 {
+define i32 @f0(ptr nocapture readonly %a0) #0 {
 b0:
   %v0 = alloca [0 x i32], align 4
-  %v1 = load i32, i32* @g0, align 4, !tbaa !0
-  %v2 = getelementptr inbounds %s.1, %s.1* %a0, i32 0, i32 0, i32 0, i32 0
-  %v3 = load i32, i32* %v2, align 4, !tbaa !0
-  %v4 = load i32*, i32** getelementptr inbounds (%s.0, %s.0* @g1, i32 0, i32 1), align 4, !tbaa !4
-  %v5 = load i32, i32* @g2, align 4, !tbaa !0
+  %v1 = load i32, ptr @g0, align 4, !tbaa !0
+  %v3 = load i32, ptr %a0, align 4, !tbaa !0
+  %v4 = load ptr, ptr getelementptr inbounds (%s.0, ptr @g1, i32 0, i32 1), align 4, !tbaa !4
+  %v5 = load i32, ptr @g2, align 4, !tbaa !0
   %v6 = sub i32 0, %v5
-  %v7 = getelementptr inbounds i32, i32* %v4, i32 %v6
-  %v8 = getelementptr inbounds %s.1, %s.1* %a0, i32 0, i32 1, i32 0
-  %v9 = load i32, i32* %v8, align 4, !tbaa !0
+  %v7 = getelementptr inbounds i32, ptr %v4, i32 %v6
+  %v8 = getelementptr inbounds %s.1, ptr %a0, i32 0, i32 1, i32 0
+  %v9 = load i32, ptr %v8, align 4, !tbaa !0
   switch i32 %v9, label %b17 [
     i32 0, label %b1
     i32 1, label %b2
   ]
 
 b1:                                               ; preds = %b0
-  store i32 0, i32* @g3, align 4, !tbaa !0
+  store i32 0, ptr @g3, align 4, !tbaa !0
   br label %b2
 
 b2:                                               ; preds = %b1, %b0
   %v10 = icmp eq i32 %v1, 0
   %v11 = icmp sgt i32 %v3, 0
-  %v12 = getelementptr inbounds [0 x i32], [0 x i32]* %v0, i32 0, i32 0
   %v13 = sdiv i32 %v3, 2
   %v14 = add i32 %v13, -1
-  %v15 = getelementptr inbounds [0 x i32], [0 x i32]* %v0, i32 0, i32 1
-  %v16 = getelementptr inbounds [0 x i32], [0 x i32]* %v0, i32 0, i32 2
-  %v17 = getelementptr inbounds %s.1, %s.1* %a0, i32 0, i32 2, i32 1, i32 %v1
-  %v18 = getelementptr inbounds %s.1, %s.1* %a0, i32 0, i32 2, i32 1, i32 0
+  %v15 = getelementptr inbounds [0 x i32], ptr %v0, i32 0, i32 1
+  %v16 = getelementptr inbounds [0 x i32], ptr %v0, i32 0, i32 2
+  %v17 = getelementptr inbounds %s.1, ptr %a0, i32 0, i32 2, i32 1, i32 %v1
+  %v18 = getelementptr inbounds %s.1, ptr %a0, i32 0, i32 2, i32 1, i32 0
   %v19 = sub i32 1, %v5
-  %v20 = getelementptr inbounds i32, i32* %v4, i32 %v19
+  %v20 = getelementptr inbounds i32, ptr %v4, i32 %v19
   %v21 = sdiv i32 %v3, 4
   %v22 = icmp slt i32 %v3, -3
   %v23 = add i32 %v3, -1
@@ -66,7 +64,7 @@ b2:                                               ; preds = %b1, %b0
   br label %b4
 
 b3:                                               ; preds = %b16
-  store i32 %v30, i32* @g3, align 4, !tbaa !0
+  store i32 %v30, ptr @g3, align 4, !tbaa !0
   br label %b4
 
 b4:                                               ; preds = %b13, %b3, %b2
@@ -84,15 +82,15 @@ b6:                                               ; preds = %b4
   br i1 %v11, label %b8, label %b7
 
 b7:                                               ; preds = %b6
-  store i32 0, i32* @g3, align 4, !tbaa !0
+  store i32 0, ptr @g3, align 4, !tbaa !0
   br label %b11
 
 b8:                                               ; preds = %b6
-  store i32 %v26, i32* @g3, align 4, !tbaa !0
+  store i32 %v26, ptr @g3, align 4, !tbaa !0
   br i1 %v28, label %b9, label %b11
 
 b9:                                               ; preds = %b8
-  %v36 = load i32*, i32** @g4, align 4, !tbaa !7
+  %v36 = load ptr, ptr @g4, align 4, !tbaa !7
   br label %b10
 
 b10:                                              ; preds = %b10, %b9
@@ -100,19 +98,19 @@ b10:                                              ; preds = %b10, %b9
   %v38 = phi i32 [ %v34, %b9 ], [ %v44, %b10 ]
   %v39 = add nsw i32 %v37, %v33
   %v40 = shl i32 %v39, 1
-  %v41 = getelementptr inbounds i32, i32* %v36, i32 %v40
-  %v42 = load i32, i32* %v41, align 4, !tbaa !0
+  %v41 = getelementptr inbounds i32, ptr %v36, i32 %v40
+  %v42 = load i32, ptr %v41, align 4, !tbaa !0
   %v43 = icmp slt i32 %v42, %v31
   %v44 = select i1 %v43, i32 0, i32 %v38
   %v45 = add nsw i32 %v37, 1
-  store i32 %v45, i32* @g3, align 4, !tbaa !0
+  store i32 %v45, ptr @g3, align 4, !tbaa !0
   %v46 = icmp slt i32 %v45, 0
   br i1 %v46, label %b10, label %b11
 
 b11:                                              ; preds = %b10, %b8, %b7
   %v47 = phi i32 [ %v26, %b8 ], [ 0, %b7 ], [ 0, %b10 ]
   %v48 = phi i32 [ %v34, %b8 ], [ %v34, %b7 ], [ %v44, %b10 ]
-  %v49 = load i32, i32* @g5, align 4, !tbaa !0
+  %v49 = load i32, ptr @g5, align 4, !tbaa !0
   %v50 = icmp slt i32 %v13, %v49
   %v51 = icmp slt i32 %v47, %v14
   %v52 = and i1 %v50, %v51
@@ -124,47 +122,47 @@ b12:                                              ; preds = %b11
   %v55 = mul i32 %v54, 2
   %v56 = add i32 %v47, 2
   %v57 = add i32 %v56, %v55
-  store i32 %v57, i32* @g3, align 4, !tbaa !0
+  store i32 %v57, ptr @g3, align 4, !tbaa !0
   br label %b13
 
 b13:                                              ; preds = %b12, %b11
   %v58 = shl i32 %v35, 2
-  %v59 = load i32*, i32** @g4, align 4, !tbaa !7
-  %v60 = getelementptr inbounds i32, i32* %v59, i32 %v58
-  %v61 = load i32, i32* %v60, align 4, !tbaa !0
-  %v62 = load i32, i32* %v7, align 4, !tbaa !0
+  %v59 = load ptr, ptr @g4, align 4, !tbaa !7
+  %v60 = getelementptr inbounds i32, ptr %v59, i32 %v58
+  %v61 = load i32, ptr %v60, align 4, !tbaa !0
+  %v62 = load i32, ptr %v7, align 4, !tbaa !0
   %v63 = add nsw i32 %v62, %v61
   %v64 = add nsw i32 %v63, %v32
-  store i32 %v64, i32* %v15, align 4, !tbaa !0
+  store i32 %v64, ptr %v15, align 4, !tbaa !0
   %v65 = add i32 %v35, -1
-  %v66 = getelementptr inbounds i32, i32* %v59, i32 %v65
-  %v67 = load i32, i32* %v66, align 4, !tbaa !0
+  %v66 = getelementptr inbounds i32, ptr %v59, i32 %v65
+  %v67 = load i32, ptr %v66, align 4, !tbaa !0
   %v68 = sub i32 %v49, %v5
-  %v69 = getelementptr inbounds i32, i32* %v4, i32 %v68
-  %v70 = load i32, i32* %v69, align 4, !tbaa !0
+  %v69 = getelementptr inbounds i32, ptr %v4, i32 %v68
+  %v70 = load i32, ptr %v69, align 4, !tbaa !0
   %v71 = add nsw i32 %v70, %v67
-  %v72 = load i32, i32* %v16, align 4, !tbaa !0
+  %v72 = load i32, ptr %v16, align 4, !tbaa !0
   %v73 = add nsw i32 %v71, %v72
-  store i32 %v73, i32* %v16, align 4, !tbaa !0
-  %v74 = load i32, i32* @g6, align 4, !tbaa !0
-  %v75 = load i32 (i32*, i32*, i32*, i32*, i32*, i32, i32*)*, i32 (i32*, i32*, i32*, i32*, i32*, i32, i32*)** %v17, align 4, !tbaa !7
-  %v76 = load i32, i32* getelementptr inbounds (%s.0, %s.0* @g1, i32 0, i32 4, i32 0), align 4, !tbaa !0
-  %v77 = call i32 %v75(i32* getelementptr inbounds (%s.0, %s.0* @g1, i32 0, i32 4, i32 0), i32* null, i32* null, i32* null, i32* null, i32 %v76, i32* null) #0
-  %v78 = load i32 (i32*, i32*, i32*, i32*, i32*, i32, i32*)*, i32 (i32*, i32*, i32*, i32*, i32*, i32, i32*)** %v18, align 4, !tbaa !7
-  %v79 = inttoptr i32 %v74 to i32*
-  %v80 = load i32, i32* getelementptr inbounds (%s.0, %s.0* @g1, i32 0, i32 4, i32 0), align 4, !tbaa !0
-  %v81 = call i32 %v78(i32* getelementptr inbounds (%s.0, %s.0* @g1, i32 0, i32 4, i32 0), i32* null, i32* null, i32* null, i32* %v79, i32 %v80, i32* %v12) #0
-  %v82 = load i32*, i32** @g4, align 4, !tbaa !7
-  %v83 = getelementptr inbounds i32, i32* %v82, i32 %v58
-  %v84 = load i32, i32* %v83, align 4, !tbaa !0
-  %v85 = load i32, i32* %v20, align 4, !tbaa !0
+  store i32 %v73, ptr %v16, align 4, !tbaa !0
+  %v74 = load i32, ptr @g6, align 4, !tbaa !0
+  %v75 = load ptr, ptr %v17, align 4, !tbaa !7
+  %v76 = load i32, ptr getelementptr inbounds (%s.0, ptr @g1, i32 0, i32 4, i32 0), align 4, !tbaa !0
+  %v77 = call i32 %v75(ptr getelementptr inbounds (%s.0, ptr @g1, i32 0, i32 4, i32 0), ptr null, ptr null, ptr null, ptr null, i32 %v76, ptr null) #0
+  %v78 = load ptr, ptr %v18, align 4, !tbaa !7
+  %v79 = inttoptr i32 %v74 to ptr
+  %v80 = load i32, ptr getelementptr inbounds (%s.0, ptr @g1, i32 0, i32 4, i32 0), align 4, !tbaa !0
+  %v81 = call i32 %v78(ptr getelementptr inbounds (%s.0, ptr @g1, i32 0, i32 4, i32 0), ptr null, ptr null, ptr null, ptr %v79, i32 %v80, ptr %v0) #0
+  %v82 = load ptr, ptr @g4, align 4, !tbaa !7
+  %v83 = getelementptr inbounds i32, ptr %v82, i32 %v58
+  %v84 = load i32, ptr %v83, align 4, !tbaa !0
+  %v85 = load i32, ptr %v20, align 4, !tbaa !0
   %v86 = add nsw i32 %v85, %v84
-  store i32 %v86, i32* %v15, align 4, !tbaa !0
-  %v87 = load i32, i32* %v12, align 4, !tbaa !0
+  store i32 %v86, ptr %v15, align 4, !tbaa !0
+  %v87 = load i32, ptr %v0, align 4, !tbaa !0
   %v88 = icmp eq i32 %v87, 0
   %v89 = select i1 %v88, i32 %v48, i32 1
-  store i32 %v89, i32* @g5, align 4, !tbaa !0
-  store i32 0, i32* @g3, align 4, !tbaa !0
+  store i32 %v89, ptr @g5, align 4, !tbaa !0
+  store i32 0, ptr @g3, align 4, !tbaa !0
   br i1 %v22, label %b4, label %b14
 
 b14:                                              ; preds = %b16, %b13

diff  --git a/llvm/test/CodeGen/Hexagon/regscavenger_fail_hwloop.ll b/llvm/test/CodeGen/Hexagon/regscavenger_fail_hwloop.ll
index 2db79424d915f..23d9931a019e2 100644
--- a/llvm/test/CodeGen/Hexagon/regscavenger_fail_hwloop.ll
+++ b/llvm/test/CodeGen/Hexagon/regscavenger_fail_hwloop.ll
@@ -8,7 +8,7 @@
 target triple = "hexagon-unknown-linux-gnu"
 
 ; Function Attrs: nounwind
-define hidden fastcc void @f0(i8* nocapture %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i8* nocapture %a5) #0 {
+define hidden fastcc void @f0(ptr nocapture %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, ptr nocapture %a5) #0 {
 b0:
   %v0 = add i32 %a3, -4
   %v1 = icmp ult i32 %v0, %a1
@@ -80,7 +80,7 @@ b2:                                               ; preds = %b1, %b0
 b3:                                               ; preds = %b1
   %v59 = mul i32 %a3, %a2
   %v60 = add i32 %v59, %a1
-  %v61 = getelementptr inbounds i8, i8* %a5, i32 %v60
+  %v61 = getelementptr inbounds i8, ptr %a5, i32 %v60
   %v62 = shl i32 %a3, 1
   %v63 = sub i32 0, %v62
   %v64 = sub i32 %a3, %v62
@@ -94,34 +94,34 @@ b3:                                               ; preds = %b1
   br label %b4
 
 b4:                                               ; preds = %b4, %b3
-  %v72 = phi i8* [ %a0, %b3 ], [ %v165, %b4 ]
-  %v73 = phi i8* [ %v61, %b3 ], [ %v164, %b4 ]
+  %v72 = phi ptr [ %a0, %b3 ], [ %v165, %b4 ]
+  %v73 = phi ptr [ %v61, %b3 ], [ %v164, %b4 ]
   %v74 = phi i32 [ 4, %b3 ], [ %v166, %b4 ]
-  %v75 = getelementptr inbounds i8, i8* %v73, i32 %v63
-  %v76 = load i8, i8* %v75, align 1, !tbaa !0
+  %v75 = getelementptr inbounds i8, ptr %v73, i32 %v63
+  %v76 = load i8, ptr %v75, align 1, !tbaa !0
   %v77 = zext i8 %v76 to i32
-  %v78 = getelementptr inbounds i8, i8* %v73, i32 %v64
-  %v79 = load i8, i8* %v78, align 1, !tbaa !0
+  %v78 = getelementptr inbounds i8, ptr %v73, i32 %v64
+  %v79 = load i8, ptr %v78, align 1, !tbaa !0
   %v80 = zext i8 %v79 to i32
-  %v81 = load i8, i8* %v73, align 1, !tbaa !0
+  %v81 = load i8, ptr %v73, align 1, !tbaa !0
   %v82 = zext i8 %v81 to i32
-  %v83 = getelementptr inbounds i8, i8* %v73, i32 %v66
-  %v84 = load i8, i8* %v83, align 1, !tbaa !0
+  %v83 = getelementptr inbounds i8, ptr %v73, i32 %v66
+  %v84 = load i8, ptr %v83, align 1, !tbaa !0
   %v85 = zext i8 %v84 to i32
-  %v86 = getelementptr inbounds i8, i8* %v73, i32 %v67
-  %v87 = load i8, i8* %v86, align 1, !tbaa !0
+  %v86 = getelementptr inbounds i8, ptr %v73, i32 %v67
+  %v87 = load i8, ptr %v86, align 1, !tbaa !0
   %v88 = zext i8 %v87 to i32
-  %v89 = getelementptr inbounds i8, i8* %v73, i32 %v68
-  %v90 = load i8, i8* %v89, align 1, !tbaa !0
+  %v89 = getelementptr inbounds i8, ptr %v73, i32 %v68
+  %v90 = load i8, ptr %v89, align 1, !tbaa !0
   %v91 = zext i8 %v90 to i32
-  %v92 = getelementptr inbounds i8, i8* %v73, i32 %v69
-  %v93 = load i8, i8* %v92, align 1, !tbaa !0
+  %v92 = getelementptr inbounds i8, ptr %v73, i32 %v69
+  %v93 = load i8, ptr %v92, align 1, !tbaa !0
   %v94 = zext i8 %v93 to i32
-  %v95 = getelementptr inbounds i8, i8* %v73, i32 %v70
-  %v96 = load i8, i8* %v95, align 1, !tbaa !0
+  %v95 = getelementptr inbounds i8, ptr %v73, i32 %v70
+  %v96 = load i8, ptr %v95, align 1, !tbaa !0
   %v97 = zext i8 %v96 to i32
-  %v98 = getelementptr inbounds i8, i8* %v73, i32 %v71
-  %v99 = load i8, i8* %v98, align 1, !tbaa !0
+  %v98 = getelementptr inbounds i8, ptr %v73, i32 %v71
+  %v99 = load i8, ptr %v98, align 1, !tbaa !0
   %v100 = zext i8 %v99 to i32
   %v101 = add nsw i32 %v88, %v80
   %v102 = mul i32 %v101, -5
@@ -138,7 +138,7 @@ b4:                                               ; preds = %b4, %b3
   %v113 = add i32 %v112, 255
   %v114 = select i1 %v111, i32 %v113, i32 %v109
   %v115 = trunc i32 %v114 to i8
-  store i8 %v115, i8* %v72, align 1, !tbaa !0
+  store i8 %v115, ptr %v72, align 1, !tbaa !0
   %v116 = add nsw i32 %v91, %v82
   %v117 = mul i32 %v116, -5
   %v118 = add nsw i32 %v88, %v85
@@ -154,8 +154,8 @@ b4:                                               ; preds = %b4, %b3
   %v128 = add i32 %v127, 255
   %v129 = select i1 %v126, i32 %v128, i32 %v124
   %v130 = trunc i32 %v129 to i8
-  %v131 = getelementptr inbounds i8, i8* %v72, i32 4
-  store i8 %v130, i8* %v131, align 1, !tbaa !0
+  %v131 = getelementptr inbounds i8, ptr %v72, i32 4
+  store i8 %v130, ptr %v131, align 1, !tbaa !0
   %v132 = add nsw i32 %v94, %v85
   %v133 = mul i32 %v132, -5
   %v134 = add nsw i32 %v91, %v88
@@ -171,8 +171,8 @@ b4:                                               ; preds = %b4, %b3
   %v144 = add i32 %v143, 255
   %v145 = select i1 %v142, i32 %v144, i32 %v140
   %v146 = trunc i32 %v145 to i8
-  %v147 = getelementptr inbounds i8, i8* %v72, i32 8
-  store i8 %v146, i8* %v147, align 1, !tbaa !0
+  %v147 = getelementptr inbounds i8, ptr %v72, i32 8
+  store i8 %v146, ptr %v147, align 1, !tbaa !0
   %v148 = add nsw i32 %v97, %v88
   %v149 = mul i32 %v148, -5
   %v150 = add nsw i32 %v94, %v91
@@ -188,16 +188,16 @@ b4:                                               ; preds = %b4, %b3
   %v160 = add i32 %v159, 255
   %v161 = select i1 %v158, i32 %v160, i32 %v156
   %v162 = trunc i32 %v161 to i8
-  %v163 = getelementptr inbounds i8, i8* %v72, i32 12
-  store i8 %v162, i8* %v163, align 1, !tbaa !0
-  %v164 = getelementptr inbounds i8, i8* %v73, i32 1
-  %v165 = getelementptr inbounds i8, i8* %v72, i32 1
+  %v163 = getelementptr inbounds i8, ptr %v72, i32 12
+  store i8 %v162, ptr %v163, align 1, !tbaa !0
+  %v164 = getelementptr inbounds i8, ptr %v73, i32 1
+  %v165 = getelementptr inbounds i8, ptr %v72, i32 1
   %v166 = add i32 %v74, -1
   %v167 = icmp eq i32 %v166, 0
   br i1 %v167, label %b7, label %b4
 
 b5:                                               ; preds = %b5, %b2
-  %v168 = phi i8* [ %a0, %b2 ], [ %v312, %b5 ]
+  %v168 = phi ptr [ %a0, %b2 ], [ %v312, %b5 ]
   %v169 = phi i32 [ 0, %b2 ], [ %v313, %b5 ]
   %v170 = add i32 %v169, %a1
   %v171 = icmp slt i32 %v170, 0
@@ -205,36 +205,36 @@ b5:                                               ; preds = %b5, %b2
   %v173 = select i1 %v172, i32 %v170, i32 %v6
   %v174 = select i1 %v171, i32 0, i32 %v173
   %v175 = add i32 %v19, %v174
-  %v176 = getelementptr inbounds i8, i8* %a5, i32 %v175
-  %v177 = load i8, i8* %v176, align 1, !tbaa !0
+  %v176 = getelementptr inbounds i8, ptr %a5, i32 %v175
+  %v177 = load i8, ptr %v176, align 1, !tbaa !0
   %v178 = zext i8 %v177 to i32
   %v179 = add i32 %v24, %v174
-  %v180 = getelementptr inbounds i8, i8* %a5, i32 %v179
-  %v181 = load i8, i8* %v180, align 1, !tbaa !0
+  %v180 = getelementptr inbounds i8, ptr %a5, i32 %v179
+  %v181 = load i8, ptr %v180, align 1, !tbaa !0
   %v182 = zext i8 %v181 to i32
   %v183 = mul nsw i32 %v182, -5
   %v184 = add nsw i32 %v183, %v178
   %v185 = add i32 %v29, %v174
-  %v186 = getelementptr inbounds i8, i8* %a5, i32 %v185
-  %v187 = load i8, i8* %v186, align 1, !tbaa !0
+  %v186 = getelementptr inbounds i8, ptr %a5, i32 %v185
+  %v187 = load i8, ptr %v186, align 1, !tbaa !0
   %v188 = zext i8 %v187 to i32
   %v189 = mul nsw i32 %v188, 20
   %v190 = add nsw i32 %v189, %v184
   %v191 = add i32 %v13, %v174
-  %v192 = getelementptr inbounds i8, i8* %a5, i32 %v191
-  %v193 = load i8, i8* %v192, align 1, !tbaa !0
+  %v192 = getelementptr inbounds i8, ptr %a5, i32 %v191
+  %v193 = load i8, ptr %v192, align 1, !tbaa !0
   %v194 = zext i8 %v193 to i32
   %v195 = mul nsw i32 %v194, 20
   %v196 = add nsw i32 %v195, %v190
   %v197 = add i32 %v35, %v174
-  %v198 = getelementptr inbounds i8, i8* %a5, i32 %v197
-  %v199 = load i8, i8* %v198, align 1, !tbaa !0
+  %v198 = getelementptr inbounds i8, ptr %a5, i32 %v197
+  %v199 = load i8, ptr %v198, align 1, !tbaa !0
   %v200 = zext i8 %v199 to i32
   %v201 = mul nsw i32 %v200, -5
   %v202 = add nsw i32 %v201, %v196
   %v203 = add i32 %v40, %v174
-  %v204 = getelementptr inbounds i8, i8* %a5, i32 %v203
-  %v205 = load i8, i8* %v204, align 1, !tbaa !0
+  %v204 = getelementptr inbounds i8, ptr %a5, i32 %v203
+  %v205 = load i8, ptr %v204, align 1, !tbaa !0
   %v206 = zext i8 %v205 to i32
   %v207 = add nsw i32 %v206, %v202
   %v208 = add nsw i32 %v207, 16
@@ -245,29 +245,29 @@ b5:                                               ; preds = %b5, %b2
   %v213 = add i32 %v212, 255
   %v214 = select i1 %v211, i32 %v213, i32 %v209
   %v215 = trunc i32 %v214 to i8
-  store i8 %v215, i8* %v168, align 1, !tbaa !0
-  %v216 = getelementptr inbounds i8, i8* %v168, i32 4
-  %v217 = load i8, i8* %v180, align 1, !tbaa !0
+  store i8 %v215, ptr %v168, align 1, !tbaa !0
+  %v216 = getelementptr inbounds i8, ptr %v168, i32 4
+  %v217 = load i8, ptr %v180, align 1, !tbaa !0
   %v218 = zext i8 %v217 to i32
-  %v219 = load i8, i8* %v186, align 1, !tbaa !0
+  %v219 = load i8, ptr %v186, align 1, !tbaa !0
   %v220 = zext i8 %v219 to i32
   %v221 = mul nsw i32 %v220, -5
   %v222 = add nsw i32 %v221, %v218
-  %v223 = load i8, i8* %v192, align 1, !tbaa !0
+  %v223 = load i8, ptr %v192, align 1, !tbaa !0
   %v224 = zext i8 %v223 to i32
   %v225 = mul nsw i32 %v224, 20
   %v226 = add nsw i32 %v225, %v222
-  %v227 = load i8, i8* %v198, align 1, !tbaa !0
+  %v227 = load i8, ptr %v198, align 1, !tbaa !0
   %v228 = zext i8 %v227 to i32
   %v229 = mul nsw i32 %v228, 20
   %v230 = add nsw i32 %v229, %v226
-  %v231 = load i8, i8* %v204, align 1, !tbaa !0
+  %v231 = load i8, ptr %v204, align 1, !tbaa !0
   %v232 = zext i8 %v231 to i32
   %v233 = mul nsw i32 %v232, -5
   %v234 = add nsw i32 %v233, %v230
   %v235 = add i32 %v46, %v174
-  %v236 = getelementptr inbounds i8, i8* %a5, i32 %v235
-  %v237 = load i8, i8* %v236, align 1, !tbaa !0
+  %v236 = getelementptr inbounds i8, ptr %a5, i32 %v235
+  %v237 = load i8, ptr %v236, align 1, !tbaa !0
   %v238 = zext i8 %v237 to i32
   %v239 = add nsw i32 %v238, %v234
   %v240 = add nsw i32 %v239, 16
@@ -278,29 +278,29 @@ b5:                                               ; preds = %b5, %b2
   %v245 = add i32 %v244, 255
   %v246 = select i1 %v243, i32 %v245, i32 %v241
   %v247 = trunc i32 %v246 to i8
-  store i8 %v247, i8* %v216, align 1, !tbaa !0
-  %v248 = getelementptr inbounds i8, i8* %v168, i32 8
-  %v249 = load i8, i8* %v186, align 1, !tbaa !0
+  store i8 %v247, ptr %v216, align 1, !tbaa !0
+  %v248 = getelementptr inbounds i8, ptr %v168, i32 8
+  %v249 = load i8, ptr %v186, align 1, !tbaa !0
   %v250 = zext i8 %v249 to i32
-  %v251 = load i8, i8* %v192, align 1, !tbaa !0
+  %v251 = load i8, ptr %v192, align 1, !tbaa !0
   %v252 = zext i8 %v251 to i32
   %v253 = mul nsw i32 %v252, -5
   %v254 = add nsw i32 %v253, %v250
-  %v255 = load i8, i8* %v198, align 1, !tbaa !0
+  %v255 = load i8, ptr %v198, align 1, !tbaa !0
   %v256 = zext i8 %v255 to i32
   %v257 = mul nsw i32 %v256, 20
   %v258 = add nsw i32 %v257, %v254
-  %v259 = load i8, i8* %v204, align 1, !tbaa !0
+  %v259 = load i8, ptr %v204, align 1, !tbaa !0
   %v260 = zext i8 %v259 to i32
   %v261 = mul nsw i32 %v260, 20
   %v262 = add nsw i32 %v261, %v258
-  %v263 = load i8, i8* %v236, align 1, !tbaa !0
+  %v263 = load i8, ptr %v236, align 1, !tbaa !0
   %v264 = zext i8 %v263 to i32
   %v265 = mul nsw i32 %v264, -5
   %v266 = add nsw i32 %v265, %v262
   %v267 = add i32 %v52, %v174
-  %v268 = getelementptr inbounds i8, i8* %a5, i32 %v267
-  %v269 = load i8, i8* %v268, align 1, !tbaa !0
+  %v268 = getelementptr inbounds i8, ptr %a5, i32 %v267
+  %v269 = load i8, ptr %v268, align 1, !tbaa !0
   %v270 = zext i8 %v269 to i32
   %v271 = add nsw i32 %v270, %v266
   %v272 = add nsw i32 %v271, 16
@@ -311,29 +311,29 @@ b5:                                               ; preds = %b5, %b2
   %v277 = add i32 %v276, 255
   %v278 = select i1 %v275, i32 %v277, i32 %v273
   %v279 = trunc i32 %v278 to i8
-  store i8 %v279, i8* %v248, align 1, !tbaa !0
-  %v280 = getelementptr inbounds i8, i8* %v168, i32 12
-  %v281 = load i8, i8* %v192, align 1, !tbaa !0
+  store i8 %v279, ptr %v248, align 1, !tbaa !0
+  %v280 = getelementptr inbounds i8, ptr %v168, i32 12
+  %v281 = load i8, ptr %v192, align 1, !tbaa !0
   %v282 = zext i8 %v281 to i32
-  %v283 = load i8, i8* %v198, align 1, !tbaa !0
+  %v283 = load i8, ptr %v198, align 1, !tbaa !0
   %v284 = zext i8 %v283 to i32
   %v285 = mul nsw i32 %v284, -5
   %v286 = add nsw i32 %v285, %v282
-  %v287 = load i8, i8* %v204, align 1, !tbaa !0
+  %v287 = load i8, ptr %v204, align 1, !tbaa !0
   %v288 = zext i8 %v287 to i32
   %v289 = mul nsw i32 %v288, 20
   %v290 = add nsw i32 %v289, %v286
-  %v291 = load i8, i8* %v236, align 1, !tbaa !0
+  %v291 = load i8, ptr %v236, align 1, !tbaa !0
   %v292 = zext i8 %v291 to i32
   %v293 = mul nsw i32 %v292, 20
   %v294 = add nsw i32 %v293, %v290
-  %v295 = load i8, i8* %v268, align 1, !tbaa !0
+  %v295 = load i8, ptr %v268, align 1, !tbaa !0
   %v296 = zext i8 %v295 to i32
   %v297 = mul nsw i32 %v296, -5
   %v298 = add nsw i32 %v297, %v294
   %v299 = add i32 %v58, %v174
-  %v300 = getelementptr inbounds i8, i8* %a5, i32 %v299
-  %v301 = load i8, i8* %v300, align 1, !tbaa !0
+  %v300 = getelementptr inbounds i8, ptr %a5, i32 %v299
+  %v301 = load i8, ptr %v300, align 1, !tbaa !0
   %v302 = zext i8 %v301 to i32
   %v303 = add nsw i32 %v302, %v298
   %v304 = add nsw i32 %v303, 16
@@ -344,8 +344,8 @@ b5:                                               ; preds = %b5, %b2
   %v309 = add i32 %v308, 255
   %v310 = select i1 %v307, i32 %v309, i32 %v305
   %v311 = trunc i32 %v310 to i8
-  store i8 %v311, i8* %v280, align 1, !tbaa !0
-  %v312 = getelementptr inbounds i8, i8* %v168, i32 1
+  store i8 %v311, ptr %v280, align 1, !tbaa !0
+  %v312 = getelementptr inbounds i8, ptr %v168, i32 1
   %v313 = add i32 %v169, 1
   %v314 = icmp eq i32 %v313, 4
   br i1 %v314, label %b6, label %b5

diff  --git a/llvm/test/CodeGen/Hexagon/regscavengerbug.ll b/llvm/test/CodeGen/Hexagon/regscavengerbug.ll
index ab9318e52ba66..35d77c7c7ecbd 100644
--- a/llvm/test/CodeGen/Hexagon/regscavengerbug.ll
+++ b/llvm/test/CodeGen/Hexagon/regscavengerbug.ll
@@ -9,340 +9,314 @@ target triple = "hexagon-unknown-linux-gnu"
 %1 = type { %2 }
 %2 = type { [4 x [4 x double]] }
 %3 = type { [3 x double] }
-%4 = type { %5, %0, %0, %5*, %3, %3 }
-%5 = type { i32 (...)** }
+%4 = type { %5, %0, %0, ptr, %3, %3 }
+%5 = type { ptr }
 %6 = type { %3, %3 }
 
-declare void @f0(%3* sret(%3), %0*, %3*)
+declare void @f0(ptr sret(%3), ptr, ptr)
 
 ; Function Attrs: nounwind
-define void @f1(%4* %a0, %0* nocapture %a1, %0* nocapture %a2) #0 align 2 {
+define void @f1(ptr %a0, ptr nocapture %a1, ptr nocapture %a2) #0 align 2 {
 b0:
   %v0 = alloca %6, align 8
   %v1 = alloca [2 x [2 x [2 x %3]]], align 8
   %v2 = alloca %3, align 8
-  %v3 = getelementptr inbounds %4, %4* %a0, i32 0, i32 1
-  %v4 = bitcast %0* %v3 to i8*
-  %v5 = bitcast %0* %a1 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v4, i8* align 8 %v5, i32 128, i1 false)
-  %v6 = getelementptr inbounds %4, %4* %a0, i32 0, i32 2
-  %v7 = bitcast %0* %v6 to i8*
-  %v8 = bitcast %0* %a2 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v7, i8* align 8 %v8, i32 128, i1 false)
-  %v9 = bitcast %6* %v0 to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 8 %v9, i8 0, i64 48, i1 false)
-  %v10 = getelementptr inbounds %4, %4* %a0, i32 0, i32 3
-  %v11 = load %5*, %5** %v10, align 4, !tbaa !0
-  %v12 = bitcast %5* %v11 to i32 (%5*, double, double, %6*)***
-  %v13 = load i32 (%5*, double, double, %6*)**, i32 (%5*, double, double, %6*)*** %v12, align 4, !tbaa !4
-  %v14 = getelementptr inbounds i32 (%5*, double, double, %6*)*, i32 (%5*, double, double, %6*)** %v13, i32 3
-  %v15 = load i32 (%5*, double, double, %6*)*, i32 (%5*, double, double, %6*)** %v14, align 4
-  %v16 = call i32 %v15(%5* %v11, double 0.000000e+00, double 0.000000e+00, %6* %v0)
+  %v3 = getelementptr inbounds %4, ptr %a0, i32 0, i32 1
+  call void @llvm.memcpy.p0.p0.i32(ptr align 8 %v3, ptr align 8 %a1, i32 128, i1 false)
+  %v6 = getelementptr inbounds %4, ptr %a0, i32 0, i32 2
+  call void @llvm.memcpy.p0.p0.i32(ptr align 8 %v6, ptr align 8 %a2, i32 128, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 8 %v0, i8 0, i64 48, i1 false)
+  %v10 = getelementptr inbounds %4, ptr %a0, i32 0, i32 3
+  %v11 = load ptr, ptr %v10, align 4, !tbaa !0
+  %v13 = load ptr, ptr %v11, align 4, !tbaa !4
+  %v14 = getelementptr inbounds ptr, ptr %v13, i32 3
+  %v15 = load ptr, ptr %v14, align 4
+  %v16 = call i32 %v15(ptr %v11, double 0.000000e+00, double 0.000000e+00, ptr %v0)
   %v17 = icmp eq i32 %v16, 0
   br i1 %v17, label %b1, label %b3
 
 b1:                                               ; preds = %b0
-  %v18 = getelementptr inbounds %4, %4* %a0, i32 0, i32 4, i32 0, i32 0
-  store double -1.000000e+06, double* %v18, align 8, !tbaa !6
-  %v19 = getelementptr inbounds %4, %4* %a0, i32 0, i32 4, i32 0, i32 1
-  store double -1.000000e+06, double* %v19, align 8, !tbaa !6
-  %v20 = getelementptr inbounds %4, %4* %a0, i32 0, i32 4, i32 0, i32 2
-  store double -1.000000e+06, double* %v20, align 8, !tbaa !6
-  %v21 = getelementptr inbounds %4, %4* %a0, i32 0, i32 5, i32 0, i32 0
-  store double 1.000000e+06, double* %v21, align 8, !tbaa !6
-  %v22 = getelementptr inbounds %4, %4* %a0, i32 0, i32 5, i32 0, i32 1
-  store double 1.000000e+06, double* %v22, align 8, !tbaa !6
-  %v23 = getelementptr inbounds %4, %4* %a0, i32 0, i32 5, i32 0, i32 2
-  store double 1.000000e+06, double* %v23, align 8, !tbaa !6
+  %v18 = getelementptr inbounds %4, ptr %a0, i32 0, i32 4, i32 0, i32 0
+  store double -1.000000e+06, ptr %v18, align 8, !tbaa !6
+  %v19 = getelementptr inbounds %4, ptr %a0, i32 0, i32 4, i32 0, i32 1
+  store double -1.000000e+06, ptr %v19, align 8, !tbaa !6
+  %v20 = getelementptr inbounds %4, ptr %a0, i32 0, i32 4, i32 0, i32 2
+  store double -1.000000e+06, ptr %v20, align 8, !tbaa !6
+  %v21 = getelementptr inbounds %4, ptr %a0, i32 0, i32 5, i32 0, i32 0
+  store double 1.000000e+06, ptr %v21, align 8, !tbaa !6
+  %v22 = getelementptr inbounds %4, ptr %a0, i32 0, i32 5, i32 0, i32 1
+  store double 1.000000e+06, ptr %v22, align 8, !tbaa !6
+  %v23 = getelementptr inbounds %4, ptr %a0, i32 0, i32 5, i32 0, i32 2
+  store double 1.000000e+06, ptr %v23, align 8, !tbaa !6
   br label %b2
 
 b2:                                               ; preds = %b3, %b1
   ret void
 
 b3:                                               ; preds = %b0
-  %v24 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 0
-  %v25 = bitcast [2 x [2 x [2 x %3]]]* %v1 to i8*
-  %v26 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 2
-  %v27 = bitcast %3* %v26 to i8*
-  %v28 = bitcast [2 x [2 x [2 x %3]]]* %v1 to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 8 %v28, i8 0, i64 48, i1 false)
-  call void @llvm.memset.p0i8.i64(i8* align 8 %v27, i8 0, i64 24, i1 false)
-  %v29 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 3
-  %v30 = bitcast %3* %v29 to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 8 %v30, i8 0, i64 24, i1 false)
-  %v31 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 4
-  %v32 = bitcast %3* %v31 to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 8 %v32, i8 0, i64 24, i1 false)
-  %v33 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 5
-  %v34 = bitcast %3* %v33 to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 8 %v34, i8 0, i64 24, i1 false)
-  %v35 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 6
-  %v36 = bitcast %3* %v35 to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 8 %v36, i8 0, i64 24, i1 false)
-  %v37 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 7
-  %v38 = bitcast %3* %v37 to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 8 %v38, i8 0, i64 24, i1 false)
-  %v39 = getelementptr inbounds %6, %6* %v0, i32 0, i32 0, i32 0, i32 0
-  %v40 = getelementptr inbounds %6, %6* %v0, i32 0, i32 0, i32 0, i32 1
-  %v41 = getelementptr inbounds %6, %6* %v0, i32 0, i32 0, i32 0, i32 2
-  %v42 = bitcast %3* %v2 to i8*
-  %v43 = getelementptr inbounds %6, %6* %v0, i32 0, i32 1, i32 0, i32 2
-  %v44 = getelementptr inbounds %6, %6* %v0, i32 0, i32 1, i32 0, i32 1
-  %v45 = getelementptr inbounds %6, %6* %v0, i32 0, i32 1, i32 0, i32 0
-  %v46 = load double, double* %v39, align 8, !tbaa !6
-  %v47 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store double %v46, double* %v47, align 8, !tbaa !6
-  %v48 = load double, double* %v40, align 8, !tbaa !6
-  %v49 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1
-  store double %v48, double* %v49, align 8, !tbaa !6
-  %v50 = load double, double* %v41, align 8, !tbaa !6
-  %v51 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 2
-  store double %v50, double* %v51, align 8, !tbaa !6
-  call void @f0(%3* sret(%3) %v2, %0* %v3, %3* %v24)
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v25, i8* align 8 %v42, i32 24, i1 false)
-  %v52 = load double, double* %v39, align 8, !tbaa !6
-  %v53 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0
-  store double %v52, double* %v53, align 8, !tbaa !6
-  %v54 = load double, double* %v40, align 8, !tbaa !6
-  %v55 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 1
-  store double %v54, double* %v55, align 8, !tbaa !6
-  %v56 = load double, double* %v43, align 8, !tbaa !6
-  %v57 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 2
-  store double %v56, double* %v57, align 8, !tbaa !6
-  %v58 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 1
-  call void @f0(%3* sret(%3) %v2, %0* %v3, %3* %v58)
-  %v59 = bitcast %3* %v58 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v59, i8* align 8 %v42, i32 24, i1 false)
-  %v60 = load double, double* %v39, align 8, !tbaa !6
-  %v61 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0
-  store double %v60, double* %v61, align 8, !tbaa !6
-  %v62 = load double, double* %v44, align 8, !tbaa !6
-  %v63 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 1
-  store double %v62, double* %v63, align 8, !tbaa !6
-  %v64 = load double, double* %v41, align 8, !tbaa !6
-  %v65 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 2
-  store double %v64, double* %v65, align 8, !tbaa !6
-  %v66 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 0
-  call void @f0(%3* sret(%3) %v2, %0* %v3, %3* %v66)
-  %v67 = bitcast %3* %v66 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v67, i8* align 8 %v42, i32 24, i1 false)
-  %v68 = load double, double* %v39, align 8, !tbaa !6
-  %v69 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 1, i32 0, i32 0
-  store double %v68, double* %v69, align 8, !tbaa !6
-  %v70 = load double, double* %v44, align 8, !tbaa !6
-  %v71 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 1, i32 0, i32 1
-  store double %v70, double* %v71, align 8, !tbaa !6
-  %v72 = load double, double* %v43, align 8, !tbaa !6
-  %v73 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 1, i32 0, i32 2
-  store double %v72, double* %v73, align 8, !tbaa !6
-  %v74 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 1
-  call void @f0(%3* sret(%3) %v2, %0* %v3, %3* %v74)
-  %v75 = bitcast %3* %v74 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v75, i8* align 8 %v42, i32 24, i1 false)
-  %v76 = load double, double* %v45, align 8, !tbaa !6
-  %v77 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0
-  store double %v76, double* %v77, align 8, !tbaa !6
-  %v78 = load double, double* %v40, align 8, !tbaa !6
-  %v79 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 1
-  store double %v78, double* %v79, align 8, !tbaa !6
-  %v80 = load double, double* %v41, align 8, !tbaa !6
-  %v81 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 2
-  store double %v80, double* %v81, align 8, !tbaa !6
-  %v82 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 0
-  call void @f0(%3* sret(%3) %v2, %0* %v3, %3* %v82)
-  %v83 = bitcast %3* %v82 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v83, i8* align 8 %v42, i32 24, i1 false)
-  %v84 = load double, double* %v45, align 8, !tbaa !6
-  %v85 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0
-  store double %v84, double* %v85, align 8, !tbaa !6
-  %v86 = load double, double* %v40, align 8, !tbaa !6
-  %v87 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1
-  store double %v86, double* %v87, align 8, !tbaa !6
-  %v88 = load double, double* %v43, align 8, !tbaa !6
-  %v89 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 2
-  store double %v88, double* %v89, align 8, !tbaa !6
-  %v90 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 1
-  call void @f0(%3* sret(%3) %v2, %0* %v3, %3* %v90)
-  %v91 = bitcast %3* %v90 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v91, i8* align 8 %v42, i32 24, i1 false)
-  %v92 = load double, double* %v45, align 8, !tbaa !6
-  %v93 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 0
-  store double %v92, double* %v93, align 8, !tbaa !6
-  %v94 = load double, double* %v44, align 8, !tbaa !6
-  %v95 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1
-  store double %v94, double* %v95, align 8, !tbaa !6
-  %v96 = load double, double* %v41, align 8, !tbaa !6
-  %v97 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 2
-  store double %v96, double* %v97, align 8, !tbaa !6
-  %v98 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 0
-  call void @f0(%3* sret(%3) %v2, %0* %v3, %3* %v98)
-  %v99 = bitcast %3* %v98 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v99, i8* align 8 %v42, i32 24, i1 false)
-  %v100 = load double, double* %v45, align 8, !tbaa !6
-  %v101 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 1, i32 0, i32 0
-  store double %v100, double* %v101, align 8, !tbaa !6
-  %v102 = load double, double* %v44, align 8, !tbaa !6
-  %v103 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 1, i32 0, i32 1
-  store double %v102, double* %v103, align 8, !tbaa !6
-  %v104 = load double, double* %v43, align 8, !tbaa !6
-  %v105 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 1, i32 0, i32 2
-  store double %v104, double* %v105, align 8, !tbaa !6
-  %v106 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 1
-  call void @f0(%3* sret(%3) %v2, %0* %v3, %3* %v106)
-  %v107 = bitcast %3* %v106 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v107, i8* align 8 %v42, i32 24, i1 false)
-  %v108 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %v109 = load double, double* %v108, align 8, !tbaa !6
-  %v110 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1
-  %v111 = load double, double* %v110, align 8, !tbaa !6
-  %v112 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 2
-  %v113 = load double, double* %v112, align 8, !tbaa !6
-  %v114 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0
-  %v115 = load double, double* %v114, align 8, !tbaa !6
+  %v26 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 0, i32 2
+  call void @llvm.memset.p0.i64(ptr align 8 %v1, i8 0, i64 48, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 8 %v26, i8 0, i64 24, i1 false)
+  %v29 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 0, i32 3
+  call void @llvm.memset.p0.i64(ptr align 8 %v29, i8 0, i64 24, i1 false)
+  %v31 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 0, i32 4
+  call void @llvm.memset.p0.i64(ptr align 8 %v31, i8 0, i64 24, i1 false)
+  %v33 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 0, i32 5
+  call void @llvm.memset.p0.i64(ptr align 8 %v33, i8 0, i64 24, i1 false)
+  %v35 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 0, i32 6
+  call void @llvm.memset.p0.i64(ptr align 8 %v35, i8 0, i64 24, i1 false)
+  %v37 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 0, i32 7
+  call void @llvm.memset.p0.i64(ptr align 8 %v37, i8 0, i64 24, i1 false)
+  %v40 = getelementptr inbounds %6, ptr %v0, i32 0, i32 0, i32 0, i32 1
+  %v41 = getelementptr inbounds %6, ptr %v0, i32 0, i32 0, i32 0, i32 2
+  %v43 = getelementptr inbounds %6, ptr %v0, i32 0, i32 1, i32 0, i32 2
+  %v44 = getelementptr inbounds %6, ptr %v0, i32 0, i32 1, i32 0, i32 1
+  %v45 = getelementptr inbounds %6, ptr %v0, i32 0, i32 1, i32 0, i32 0
+  %v46 = load double, ptr %v0, align 8, !tbaa !6
+  store double %v46, ptr %v1, align 8, !tbaa !6
+  %v48 = load double, ptr %v40, align 8, !tbaa !6
+  %v49 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1
+  store double %v48, ptr %v49, align 8, !tbaa !6
+  %v50 = load double, ptr %v41, align 8, !tbaa !6
+  %v51 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 2
+  store double %v50, ptr %v51, align 8, !tbaa !6
+  call void @f0(ptr sret(%3) %v2, ptr %v3, ptr %v1)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 8 %v1, ptr align 8 %v2, i32 24, i1 false)
+  %v52 = load double, ptr %v0, align 8, !tbaa !6
+  %v53 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0
+  store double %v52, ptr %v53, align 8, !tbaa !6
+  %v54 = load double, ptr %v40, align 8, !tbaa !6
+  %v55 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 1
+  store double %v54, ptr %v55, align 8, !tbaa !6
+  %v56 = load double, ptr %v43, align 8, !tbaa !6
+  %v57 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 2
+  store double %v56, ptr %v57, align 8, !tbaa !6
+  %v58 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 0, i32 1
+  call void @f0(ptr sret(%3) %v2, ptr %v3, ptr %v58)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 8 %v58, ptr align 8 %v2, i32 24, i1 false)
+  %v60 = load double, ptr %v0, align 8, !tbaa !6
+  %v61 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0
+  store double %v60, ptr %v61, align 8, !tbaa !6
+  %v62 = load double, ptr %v44, align 8, !tbaa !6
+  %v63 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 1
+  store double %v62, ptr %v63, align 8, !tbaa !6
+  %v64 = load double, ptr %v41, align 8, !tbaa !6
+  %v65 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 2
+  store double %v64, ptr %v65, align 8, !tbaa !6
+  %v66 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 1, i32 0
+  call void @f0(ptr sret(%3) %v2, ptr %v3, ptr %v66)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 8 %v66, ptr align 8 %v2, i32 24, i1 false)
+  %v68 = load double, ptr %v0, align 8, !tbaa !6
+  %v69 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 1, i32 1, i32 0, i32 0
+  store double %v68, ptr %v69, align 8, !tbaa !6
+  %v70 = load double, ptr %v44, align 8, !tbaa !6
+  %v71 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 1, i32 1, i32 0, i32 1
+  store double %v70, ptr %v71, align 8, !tbaa !6
+  %v72 = load double, ptr %v43, align 8, !tbaa !6
+  %v73 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 1, i32 1, i32 0, i32 2
+  store double %v72, ptr %v73, align 8, !tbaa !6
+  %v74 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 1, i32 1
+  call void @f0(ptr sret(%3) %v2, ptr %v3, ptr %v74)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 8 %v74, ptr align 8 %v2, i32 24, i1 false)
+  %v76 = load double, ptr %v45, align 8, !tbaa !6
+  %v77 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0
+  store double %v76, ptr %v77, align 8, !tbaa !6
+  %v78 = load double, ptr %v40, align 8, !tbaa !6
+  %v79 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 1
+  store double %v78, ptr %v79, align 8, !tbaa !6
+  %v80 = load double, ptr %v41, align 8, !tbaa !6
+  %v81 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 2
+  store double %v80, ptr %v81, align 8, !tbaa !6
+  %v82 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 0, i32 0
+  call void @f0(ptr sret(%3) %v2, ptr %v3, ptr %v82)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 8 %v82, ptr align 8 %v2, i32 24, i1 false)
+  %v84 = load double, ptr %v45, align 8, !tbaa !6
+  %v85 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0
+  store double %v84, ptr %v85, align 8, !tbaa !6
+  %v86 = load double, ptr %v40, align 8, !tbaa !6
+  %v87 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1
+  store double %v86, ptr %v87, align 8, !tbaa !6
+  %v88 = load double, ptr %v43, align 8, !tbaa !6
+  %v89 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 2
+  store double %v88, ptr %v89, align 8, !tbaa !6
+  %v90 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 0, i32 1
+  call void @f0(ptr sret(%3) %v2, ptr %v3, ptr %v90)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 8 %v90, ptr align 8 %v2, i32 24, i1 false)
+  %v92 = load double, ptr %v45, align 8, !tbaa !6
+  %v93 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 0
+  store double %v92, ptr %v93, align 8, !tbaa !6
+  %v94 = load double, ptr %v44, align 8, !tbaa !6
+  %v95 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1
+  store double %v94, ptr %v95, align 8, !tbaa !6
+  %v96 = load double, ptr %v41, align 8, !tbaa !6
+  %v97 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 2
+  store double %v96, ptr %v97, align 8, !tbaa !6
+  %v98 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 1, i32 0
+  call void @f0(ptr sret(%3) %v2, ptr %v3, ptr %v98)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 8 %v98, ptr align 8 %v2, i32 24, i1 false)
+  %v100 = load double, ptr %v45, align 8, !tbaa !6
+  %v101 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 1, i32 1, i32 0, i32 0
+  store double %v100, ptr %v101, align 8, !tbaa !6
+  %v102 = load double, ptr %v44, align 8, !tbaa !6
+  %v103 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 1, i32 1, i32 0, i32 1
+  store double %v102, ptr %v103, align 8, !tbaa !6
+  %v104 = load double, ptr %v43, align 8, !tbaa !6
+  %v105 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 1, i32 1, i32 0, i32 2
+  store double %v104, ptr %v105, align 8, !tbaa !6
+  %v106 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 1, i32 1
+  call void @f0(ptr sret(%3) %v2, ptr %v3, ptr %v106)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 8 %v106, ptr align 8 %v2, i32 24, i1 false)
+  %v109 = load double, ptr %v1, align 8, !tbaa !6
+  %v110 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1
+  %v111 = load double, ptr %v110, align 8, !tbaa !6
+  %v112 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 2
+  %v113 = load double, ptr %v112, align 8, !tbaa !6
+  %v114 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0
+  %v115 = load double, ptr %v114, align 8, !tbaa !6
   %v116 = fcmp olt double %v115, %v109
   %v117 = select i1 %v116, double %v115, double %v109
   %v118 = fcmp ogt double %v115, %v109
   %v119 = select i1 %v118, double %v115, double %v109
-  %v120 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 1
-  %v121 = load double, double* %v120, align 8, !tbaa !6
+  %v120 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 1
+  %v121 = load double, ptr %v120, align 8, !tbaa !6
   %v122 = fcmp olt double %v121, %v111
   %v123 = select i1 %v122, double %v121, double %v111
   %v124 = fcmp ogt double %v121, %v111
   %v125 = select i1 %v124, double %v121, double %v111
-  %v126 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 2
-  %v127 = load double, double* %v126, align 8, !tbaa !6
+  %v126 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 2
+  %v127 = load double, ptr %v126, align 8, !tbaa !6
   %v128 = fcmp olt double %v127, %v113
   %v129 = select i1 %v128, double %v127, double %v113
   %v130 = fcmp ogt double %v127, %v113
   %v131 = select i1 %v130, double %v127, double %v113
-  %v132 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0
-  %v133 = load double, double* %v132, align 8, !tbaa !6
+  %v132 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0
+  %v133 = load double, ptr %v132, align 8, !tbaa !6
   %v134 = fcmp olt double %v133, %v117
   %v135 = select i1 %v134, double %v133, double %v117
   %v136 = fcmp ogt double %v133, %v119
   %v137 = select i1 %v136, double %v133, double %v119
-  %v138 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 1
-  %v139 = load double, double* %v138, align 8, !tbaa !6
+  %v138 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 1
+  %v139 = load double, ptr %v138, align 8, !tbaa !6
   %v140 = fcmp olt double %v139, %v123
   %v141 = select i1 %v140, double %v139, double %v123
   %v142 = fcmp ogt double %v139, %v125
   %v143 = select i1 %v142, double %v139, double %v125
-  %v144 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 2
-  %v145 = load double, double* %v144, align 8, !tbaa !6
+  %v144 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 2
+  %v145 = load double, ptr %v144, align 8, !tbaa !6
   %v146 = fcmp olt double %v145, %v129
   %v147 = select i1 %v146, double %v145, double %v129
   %v148 = fcmp ogt double %v145, %v131
   %v149 = select i1 %v148, double %v145, double %v131
-  %v150 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 1, i32 0, i32 0
-  %v151 = load double, double* %v150, align 8, !tbaa !6
+  %v150 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 1, i32 1, i32 0, i32 0
+  %v151 = load double, ptr %v150, align 8, !tbaa !6
   %v152 = fcmp olt double %v151, %v135
   %v153 = select i1 %v152, double %v151, double %v135
   %v154 = fcmp ogt double %v151, %v137
   %v155 = select i1 %v154, double %v151, double %v137
-  %v156 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 1, i32 0, i32 1
-  %v157 = load double, double* %v156, align 8, !tbaa !6
+  %v156 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 1, i32 1, i32 0, i32 1
+  %v157 = load double, ptr %v156, align 8, !tbaa !6
   %v158 = fcmp olt double %v157, %v141
   %v159 = select i1 %v158, double %v157, double %v141
   %v160 = fcmp ogt double %v157, %v143
   %v161 = select i1 %v160, double %v157, double %v143
-  %v162 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 1, i32 0, i32 2
-  %v163 = load double, double* %v162, align 8, !tbaa !6
+  %v162 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 0, i32 1, i32 1, i32 0, i32 2
+  %v163 = load double, ptr %v162, align 8, !tbaa !6
   %v164 = fcmp olt double %v163, %v147
   %v165 = select i1 %v164, double %v163, double %v147
   %v166 = fcmp ogt double %v163, %v149
   %v167 = select i1 %v166, double %v163, double %v149
-  %v168 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0
-  %v169 = load double, double* %v168, align 8, !tbaa !6
+  %v168 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0
+  %v169 = load double, ptr %v168, align 8, !tbaa !6
   %v170 = fcmp olt double %v169, %v153
   %v171 = select i1 %v170, double %v169, double %v153
   %v172 = fcmp ogt double %v169, %v155
   %v173 = select i1 %v172, double %v169, double %v155
-  %v174 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 1
-  %v175 = load double, double* %v174, align 8, !tbaa !6
+  %v174 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 1
+  %v175 = load double, ptr %v174, align 8, !tbaa !6
   %v176 = fcmp olt double %v175, %v159
   %v177 = select i1 %v176, double %v175, double %v159
   %v178 = fcmp ogt double %v175, %v161
   %v179 = select i1 %v178, double %v175, double %v161
-  %v180 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 2
-  %v181 = load double, double* %v180, align 8, !tbaa !6
+  %v180 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 2
+  %v181 = load double, ptr %v180, align 8, !tbaa !6
   %v182 = fcmp olt double %v181, %v165
   %v183 = select i1 %v182, double %v181, double %v165
   %v184 = fcmp ogt double %v181, %v167
   %v185 = select i1 %v184, double %v181, double %v167
-  %v186 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0
-  %v187 = load double, double* %v186, align 8, !tbaa !6
+  %v186 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0
+  %v187 = load double, ptr %v186, align 8, !tbaa !6
   %v188 = fcmp olt double %v187, %v171
   %v189 = select i1 %v188, double %v187, double %v171
   %v190 = fcmp ogt double %v187, %v173
   %v191 = select i1 %v190, double %v187, double %v173
-  %v192 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1
-  %v193 = load double, double* %v192, align 8, !tbaa !6
+  %v192 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1
+  %v193 = load double, ptr %v192, align 8, !tbaa !6
   %v194 = fcmp olt double %v193, %v177
   %v195 = select i1 %v194, double %v193, double %v177
   %v196 = fcmp ogt double %v193, %v179
   %v197 = select i1 %v196, double %v193, double %v179
-  %v198 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 2
-  %v199 = load double, double* %v198, align 8, !tbaa !6
+  %v198 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 2
+  %v199 = load double, ptr %v198, align 8, !tbaa !6
   %v200 = fcmp olt double %v199, %v183
   %v201 = select i1 %v200, double %v199, double %v183
   %v202 = fcmp ogt double %v199, %v185
   %v203 = select i1 %v202, double %v199, double %v185
-  %v204 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 0
-  %v205 = load double, double* %v204, align 8, !tbaa !6
+  %v204 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 0
+  %v205 = load double, ptr %v204, align 8, !tbaa !6
   %v206 = fcmp olt double %v205, %v189
   %v207 = select i1 %v206, double %v205, double %v189
   %v208 = fcmp ogt double %v205, %v191
   %v209 = select i1 %v208, double %v205, double %v191
-  %v210 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1
-  %v211 = load double, double* %v210, align 8, !tbaa !6
+  %v210 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1
+  %v211 = load double, ptr %v210, align 8, !tbaa !6
   %v212 = fcmp olt double %v211, %v195
   %v213 = select i1 %v212, double %v211, double %v195
   %v214 = fcmp ogt double %v211, %v197
   %v215 = select i1 %v214, double %v211, double %v197
-  %v216 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 2
-  %v217 = load double, double* %v216, align 8, !tbaa !6
+  %v216 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 2
+  %v217 = load double, ptr %v216, align 8, !tbaa !6
   %v218 = fcmp olt double %v217, %v201
   %v219 = select i1 %v218, double %v217, double %v201
   %v220 = fcmp ogt double %v217, %v203
   %v221 = select i1 %v220, double %v217, double %v203
-  %v222 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 1, i32 0, i32 0
-  %v223 = load double, double* %v222, align 8, !tbaa !6
+  %v222 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 1, i32 1, i32 0, i32 0
+  %v223 = load double, ptr %v222, align 8, !tbaa !6
   %v224 = fcmp olt double %v223, %v207
   %v225 = select i1 %v224, double %v223, double %v207
   %v226 = fcmp ogt double %v223, %v209
   %v227 = select i1 %v226, double %v223, double %v209
-  %v228 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 1, i32 0, i32 1
-  %v229 = load double, double* %v228, align 8, !tbaa !6
+  %v228 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 1, i32 1, i32 0, i32 1
+  %v229 = load double, ptr %v228, align 8, !tbaa !6
   %v230 = fcmp olt double %v229, %v213
   %v231 = select i1 %v230, double %v229, double %v213
   %v232 = fcmp ogt double %v229, %v215
   %v233 = select i1 %v232, double %v229, double %v215
-  %v234 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 1, i32 0, i32 2
-  %v235 = load double, double* %v234, align 8, !tbaa !6
+  %v234 = getelementptr inbounds [2 x [2 x [2 x %3]]], ptr %v1, i32 0, i32 1, i32 1, i32 1, i32 0, i32 2
+  %v235 = load double, ptr %v234, align 8, !tbaa !6
   %v236 = fcmp olt double %v235, %v219
   %v237 = select i1 %v236, double %v235, double %v219
   %v238 = fcmp ogt double %v235, %v221
   %v239 = select i1 %v238, double %v235, double %v221
-  %v240 = getelementptr inbounds %4, %4* %a0, i32 0, i32 4, i32 0, i32 0
-  store double %v225, double* %v240, align 8
-  %v241 = getelementptr inbounds %4, %4* %a0, i32 0, i32 4, i32 0, i32 1
-  store double %v231, double* %v241, align 8
-  %v242 = getelementptr inbounds %4, %4* %a0, i32 0, i32 4, i32 0, i32 2
-  store double %v237, double* %v242, align 8
-  %v243 = getelementptr inbounds %4, %4* %a0, i32 0, i32 5, i32 0, i32 0
-  store double %v227, double* %v243, align 8
-  %v244 = getelementptr inbounds %4, %4* %a0, i32 0, i32 5, i32 0, i32 1
-  store double %v233, double* %v244, align 8
-  %v245 = getelementptr inbounds %4, %4* %a0, i32 0, i32 5, i32 0, i32 2
-  store double %v239, double* %v245, align 8
+  %v240 = getelementptr inbounds %4, ptr %a0, i32 0, i32 4, i32 0, i32 0
+  store double %v225, ptr %v240, align 8
+  %v241 = getelementptr inbounds %4, ptr %a0, i32 0, i32 4, i32 0, i32 1
+  store double %v231, ptr %v241, align 8
+  %v242 = getelementptr inbounds %4, ptr %a0, i32 0, i32 4, i32 0, i32 2
+  store double %v237, ptr %v242, align 8
+  %v243 = getelementptr inbounds %4, ptr %a0, i32 0, i32 5, i32 0, i32 0
+  store double %v227, ptr %v243, align 8
+  %v244 = getelementptr inbounds %4, ptr %a0, i32 0, i32 5, i32 0, i32 1
+  store double %v233, ptr %v244, align 8
+  %v245 = getelementptr inbounds %4, ptr %a0, i32 0, i32 5, i32 0, i32 2
+  store double %v239, ptr %v245, align 8
   br label %b2
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #1
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1) #1
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #1
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1) #1
 
 attributes #0 = { nounwind "target-cpu"="hexagonv55" }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/remove-endloop.ll b/llvm/test/CodeGen/Hexagon/remove-endloop.ll
index 73e1ad02cd807..c091fbbcad5fa 100644
--- a/llvm/test/CodeGen/Hexagon/remove-endloop.ll
+++ b/llvm/test/CodeGen/Hexagon/remove-endloop.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
 
-define void @foo(i32 %n, i32* nocapture %A, i32* nocapture %B) nounwind optsize {
+define void @foo(i32 %n, ptr nocapture %A, ptr nocapture %B) nounwind optsize {
 entry:
   %cmp = icmp sgt i32 %n, 100
   br i1 %cmp, label %for.body.preheader, label %for.cond4.preheader
@@ -20,29 +20,29 @@ for.body7.preheader:
   br label %for.body7
 
 for.body:
-  %arrayidx.phi = phi i32* [ %arrayidx.inc, %for.body ], [ %B, %for.body.preheader ]
-  %arrayidx3.phi = phi i32* [ %arrayidx3.inc, %for.body ], [ %A, %for.body.preheader ]
+  %arrayidx.phi = phi ptr [ %arrayidx.inc, %for.body ], [ %B, %for.body.preheader ]
+  %arrayidx3.phi = phi ptr [ %arrayidx3.inc, %for.body ], [ %A, %for.body.preheader ]
   %i.014 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
-  %0 = load i32, i32* %arrayidx.phi, align 4
+  %0 = load i32, ptr %arrayidx.phi, align 4
   %sub = add nsw i32 %0, -1
-  store i32 %sub, i32* %arrayidx3.phi, align 4
+  store i32 %sub, ptr %arrayidx3.phi, align 4
   %inc = add nsw i32 %i.014, 1
   %exitcond = icmp eq i32 %inc, %n
-  %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1
-  %arrayidx3.inc = getelementptr i32, i32* %arrayidx3.phi, i32 1
+  %arrayidx.inc = getelementptr i32, ptr %arrayidx.phi, i32 1
+  %arrayidx3.inc = getelementptr i32, ptr %arrayidx3.phi, i32 1
   br i1 %exitcond, label %if.end.loopexit, label %for.body
 
 for.body7:
-  %arrayidx8.phi = phi i32* [ %arrayidx8.inc, %for.body7 ], [ %B, %for.body7.preheader ]
-  %arrayidx9.phi = phi i32* [ %arrayidx9.inc, %for.body7 ], [ %A, %for.body7.preheader ]
+  %arrayidx8.phi = phi ptr [ %arrayidx8.inc, %for.body7 ], [ %B, %for.body7.preheader ]
+  %arrayidx9.phi = phi ptr [ %arrayidx9.inc, %for.body7 ], [ %A, %for.body7.preheader ]
   %i.117 = phi i32 [ %inc11, %for.body7 ], [ 0, %for.body7.preheader ]
-  %1 = load i32, i32* %arrayidx8.phi, align 4
+  %1 = load i32, ptr %arrayidx8.phi, align 4
   %add = add nsw i32 %1, 1
-  store i32 %add, i32* %arrayidx9.phi, align 4
+  store i32 %add, ptr %arrayidx9.phi, align 4
   %inc11 = add nsw i32 %i.117, 1
   %exitcond18 = icmp eq i32 %inc11, %n
-  %arrayidx8.inc = getelementptr i32, i32* %arrayidx8.phi, i32 1
-  %arrayidx9.inc = getelementptr i32, i32* %arrayidx9.phi, i32 1
+  %arrayidx8.inc = getelementptr i32, ptr %arrayidx8.phi, i32 1
+  %arrayidx9.inc = getelementptr i32, ptr %arrayidx9.phi, i32 1
   br i1 %exitcond18, label %if.end.loopexit21, label %for.body7
 
 if.end.loopexit:

diff  --git a/llvm/test/CodeGen/Hexagon/remove_lsr.ll b/llvm/test/CodeGen/Hexagon/remove_lsr.ll
index dee384520e50c..9249174e50066 100644
--- a/llvm/test/CodeGen/Hexagon/remove_lsr.ll
+++ b/llvm/test/CodeGen/Hexagon/remove_lsr.ll
@@ -17,51 +17,48 @@
 %s.0 = type { i64 }
 %s.1 = type { i32 }
 
-define void @f0(%s.0* nocapture %a0, %s.1* nocapture %a1, %s.1* nocapture %a2, i8* nocapture %a3, i8* nocapture %a4) #0 {
+define void @f0(ptr nocapture %a0, ptr nocapture %a1, ptr nocapture %a2, ptr nocapture %a3, ptr nocapture %a4) #0 {
 b0:
-  %v0 = getelementptr %s.0, %s.0* %a0, i32 1
-  %v1 = getelementptr %s.1, %s.1* %a2, i32 1
-  %v2 = getelementptr %s.1, %s.1* %a1, i32 1
-  %v3 = getelementptr i8, i8* %a4, i32 1
-  %v4 = getelementptr i8, i8* %a3, i32 1
+  %v0 = getelementptr %s.0, ptr %a0, i32 1
+  %v1 = getelementptr %s.1, ptr %a2, i32 1
+  %v2 = getelementptr %s.1, ptr %a1, i32 1
+  %v3 = getelementptr i8, ptr %a4, i32 1
+  %v4 = getelementptr i8, ptr %a3, i32 1
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
   %v5 = phi i32 [ %v38, %b1 ], [ 2, %b0 ]
-  %v6 = phi i8* [ %v37, %b1 ], [ %v4, %b0 ]
-  %v7 = phi i8* [ %v36, %b1 ], [ %v3, %b0 ]
-  %v8 = phi %s.1* [ %v35, %b1 ], [ %v2, %b0 ]
-  %v9 = phi %s.1* [ %v34, %b1 ], [ %v1, %b0 ]
-  %v10 = phi %s.0* [ %v33, %b1 ], [ %v0, %b0 ]
+  %v6 = phi ptr [ %v37, %b1 ], [ %v4, %b0 ]
+  %v7 = phi ptr [ %v36, %b1 ], [ %v3, %b0 ]
+  %v8 = phi ptr [ %v35, %b1 ], [ %v2, %b0 ]
+  %v9 = phi ptr [ %v34, %b1 ], [ %v1, %b0 ]
+  %v10 = phi ptr [ %v33, %b1 ], [ %v0, %b0 ]
   %v11 = phi i8 [ undef, %b0 ], [ %v30, %b1 ]
   %v12 = phi i8 [ undef, %b0 ], [ %v29, %b1 ]
   %v13 = phi i64 [ undef, %b0 ], [ %v28, %b1 ]
-  %v14 = bitcast %s.1* %v8 to i32*
-  %v15 = bitcast %s.1* %v9 to i32*
-  %v16 = bitcast %s.0* %v10 to i64*
   %v17 = tail call i64 @llvm.hexagon.A2.vsubhs(i64 0, i64 %v13)
   %v18 = sext i8 %v12 to i32
   %v19 = trunc i64 %v13 to i32
   %v20 = trunc i64 %v17 to i32
   %v21 = tail call i32 @llvm.hexagon.C2.mux(i32 %v18, i32 %v19, i32 %v20)
-  store i32 %v21, i32* %v14, align 4
+  store i32 %v21, ptr %v8, align 4
   %v22 = sext i8 %v11 to i32
   %v23 = lshr i64 %v13, 32
   %v24 = trunc i64 %v23 to i32
   %v25 = lshr i64 %v17, 32
   %v26 = trunc i64 %v25 to i32
   %v27 = tail call i32 @llvm.hexagon.C2.mux(i32 %v22, i32 %v24, i32 %v26)
-  store i32 %v27, i32* %v15, align 4
-  %v28 = load i64, i64* %v16, align 8
-  %v29 = load i8, i8* %v6, align 1
-  %v30 = load i8, i8* %v7, align 1
+  store i32 %v27, ptr %v9, align 4
+  %v28 = load i64, ptr %v10, align 8
+  %v29 = load i8, ptr %v6, align 1
+  %v30 = load i8, ptr %v7, align 1
   %v31 = trunc i32 %v5 to i8
   %v32 = icmp eq i8 %v31, 32
-  %v33 = getelementptr %s.0, %s.0* %v10, i32 1
-  %v34 = getelementptr %s.1, %s.1* %v9, i32 1
-  %v35 = getelementptr %s.1, %s.1* %v8, i32 1
-  %v36 = getelementptr i8, i8* %v7, i32 1
-  %v37 = getelementptr i8, i8* %v6, i32 1
+  %v33 = getelementptr %s.0, ptr %v10, i32 1
+  %v34 = getelementptr %s.1, ptr %v9, i32 1
+  %v35 = getelementptr %s.1, ptr %v8, i32 1
+  %v36 = getelementptr i8, ptr %v7, i32 1
+  %v37 = getelementptr i8, ptr %v6, i32 1
   %v38 = add i32 %v5, 1
   br i1 %v32, label %b2, label %b1
 

diff  --git a/llvm/test/CodeGen/Hexagon/retval-redundant-copy.ll b/llvm/test/CodeGen/Hexagon/retval-redundant-copy.ll
index d47469de5c053..fbcc7d9131800 100644
--- a/llvm/test/CodeGen/Hexagon/retval-redundant-copy.ll
+++ b/llvm/test/CodeGen/Hexagon/retval-redundant-copy.ll
@@ -11,9 +11,9 @@
 define void @f0() {
 b0:
   %v0 = tail call i32 @f1(i32 1, i32 2, i32 3)
-  store i32 %v0, i32* @g0, align 4
+  store i32 %v0, ptr @g0, align 4
   %v1 = tail call i32 @f1(i32 4, i32 5, i32 6)
-  store i32 %v1, i32* @g1, align 4
+  store i32 %v1, ptr @g1, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/rotl-i64.ll b/llvm/test/CodeGen/Hexagon/rotl-i64.ll
index 353c7bafb7129..a37814cd9c5b9 100644
--- a/llvm/test/CodeGen/Hexagon/rotl-i64.ll
+++ b/llvm/test/CodeGen/Hexagon/rotl-i64.ll
@@ -2,16 +2,16 @@
 ; CHECK: rol
 
 ; Function Attrs: nounwind
-define fastcc void @f0(i64* %a0) #0 {
+define fastcc void @f0(ptr %a0) #0 {
 b0:                                               ; preds = %b3, %b2
-  %v0 = load i64, i64* %a0, align 8, !tbaa !0
+  %v0 = load i64, ptr %a0, align 8, !tbaa !0
   %v1 = lshr i64 %v0, 8
   %v2 = shl i64 %v0, 56
   %v3 = or i64 %v2, %v1
   %v4 = xor i64 %v3, 0
   %v5 = xor i64 %v4, 0
   %v6 = add i64 0, %v5
-  store i64 %v6, i64* %a0, align 8, !tbaa !0
+  store i64 %v6, ptr %a0, align 8, !tbaa !0
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/runtime-stkchk.ll b/llvm/test/CodeGen/Hexagon/runtime-stkchk.ll
index 2ab0393d92a13..81f18ec74e804 100644
--- a/llvm/test/CodeGen/Hexagon/runtime-stkchk.ll
+++ b/llvm/test/CodeGen/Hexagon/runtime-stkchk.ll
@@ -5,37 +5,33 @@
 define i32 @foo_1(i32 %n) #0 {
 entry:
   %local = alloca [1024 x i32], align 8
-  %0 = bitcast [1024 x i32]* %local to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4096, i8* %0) #1
-  %arraydecay = getelementptr inbounds [1024 x i32], [1024 x i32]* %local, i32 0, i32 0
-  call void @baz_1(i32* %arraydecay) #3
-  %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* %local, i32 0, i32 %n
-  %1 = load i32, i32* %arrayidx, align 4
-  call void @llvm.lifetime.end.p0i8(i64 4096, i8* %0) #1
-  ret i32 %1
+  call void @llvm.lifetime.start.p0(i64 4096, ptr %local) #1
+  call void @baz_1(ptr %local) #3
+  %arrayidx = getelementptr inbounds [1024 x i32], ptr %local, i32 0, i32 %n
+  %0 = load i32, ptr %arrayidx, align 4
+  call void @llvm.lifetime.end.p0(i64 4096, ptr %local) #1
+  ret i32 %0
 }
 
 ; CHECK-LABEL: foo_2
 ; CHECK: __save_r16_through_r19_stkchk
-define i32 @foo_2(i32 %n, i32* %y) #0 {
+define i32 @foo_2(i32 %n, ptr %y) #0 {
 entry:
   %local = alloca [2048 x i32], align 8
-  %0 = bitcast [2048 x i32]* %local to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8192, i8* %0) #1
-  %arraydecay = getelementptr inbounds [2048 x i32], [2048 x i32]* %local, i32 0, i32 0
-  call void @baz_2(i32* %y, i32* %arraydecay) #3
-  %1 = load i32, i32* %y, align 4
-  %add = add nsw i32 %n, %1
-  %arrayidx = getelementptr inbounds [2048 x i32], [2048 x i32]* %local, i32 0, i32 %add
-  %2 = load i32, i32* %arrayidx, align 4
-  call void @llvm.lifetime.end.p0i8(i64 8192, i8* %0) #1
-  ret i32 %2
+  call void @llvm.lifetime.start.p0(i64 8192, ptr %local) #1
+  call void @baz_2(ptr %y, ptr %local) #3
+  %0 = load i32, ptr %y, align 4
+  %add = add nsw i32 %n, %0
+  %arrayidx = getelementptr inbounds [2048 x i32], ptr %local, i32 0, i32 %add
+  %1 = load i32, ptr %arrayidx, align 4
+  call void @llvm.lifetime.end.p0(i64 8192, ptr %local) #1
+  ret i32 %1
 }
 
-declare void @baz_1(i32*) #2
-declare void @baz_2(i32*, i32*) #2
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @baz_1(ptr) #2
+declare void @baz_2(ptr, ptr) #2
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
 
 attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/save-kill-csr.ll b/llvm/test/CodeGen/Hexagon/save-kill-csr.ll
index 6a398e997ceee..994de78bdb0dc 100644
--- a/llvm/test/CodeGen/Hexagon/save-kill-csr.ll
+++ b/llvm/test/CodeGen/Hexagon/save-kill-csr.ll
@@ -6,9 +6,9 @@ target triple = "hexagon"
 %s.0 = type { i8, i8, i8, i8 }
 %s.1 = type { %s.2 }
 %s.2 = type { %s.3 }
-%s.3 = type { i32 (...)** }
+%s.3 = type { ptr }
 %s.4 = type { i8, i8, i16, i8 }
-%s.5 = type { i8, %s.0* }
+%s.5 = type { i8, ptr }
 
 @g0 = external hidden global [3 x %s.0], align 8
 @g1 = external hidden global [3 x %s.0], align 8
@@ -31,45 +31,45 @@ target triple = "hexagon"
 @g18 = external hidden global [3 x %s.0], align 8
 
 ; Function Attrs: norecurse nounwind optsize ssp
-define hidden zeroext i8 @f0(%s.1* nocapture readnone %a0, %s.4* readonly %a1, %s.5* %a2, i32 %a3) unnamed_addr #0 align 2 {
+define hidden zeroext i8 @f0(ptr nocapture readnone %a0, ptr readonly %a1, ptr %a2, i32 %a3) unnamed_addr #0 align 2 {
 b0:
   br i1 undef, label %b4, label %b1
 
 b1:                                               ; preds = %b0
   %v0 = icmp eq i32 %a3, 1
-  %v1 = select i1 %v0, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g18, i32 0, i32 0), %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g0, i32 0, i32 0)
+  %v1 = select i1 %v0, ptr @g18, ptr @g0
   %v2 = icmp eq i32 %a3, 2
-  %v3 = select i1 %v2, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g16, i32 0, i32 0), %s.0* %v1
+  %v3 = select i1 %v2, ptr @g16, ptr %v1
   %v4 = icmp eq i32 %a3, 3
-  %v5 = select i1 %v4, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g15, i32 0, i32 0), %s.0* %v3
+  %v5 = select i1 %v4, ptr @g15, ptr %v3
   %v6 = icmp eq i32 %a3, 4
-  %v7 = select i1 %v6, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g14, i32 0, i32 0), %s.0* %v5
+  %v7 = select i1 %v6, ptr @g14, ptr %v5
   %v8 = icmp eq i32 %a3, 5
-  %v9 = select i1 %v8, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g12, i32 0, i32 0), %s.0* %v7
+  %v9 = select i1 %v8, ptr @g12, ptr %v7
   %v10 = icmp eq i32 %a3, 6
-  %v11 = select i1 %v10, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g11, i32 0, i32 0), %s.0* %v9
+  %v11 = select i1 %v10, ptr @g11, ptr %v9
   %v12 = icmp eq i32 %a3, 7
-  %v13 = select i1 %v12, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g9, i32 0, i32 0), %s.0* %v11
+  %v13 = select i1 %v12, ptr @g9, ptr %v11
   %v14 = icmp eq i32 %a3, 8
-  %v15 = select i1 %v14, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g8, i32 0, i32 0), %s.0* %v13
+  %v15 = select i1 %v14, ptr @g8, ptr %v13
   %v16 = icmp eq i32 %a3, 9
-  %v17 = select i1 %v16, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g7, i32 0, i32 0), %s.0* %v15
+  %v17 = select i1 %v16, ptr @g7, ptr %v15
   %v18 = icmp eq i32 %a3, 10
-  %v19 = select i1 %v18, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g5, i32 0, i32 0), %s.0* %v17
+  %v19 = select i1 %v18, ptr @g5, ptr %v17
   %v20 = icmp eq i32 %a3, 11
-  %v21 = select i1 %v20, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g4, i32 0, i32 0), %s.0* %v19
+  %v21 = select i1 %v20, ptr @g4, ptr %v19
   %v22 = icmp eq i32 %a3, 12
-  %v23 = select i1 %v22, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g3, i32 0, i32 0), %s.0* %v21
+  %v23 = select i1 %v22, ptr @g3, ptr %v21
   %v24 = icmp eq i32 %a3, 13
-  %v25 = select i1 %v24, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g2, i32 0, i32 0), %s.0* %v23
-  %v26 = select i1 undef, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g1, i32 0, i32 0), %s.0* %v25
-  %v27 = select i1 undef, %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g17, i32 0, i32 0), %s.0* %v26
+  %v25 = select i1 %v24, ptr @g2, ptr %v23
+  %v26 = select i1 undef, ptr @g1, ptr %v25
+  %v27 = select i1 undef, ptr @g17, ptr %v26
   %v28 = icmp eq i32 %a3, 16
-  %v29 = select i1 %v28, %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g13, i32 0, i32 0), %s.0* %v27
+  %v29 = select i1 %v28, ptr @g13, ptr %v27
   %v30 = icmp eq i32 %a3, 17
-  %v31 = select i1 %v30, %s.0* null, %s.0* %v29
-  %v32 = select i1 undef, %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g10, i32 0, i32 0), %s.0* %v31
-  %v33 = select i1 undef, %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g6, i32 0, i32 0), %s.0* %v32
+  %v31 = select i1 %v30, ptr null, ptr %v29
+  %v32 = select i1 undef, ptr @g10, ptr %v31
+  %v33 = select i1 undef, ptr @g6, ptr %v32
   %v34 = add i32 %a3, -15
   %v35 = icmp ult i32 %v34, 2
   %v36 = select i1 %v35, i8 4, i8 3
@@ -78,11 +78,11 @@ b1:                                               ; preds = %b0
   br i1 undef, label %b2, label %b3
 
 b2:                                               ; preds = %b3, %b1
-  %v39 = phi %s.0* [ undef, %b3 ], [ %v33, %b1 ]
+  %v39 = phi ptr [ undef, %b3 ], [ %v33, %b1 ]
   %v40 = phi i8 [ undef, %b3 ], [ %v38, %b1 ]
-  %v41 = getelementptr inbounds %s.5, %s.5* %a2, i32 0, i32 1
-  store %s.0* %v39, %s.0** %v41, align 4
-  store i8 %v40, i8* undef, align 4
+  %v41 = getelementptr inbounds %s.5, ptr %a2, i32 0, i32 1
+  store ptr %v39, ptr %v41, align 4
+  store i8 %v40, ptr undef, align 4
   br label %b4
 
 b3:                                               ; preds = %b1

diff  --git a/llvm/test/CodeGen/Hexagon/save-regs-thresh.ll b/llvm/test/CodeGen/Hexagon/save-regs-thresh.ll
index d78b0e4634573..655aa77deabd3 100644
--- a/llvm/test/CodeGen/Hexagon/save-regs-thresh.ll
+++ b/llvm/test/CodeGen/Hexagon/save-regs-thresh.ll
@@ -13,62 +13,58 @@ target triple = "hexagon"
 %s.5 = type { i16, i16 }
 
 @g0 = private unnamed_addr constant [21 x i8] c"....................\00", align 1
- at g1 = internal unnamed_addr global [1 x %s.0*] zeroinitializer, align 4
+ at g1 = internal unnamed_addr global [1 x ptr] zeroinitializer, align 4
 
 ; Function Attrs: nounwind
-define void @f0(i8 zeroext %a0, %s.0** nocapture %a1) #0 {
+define void @f0(i8 zeroext %a0, ptr nocapture %a1) #0 {
 b0:
-  %v0 = tail call i8* @f1(i8 zeroext %a0, i32 1424, i8* getelementptr inbounds ([21 x i8], [21 x i8]* @g0, i32 0, i32 0), i32 118) #0
-  %v1 = bitcast i8* %v0 to %s.0*
+  %v0 = tail call ptr @f1(i8 zeroext %a0, i32 1424, ptr @g0, i32 118) #0
   %v2 = zext i8 %a0 to i32
-  %v3 = getelementptr inbounds [1 x %s.0*], [1 x %s.0*]* @g1, i32 0, i32 %v2
-  store %s.0* %v1, %s.0** %v3, align 4, !tbaa !0
-  store %s.0* %v1, %s.0** %a1, align 4, !tbaa !0
+  %v3 = getelementptr inbounds [1 x ptr], ptr @g1, i32 0, i32 %v2
+  store ptr %v0, ptr %v3, align 4, !tbaa !0
+  store ptr %v0, ptr %a1, align 4, !tbaa !0
   ret void
 }
 
-declare i8* @f1(i8 zeroext, i32, i8*, i32)
+declare ptr @f1(i8 zeroext, i32, ptr, i32)
 
 ; Function Attrs: nounwind
 define void @f2(i8 zeroext %a0) #0 {
 b0:
   %v0 = zext i8 %a0 to i32
-  %v1 = getelementptr inbounds [1 x %s.0*], [1 x %s.0*]* @g1, i32 0, i32 %v0
-  %v2 = load %s.0*, %s.0** %v1, align 4, !tbaa !0
-  %v3 = getelementptr inbounds %s.0, %s.0* %v2, i32 0, i32 0, i32 0
-  tail call void @f3(i8 zeroext %a0, i8* %v3, i8* getelementptr inbounds ([21 x i8], [21 x i8]* @g0, i32 0, i32 0), i32 142) #0
-  store %s.0* null, %s.0** %v1, align 4, !tbaa !0
+  %v1 = getelementptr inbounds [1 x ptr], ptr @g1, i32 0, i32 %v0
+  %v2 = load ptr, ptr %v1, align 4, !tbaa !0
+  tail call void @f3(i8 zeroext %a0, ptr %v2, ptr @g0, i32 142) #0
+  store ptr null, ptr %v1, align 4, !tbaa !0
   ret void
 }
 
-declare void @f3(i8 zeroext, i8*, i8*, i32)
+declare void @f3(i8 zeroext, ptr, ptr, i32)
 
 ; Function Attrs: nounwind
 define void @f4(i8 zeroext %a0, i8 zeroext %a1, i8 zeroext %a2, i8 zeroext %a3, i8 zeroext %a4) #0 {
 b0:
   %v0 = alloca [7 x i32], align 4
   %v1 = zext i8 %a0 to i32
-  %v2 = getelementptr inbounds [1 x %s.0*], [1 x %s.0*]* @g1, i32 0, i32 %v1
-  %v3 = load %s.0*, %s.0** %v2, align 4, !tbaa !0
-  %v4 = getelementptr inbounds %s.0, %s.0* %v3, i32 0, i32 3
-  %v5 = load i32, i32* %v4, align 4, !tbaa !4
+  %v2 = getelementptr inbounds [1 x ptr], ptr @g1, i32 0, i32 %v1
+  %v3 = load ptr, ptr %v2, align 4, !tbaa !0
+  %v4 = getelementptr inbounds %s.0, ptr %v3, i32 0, i32 3
+  %v5 = load i32, ptr %v4, align 4, !tbaa !4
   %v6 = and i32 %v5, 8
   %v7 = icmp eq i32 %v6, 0
   br i1 %v7, label %b2, label %b1
 
 b1:                                               ; preds = %b0
-  %v8 = getelementptr inbounds [7 x i32], [7 x i32]* %v0, i32 0, i32 0
-  %v9 = bitcast [7 x i32]* %v0 to %s.2*
   %v10 = call i32 @f5() #0
-  %v11 = getelementptr [7 x i32], [7 x i32]* %v0, i32 0, i32 1
-  store i32 %v10, i32* %v11, align 4
+  %v11 = getelementptr [7 x i32], ptr %v0, i32 0, i32 1
+  store i32 %v10, ptr %v11, align 4
   %v12 = call zeroext i16 @f6(i8 zeroext %a0) #0
   %v13 = zext i16 %v12 to i32
   %v14 = shl nuw i32 %v13, 16
   %v15 = or i32 %v14, 260
-  store i32 %v15, i32* %v8, align 4
+  store i32 %v15, ptr %v0, align 4
   %v16 = zext i8 %a1 to i32
-  %v17 = getelementptr [7 x i32], [7 x i32]* %v0, i32 0, i32 2
+  %v17 = getelementptr [7 x i32], ptr %v0, i32 0, i32 2
   %v18 = zext i8 %a2 to i32
   %v19 = shl nuw nsw i32 %v18, 12
   %v20 = zext i8 %a3 to i32
@@ -86,8 +82,8 @@ b1:                                               ; preds = %b0
   %v32 = shl nuw nsw i32 %v31, 8
   %v33 = and i32 %v32, 3840
   %v34 = or i32 %v33, %v29
-  store i32 %v34, i32* %v17, align 4
-  %v35 = call i32 bitcast (i32 (...)* @f8 to i32 (i32, %s.2*)*)(i32 %v1, %s.2* %v9) #0
+  store i32 %v34, ptr %v17, align 4
+  %v35 = call i32 @f8(i32 %v1, ptr %v0) #0
   br label %b2
 
 b2:                                               ; preds = %b1, %b0

diff  --git a/llvm/test/CodeGen/Hexagon/sdata-array.ll b/llvm/test/CodeGen/Hexagon/sdata-array.ll
index cea86bd426d95..84cb321957456 100644
--- a/llvm/test/CodeGen/Hexagon/sdata-array.ll
+++ b/llvm/test/CodeGen/Hexagon/sdata-array.ll
@@ -7,7 +7,7 @@
 
 define void @set(i8 %x) nounwind {
 entry:
-  store i8 %x, i8* getelementptr inbounds ([4 x i8], [4 x i8]* @foo, i32 0, i32 0), align 1
+  store i8 %x, ptr @foo, align 1
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/sdata-basic.ll b/llvm/test/CodeGen/Hexagon/sdata-basic.ll
index db7375417df9e..e15a8c9a9a9d2 100644
--- a/llvm/test/CodeGen/Hexagon/sdata-basic.ll
+++ b/llvm/test/CodeGen/Hexagon/sdata-basic.ll
@@ -7,7 +7,7 @@ target triple = "hexagon"
 
 define i32 @foo() nounwind readonly {
 entry:
-  %0 = load i32, i32* @var, align 4, !tbaa !0
+  %0 = load i32, ptr @var, align 4, !tbaa !0
   ret i32 %0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/sdata-expand-const.ll b/llvm/test/CodeGen/Hexagon/sdata-expand-const.ll
index 0a312950105d8..31c2c3f2c6d3c 100644
--- a/llvm/test/CodeGen/Hexagon/sdata-expand-const.ll
+++ b/llvm/test/CodeGen/Hexagon/sdata-expand-const.ll
@@ -7,7 +7,7 @@ target triple = "hexagon"
 define i32 @f0(i64 %a0) #0 {
 b0:
   %v0 = alloca i64, align 8
-  store i64 %a0, i64* %v0, align 8
+  store i64 %a0, ptr %v0, align 8
   %v1 = call i32 @llvm.hexagon.S2.ct0p(i64 4222189076152335)
   ret i32 %v1
 }

diff  --git a/llvm/test/CodeGen/Hexagon/sdata-load-size.ll b/llvm/test/CodeGen/Hexagon/sdata-load-size.ll
index 325713f7062a8..fb63ffd35ade4 100644
--- a/llvm/test/CodeGen/Hexagon/sdata-load-size.ll
+++ b/llvm/test/CodeGen/Hexagon/sdata-load-size.ll
@@ -9,7 +9,7 @@ target triple = "hexagon"
 
 define i32 @f0() #0 {
 entry:
-  %v0 = load i64, i64* @g0, align 8
+  %v0 = load i64, ptr @g0, align 8
   %v1 = trunc i64 %v0 to i8
   %v2 = zext i8 %v1 to i32
   ret i32 %v2

diff  --git a/llvm/test/CodeGen/Hexagon/sdata-opaque-type.ll b/llvm/test/CodeGen/Hexagon/sdata-opaque-type.ll
index d26bbc06296d8..64afefe467dc5 100644
--- a/llvm/test/CodeGen/Hexagon/sdata-opaque-type.ll
+++ b/llvm/test/CodeGen/Hexagon/sdata-opaque-type.ll
@@ -9,9 +9,9 @@ target triple = "hexagon"
 @g0 = external global %s.0
 
 ; Function Attrs: nounwind
-define %s.0* @f0() #0 {
+define ptr @f0() #0 {
 b0:
-  ret %s.0* @g0
+  ret ptr @g0
 }
 
 attributes #0 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/sdata-stack-guard.ll b/llvm/test/CodeGen/Hexagon/sdata-stack-guard.ll
index 56a0c98124566..6b335930d4772 100644
--- a/llvm/test/CodeGen/Hexagon/sdata-stack-guard.ll
+++ b/llvm/test/CodeGen/Hexagon/sdata-stack-guard.ll
@@ -14,38 +14,37 @@ define zeroext i8 @f0(i32 %a0) #0 {
 b0:
   %v0 = alloca i32, align 4
   %v1 = alloca [64 x i8], align 8
-  %v2 = alloca i8*, align 4
-  store i32 %a0, i32* %v0, align 4
-  store i8* getelementptr inbounds ([37 x i8], [37 x i8]* @g0, i32 0, i32 0), i8** %v2, align 4
-  %v3 = getelementptr inbounds [64 x i8], [64 x i8]* %v1, i32 0, i32 0
-  %v4 = load i8*, i8** %v2, align 4
-  %v5 = call i8* @f1(i8* %v3, i8* %v4) #2
-  %v6 = load i32, i32* %v0, align 4
-  %v7 = getelementptr inbounds [64 x i8], [64 x i8]* %v1, i32 0, i32 %v6
-  %v8 = load i8, i8* %v7, align 1
+  %v2 = alloca ptr, align 4
+  store i32 %a0, ptr %v0, align 4
+  store ptr @g0, ptr %v2, align 4
+  %v4 = load ptr, ptr %v2, align 4
+  %v5 = call ptr @f1(ptr %v1, ptr %v4) #2
+  %v6 = load i32, ptr %v0, align 4
+  %v7 = getelementptr inbounds [64 x i8], ptr %v1, i32 0, i32 %v6
+  %v8 = load i8, ptr %v7, align 1
   ret i8 %v8
 }
 
 ; Function Attrs: nounwind
-declare i8* @f1(i8*, i8*) #1
+declare ptr @f1(ptr, ptr) #1
 
 ; Function Attrs: noinline nounwind ssp
-define i32 @f2(i32 %a0, i8** %a1) #0 {
+define i32 @f2(i32 %a0, ptr %a1) #0 {
 b0:
   %v0 = alloca i32, align 4
   %v1 = alloca i32, align 4
-  %v2 = alloca i8**, align 4
-  store i32 0, i32* %v0, align 4
-  store i32 %a0, i32* %v1, align 4
-  store i8** %a1, i8*** %v2, align 4
+  %v2 = alloca ptr, align 4
+  store i32 0, ptr %v0, align 4
+  store i32 %a0, ptr %v1, align 4
+  store ptr %a1, ptr %v2, align 4
   %v3 = call zeroext i8 @f0(i32 20)
   %v4 = zext i8 %v3 to i32
-  %v5 = call i32 (i8*, ...) @f3(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @g1, i32 0, i32 0), i32 %v4) #2
+  %v5 = call i32 (ptr, ...) @f3(ptr @g1, i32 %v4) #2
   ret i32 0
 }
 
 ; Function Attrs: nounwind
-declare i32 @f3(i8*, ...) #1
+declare i32 @f3(ptr, ...) #1
 
 attributes #0 = { noinline nounwind ssp "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length64b" }
 attributes #1 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length64b" }

diff  --git a/llvm/test/CodeGen/Hexagon/sdr-nosplit1.ll b/llvm/test/CodeGen/Hexagon/sdr-nosplit1.ll
index c171e0ab5cc43..bb20f77df5d92 100644
--- a/llvm/test/CodeGen/Hexagon/sdr-nosplit1.ll
+++ b/llvm/test/CodeGen/Hexagon/sdr-nosplit1.ll
@@ -8,13 +8,13 @@
 
 target triple = "hexagon"
 
-define void @fred(i64 %a0, i64 %a1, i64 %a2, i64* nocapture %a3, i32 %a4) local_unnamed_addr #0 {
+define void @fred(i64 %a0, i64 %a1, i64 %a2, ptr nocapture %a3, i32 %a4) local_unnamed_addr #0 {
 b5:
   %v6 = icmp sgt i32 %a4, 0
   br i1 %v6, label %b7, label %b20
 
 b7:                                               ; preds = %b7, %b5
-  %v8 = phi i64* [ %v16, %b7 ], [ %a3, %b5 ]
+  %v8 = phi ptr [ %v16, %b7 ], [ %a3, %b5 ]
   %v9 = phi i32 [ %v18, %b7 ], [ 0, %b5 ]
   %v10 = phi i64 [ %v17, %b7 ], [ %a0, %b5 ]
   %v11 = tail call i64 @llvm.hexagon.A2.andp(i64 %v10, i64 1085102592571150095)
@@ -22,9 +22,9 @@ b7:                                               ; preds = %b7, %b5
   %v13 = tail call i64 @llvm.hexagon.A2.vsubub(i64 %v11, i64 %a1)
   %v14 = and i32 %v12, 255
   %v15 = tail call i64 @llvm.hexagon.C2.vmux(i32 %v14, i64 %a2, i64 %v13)
-  store i64 %v15, i64* %v8, align 8
-  %v16 = getelementptr i64, i64* %v8, i32 1
-  %v17 = load i64, i64* %v16, align 8
+  store i64 %v15, ptr %v8, align 8
+  %v16 = getelementptr i64, ptr %v8, i32 1
+  %v17 = load i64, ptr %v16, align 8
   %v18 = add nuw nsw i32 %v9, 1
   %v19 = icmp eq i32 %v18, %a4
   br i1 %v19, label %b20, label %b7

diff  --git a/llvm/test/CodeGen/Hexagon/sdr-reg-profit.ll b/llvm/test/CodeGen/Hexagon/sdr-reg-profit.ll
index 0d82d2189a85f..951a05a53df28 100644
--- a/llvm/test/CodeGen/Hexagon/sdr-reg-profit.ll
+++ b/llvm/test/CodeGen/Hexagon/sdr-reg-profit.ll
@@ -20,12 +20,11 @@
 
 target triple = "hexagon"
 
-define i32 @fred(i32 %a0, i64* nocapture readonly %a1) local_unnamed_addr #0 {
+define i32 @fred(i32 %a0, ptr nocapture readonly %a1) local_unnamed_addr #0 {
 b2:
-  %v3 = bitcast i64* %a1 to i32*
-  %v4 = getelementptr inbounds i32, i32* %v3, i32 1
-  %v5 = load i32, i32* %v3, align 4
-  %v6 = load i32, i32* %v4, align 4
+  %v4 = getelementptr inbounds i32, ptr %a1, i32 1
+  %v5 = load i32, ptr %a1, align 4
+  %v6 = load i32, ptr %v4, align 4
   %v7 = zext i32 %a0 to i64
   br label %b8
 
@@ -43,11 +42,11 @@ b8:                                               ; preds = %b8, %b2
   %v19 = zext i32 %v9 to i64
   %v20 = or i64 %v13, %v19
   %v21 = tail call i64 @llvm.hexagon.S4.vxsubaddhr(i64 %v20, i64 %v7)
-  %v22 = getelementptr inbounds i32, i32* %v3, i32 %v11
-  %v23 = load i32, i32* %v22, align 4
+  %v22 = getelementptr inbounds i32, ptr %a1, i32 %v11
+  %v23 = load i32, ptr %v22, align 4
   %v24 = or i32 %v11, 1
-  %v25 = getelementptr inbounds i32, i32* %v3, i32 %v24
-  %v26 = load i32, i32* %v25, align 4
+  %v25 = getelementptr inbounds i32, ptr %a1, i32 %v24
+  %v26 = load i32, ptr %v25, align 4
   %v27 = zext i32 %v14 to i64
   %v28 = shl nuw i64 %v27, 32
   %v29 = zext i32 %v23 to i64
@@ -63,14 +62,14 @@ b8:                                               ; preds = %b8, %b2
   %v39 = lshr i64 %v38, 32
   %v40 = trunc i64 %v39 to i32
   %v41 = add nuw nsw i32 %v11, 2
-  %v42 = getelementptr inbounds i32, i32* %v3, i32 %v41
+  %v42 = getelementptr inbounds i32, ptr %a1, i32 %v41
   %v43 = add nuw nsw i32 %v11, 3
-  %v44 = getelementptr inbounds i32, i32* %v3, i32 %v43
+  %v44 = getelementptr inbounds i32, ptr %a1, i32 %v43
   %v45 = add nuw nsw i32 %v11, 4
   %v46 = and i64 %v18, -4294967296
   %v47 = and i64 %v21, -4294967296
-  %v48 = load i32, i32* %v42, align 4
-  %v49 = load i32, i32* %v44, align 4
+  %v48 = load i32, ptr %v42, align 4
+  %v49 = load i32, ptr %v44, align 4
   %v50 = icmp ult i32 %v45, 30
   br i1 %v50, label %b8, label %b51
 

diff  --git a/llvm/test/CodeGen/Hexagon/section_7275.ll b/llvm/test/CodeGen/Hexagon/section_7275.ll
index 1806f1e9c844f..733cee86a60b9 100644
--- a/llvm/test/CodeGen/Hexagon/section_7275.ll
+++ b/llvm/test/CodeGen/Hexagon/section_7275.ll
@@ -29,21 +29,21 @@
 
 define void @foo() nounwind {
 entry:
-  %0 = load i32, i32* @b, align 4
-  store i32 %0, i32* @a, align 4
-  %1 = load i32, i32* @d, align 4
-  store i32 %1, i32* @c, align 4
-  %2 = load i32, i32* @f, align 4
-  store i32 %2, i32* @e, align 4
-  %3 = load i32, i32* @h, align 4
-  store i32 %3, i32* @g, align 4
+  %0 = load i32, ptr @b, align 4
+  store i32 %0, ptr @a, align 4
+  %1 = load i32, ptr @d, align 4
+  store i32 %1, ptr @c, align 4
+  %2 = load i32, ptr @f, align 4
+  store i32 %2, ptr @e, align 4
+  %3 = load i32, ptr @h, align 4
+  store i32 %3, ptr @g, align 4
   ret void
 }
 
 define void @bar() nounwind section ".function.section" {
 entry:
-  %0 = load i32, i32* @a, align 4
-  store i32 %0, i32* @b, align 4
+  %0 = load i32, ptr @a, align 4
+  store i32 %0, ptr @b, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/select-instr-align.ll b/llvm/test/CodeGen/Hexagon/select-instr-align.ll
index 1021f924f1d71..8a66ecb8fca4a 100644
--- a/llvm/test/CodeGen/Hexagon/select-instr-align.ll
+++ b/llvm/test/CodeGen/Hexagon/select-instr-align.ll
@@ -2,29 +2,29 @@
 
 ; CHECK-LABEL: aligned_load:
 ; CHECK: = vmem({{.*}})
-define <16 x i32> @aligned_load(<16 x i32>* %p, <16 x i32> %a) #0 {
-  %v = load <16 x i32>, <16 x i32>* %p, align 64
+define <16 x i32> @aligned_load(ptr %p, <16 x i32> %a) #0 {
+  %v = load <16 x i32>, ptr %p, align 64
   ret <16 x i32> %v
 }
 
 ; CHECK-LABEL: aligned_store:
 ; CHECK: vmem({{.*}}) =
-define void @aligned_store(<16 x i32>* %p, <16 x i32> %a) #0 {
-  store <16 x i32> %a, <16 x i32>* %p, align 64
+define void @aligned_store(ptr %p, <16 x i32> %a) #0 {
+  store <16 x i32> %a, ptr %p, align 64
   ret void
 }
 
 ; CHECK-LABEL: unaligned_load:
 ; CHECK: = vmemu({{.*}})
-define <16 x i32> @unaligned_load(<16 x i32>* %p, <16 x i32> %a) #0 {
-  %v = load <16 x i32>, <16 x i32>* %p, align 32
+define <16 x i32> @unaligned_load(ptr %p, <16 x i32> %a) #0 {
+  %v = load <16 x i32>, ptr %p, align 32
   ret <16 x i32> %v
 }
 
 ; CHECK-LABEL: unaligned_store:
 ; CHECK: vmemu({{.*}}) =
-define void @unaligned_store(<16 x i32>* %p, <16 x i32> %a) #0 {
-  store <16 x i32> %a, <16 x i32>* %p, align 32
+define void @unaligned_store(ptr %p, <16 x i32> %a) #0 {
+  store <16 x i32> %a, ptr %p, align 32
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/select-vector-pred.ll b/llvm/test/CodeGen/Hexagon/select-vector-pred.ll
index 4c5c0886f8008..e4e7c957e7ad3 100644
--- a/llvm/test/CodeGen/Hexagon/select-vector-pred.ll
+++ b/llvm/test/CodeGen/Hexagon/select-vector-pred.ll
@@ -7,20 +7,20 @@
 
 target triple = "hexagon"
 
-declare void @llvm.hexagon.V6.vS32b.qpred.ai.128B(<128 x i1>, i8*, <32 x i32>) #0
+declare void @llvm.hexagon.V6.vS32b.qpred.ai.128B(<128 x i1>, ptr, <32 x i32>) #0
 declare <128 x i1> @llvm.hexagon.V6.pred.scalar2.128B(i32) #1
 declare <128 x i1> @llvm.hexagon.V6.pred.and.128B(<128 x i1>, <128 x i1>) #1
 
-define void @libjit_convertFromD32_sm_hf_wrap_3_specialized(i16* %0) local_unnamed_addr #2 {
+define void @libjit_convertFromD32_sm_hf_wrap_3_specialized(ptr %0) local_unnamed_addr #2 {
 entry:
-  %arrayidx55.i.i = getelementptr inbounds i16, i16* %0, i32 undef
-  %1 = ptrtoint i16* %arrayidx55.i.i to i32
+  %arrayidx55.i.i = getelementptr inbounds i16, ptr %0, i32 undef
+  %1 = ptrtoint ptr %arrayidx55.i.i to i32
   %and.i5.i.i = and i32 %1, 127
   %2 = icmp eq i32 %and.i5.i.i, 127
   %.sroa.speculated.i13.i.i = zext i1 %2 to i32
   %3 = tail call <128 x i1> @llvm.hexagon.V6.pred.scalar2.128B(i32 %.sroa.speculated.i13.i.i) #3
   %4 = tail call <128 x i1> @llvm.hexagon.V6.pred.and.128B(<128 x i1> undef, <128 x i1> %3) #3
-  tail call void @llvm.hexagon.V6.vS32b.qpred.ai.128B(<128 x i1> %4, i8* nonnull undef, <32 x i32> undef) #3
+  tail call void @llvm.hexagon.V6.vS32b.qpred.ai.128B(<128 x i1> %4, ptr nonnull undef, <32 x i32> undef) #3
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/setmemrefs.ll b/llvm/test/CodeGen/Hexagon/setmemrefs.ll
index 00401507e6f60..7037505cb8ab2 100644
--- a/llvm/test/CodeGen/Hexagon/setmemrefs.ll
+++ b/llvm/test/CodeGen/Hexagon/setmemrefs.ll
@@ -9,13 +9,13 @@
 ; CHECK-NEXT:  r{{[0-9]*}} = memw(r1{{[678]}}+#0)
 
 ; Function Attrs: nounwind
-define i64 @f0(i32* nocapture %a0, i32* nocapture %a1, i32* nocapture %a2) #0 {
+define i64 @f0(ptr nocapture %a0, ptr nocapture %a1, ptr nocapture %a2) #0 {
 b0:
-  %v0 = tail call i32 bitcast (i32 (...)* @f1 to i32 ()*)() #0
-  store i32 %v0, i32* %a2, align 4, !tbaa !0
-  %v1 = load i32, i32* %a0, align 4, !tbaa !0
+  %v0 = tail call i32 @f1() #0
+  store i32 %v0, ptr %a2, align 4, !tbaa !0
+  %v1 = load i32, ptr %a0, align 4, !tbaa !0
   %v2 = sext i32 %v1 to i64
-  %v3 = load i32, i32* %a1, align 4, !tbaa !0
+  %v3 = load i32, ptr %a1, align 4, !tbaa !0
   %v4 = sext i32 %v3 to i64
   %v5 = mul nsw i64 %v4, %v2
   ret i64 %v5

diff  --git a/llvm/test/CodeGen/Hexagon/sffms.ll b/llvm/test/CodeGen/Hexagon/sffms.ll
index ef47976ab3bb5..e3fd11f495b37 100644
--- a/llvm/test/CodeGen/Hexagon/sffms.ll
+++ b/llvm/test/CodeGen/Hexagon/sffms.ll
@@ -4,22 +4,21 @@
 
 ; CHECK: r{{[0-9]+}} -= sfmpy
 
-%struct.matrix_params = type { float** }
+%struct.matrix_params = type { ptr }
 
 ; Function Attrs: norecurse nounwind
-define void @loop2_1(%struct.matrix_params* nocapture readonly %params, i32 %col1) #0 {
+define void @loop2_1(ptr nocapture readonly %params, i32 %col1) #0 {
 entry:
-  %matrixA = getelementptr inbounds %struct.matrix_params, %struct.matrix_params* %params, i32 0, i32 0
-  %0 = load float**, float*** %matrixA, align 4
-  %1 = load float*, float** %0, align 4
-  %arrayidx1 = getelementptr inbounds float, float* %1, i32 %col1
-  %2 = load float, float* %arrayidx1, align 4
-  %arrayidx3 = getelementptr inbounds float*, float** %0, i32 %col1
-  %3 = load float*, float** %arrayidx3, align 4
-  %4 = load float, float* %3, align 4
+  %0 = load ptr, ptr %params, align 4
+  %1 = load ptr, ptr %0, align 4
+  %arrayidx1 = getelementptr inbounds float, ptr %1, i32 %col1
+  %2 = load float, ptr %arrayidx1, align 4
+  %arrayidx3 = getelementptr inbounds ptr, ptr %0, i32 %col1
+  %3 = load ptr, ptr %arrayidx3, align 4
+  %4 = load float, ptr %3, align 4
   %mul = fmul float %2, %4
   %sub = fsub float %2, %mul
-  %arrayidx10 = getelementptr inbounds float, float* %3, i32 %col1
-  store float %sub, float* %arrayidx10, align 4
+  %arrayidx10 = getelementptr inbounds float, ptr %3, i32 %col1
+  store float %sub, ptr %arrayidx10, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/sfmpyacc_scale.ll b/llvm/test/CodeGen/Hexagon/sfmpyacc_scale.ll
index c1381fa3b21b3..ebfc4db2a532f 100644
--- a/llvm/test/CodeGen/Hexagon/sfmpyacc_scale.ll
+++ b/llvm/test/CodeGen/Hexagon/sfmpyacc_scale.ll
@@ -6,18 +6,18 @@ target triple = "hexagon"
 @g0 = private unnamed_addr constant [65 x i8] c"%f :  Q6_R_sfmpyacc_RRp_scale(FLT_MIN,FLT_MIN,FLT_MIN,CHAR_MIN)\0A\00", align 1
 
 ; Function Attrs: nounwind
-declare i32 @f0(i8*, ...) #0
+declare i32 @f0(ptr, ...) #0
 
 ; Function Attrs: nounwind
 define i32 @f1() #0 {
 b0:
   %v0 = alloca i32, align 4
   %v1 = alloca i32, align 4
-  store i32 0, i32* %v0
-  store i32 0, i32* %v1, align 4
+  store i32 0, ptr %v0
+  store i32 0, ptr %v1, align 4
   %v2 = call float @llvm.hexagon.F2.sffma.sc(float 0x3810000000000000, float 0x3810000000000000, float 0x3810000000000000, i32 0)
   %v3 = fpext float %v2 to double
-  %v4 = call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([65 x i8], [65 x i8]* @g0, i32 0, i32 0), double %v3) #0
+  %v4 = call i32 (ptr, ...) @f0(ptr @g0, double %v3) #0
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/shrink-frame-basic.ll b/llvm/test/CodeGen/Hexagon/shrink-frame-basic.ll
index 50b37885eda49..440ebb8963720 100644
--- a/llvm/test/CodeGen/Hexagon/shrink-frame-basic.ll
+++ b/llvm/test/CodeGen/Hexagon/shrink-frame-basic.ll
@@ -9,19 +9,19 @@ target datalayout = "e-m:e-p:32:32-i1:32-i64:64-a:0-v32:32-n16:32"
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define i32 @foo(i32 %n, i32* %p) #0 {
+define i32 @foo(i32 %n, ptr %p) #0 {
 entry:
-  %cmp = icmp eq i32* %p, null
+  %cmp = icmp eq ptr %p, null
   br i1 %cmp, label %if.end, label %if.then
 
 if.then:                                          ; preds = %entry
-  %0 = load i32, i32* %p, align 4
+  %0 = load i32, ptr %p, align 4
   %inc = add nsw i32 %0, 1
-  store i32 %inc, i32* %p, align 4
+  store i32 %inc, ptr %p, align 4
   br label %return
 
 if.end:                                           ; preds = %entry
-  %call = tail call i32 bitcast (i32 (...)* @bar to i32 (i32)*)(i32 %n) #0
+  %call = tail call i32 @bar(i32 %n) #0
   %add = add nsw i32 %call, 1
   br label %return
 

diff  --git a/llvm/test/CodeGen/Hexagon/signed_immediates.ll b/llvm/test/CodeGen/Hexagon/signed_immediates.ll
index 334dbbeff7985..72e6d24cf794d 100644
--- a/llvm/test/CodeGen/Hexagon/signed_immediates.ll
+++ b/llvm/test/CodeGen/Hexagon/signed_immediates.ll
@@ -2,42 +2,42 @@
 
 ; s4_0Imm
 ; CHECK: memb(r0++#-1) = r1
-define i8* @foo1(i8* %a, i8 %b)  {
-  store i8 %b, i8* %a
-  %c = getelementptr i8, i8* %a, i32 -1
-  ret i8* %c
+define ptr @foo1(ptr %a, i8 %b)  {
+  store i8 %b, ptr %a
+  %c = getelementptr i8, ptr %a, i32 -1
+  ret ptr %c
 }
 
 ; s4_1Imm
 ; CHECK: memh(r0++#-2) = r1
-define i16* @foo2(i16* %a, i16 %b)  {
-  store i16 %b, i16* %a
-  %c = getelementptr i16, i16* %a, i32 -1
-  ret i16* %c
+define ptr @foo2(ptr %a, i16 %b)  {
+  store i16 %b, ptr %a
+  %c = getelementptr i16, ptr %a, i32 -1
+  ret ptr %c
 }
 
 ; s4_2Imm
 ; CHECK: memw(r0++#-4) = r1
-define i32* @foo3(i32* %a, i32 %b)  {
-  store i32 %b, i32* %a
-  %c = getelementptr i32, i32* %a, i32 -1
-  ret i32* %c
+define ptr @foo3(ptr %a, i32 %b)  {
+  store i32 %b, ptr %a
+  %c = getelementptr i32, ptr %a, i32 -1
+  ret ptr %c
 }
 
 ; s4_3Imm
 ; CHECK: memd(r0++#-8) = r3:2
-define i64* @foo4(i64* %a, i64 %b)  {
-  store i64 %b, i64* %a
-  %c = getelementptr i64, i64* %a, i32 -1
-  ret i64* %c
+define ptr @foo4(ptr %a, i64 %b)  {
+  store i64 %b, ptr %a
+  %c = getelementptr i64, ptr %a, i32 -1
+  ret ptr %c
 }
 
 ; s6Ext
 ; CHECK: if (p0.new) memw(r0+#0) = #-1
-define void @foo5(i32* %a, i1 %b) {
+define void @foo5(ptr %a, i1 %b) {
 br i1 %b, label %x, label %y
 x:
-  store i32 -1, i32* %a
+  store i32 -1, ptr %a
   ret void
 y:
   ret void
@@ -52,33 +52,33 @@ define i1 @foo7(i32 %a) {
 
 ; s11_0Ext
 ; CHECK: memb(r0+#-1) = r1
-define void @foo8(i8* %a, i8 %b) {
-  %c = getelementptr i8, i8* %a, i32 -1
-  store i8 %b, i8* %c
+define void @foo8(ptr %a, i8 %b) {
+  %c = getelementptr i8, ptr %a, i32 -1
+  store i8 %b, ptr %c
   ret void
 }
 
 ; s11_1Ext
 ; CHECK: memh(r0+#-2) = r1
-define void @foo9(i16* %a, i16 %b) {
-  %c = getelementptr i16, i16* %a, i32 -1
-  store i16 %b, i16* %c
+define void @foo9(ptr %a, i16 %b) {
+  %c = getelementptr i16, ptr %a, i32 -1
+  store i16 %b, ptr %c
   ret void
 }
 
 ; s11_2Ext
 ; CHECK: memw(r0+#-4) = r1
-define void @foo10(i32* %a, i32 %b) {
-  %c = getelementptr i32, i32* %a, i32 -1
-  store i32 %b, i32* %c
+define void @foo10(ptr %a, i32 %b) {
+  %c = getelementptr i32, ptr %a, i32 -1
+  store i32 %b, ptr %c
   ret void
 }
 
 ; s11_3Ext
 ; CHECK: memd(r0+#-8) = r3:2
-define void @foo11(i64* %a, i64 %b) {
-  %c = getelementptr i64, i64* %a, i32 -1
-  store i64 %b, i64* %c
+define void @foo11(ptr %a, i64 %b) {
+  %c = getelementptr i64, ptr %a, i32 -1
+  store i64 %b, ptr %c
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/simpletailcall.ll b/llvm/test/CodeGen/Hexagon/simpletailcall.ll
index 76854bc1981d6..7d1055c9ae549 100644
--- a/llvm/test/CodeGen/Hexagon/simpletailcall.ll
+++ b/llvm/test/CodeGen/Hexagon/simpletailcall.ll
@@ -7,7 +7,7 @@
 define void @f0(i32 %a0) #0 {
 b0:
   %v0 = add nsw i32 %a0, 3
-  %v1 = tail call i32 bitcast (i32 (...)* @f1 to i32 (i32)*)(i32 %v0) #0
+  %v1 = tail call i32 @f1(i32 %v0) #0
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/simplify64bitops_7223.ll b/llvm/test/CodeGen/Hexagon/simplify64bitops_7223.ll
index 56093c1d00c94..427a9948f63b6 100644
--- a/llvm/test/CodeGen/Hexagon/simplify64bitops_7223.ll
+++ b/llvm/test/CodeGen/Hexagon/simplify64bitops_7223.ll
@@ -14,10 +14,9 @@
 @g0 = common global i32 0, align 4
 
 ; Function Attrs: nounwind
-define i64 @f0(%s.22* nocapture %a0, i32 %a1) #0 {
+define i64 @f0(ptr nocapture %a0, i32 %a1) #0 {
 b0:
-  %v0 = bitcast %s.22* %a0 to i16*
-  %v1 = load i16, i16* %v0, align 2, !tbaa !0
+  %v1 = load i16, ptr %a0, align 2, !tbaa !0
   %v2 = zext i16 %v1 to i64
   %v3 = icmp sgt i32 %a1, 0
   br i1 %v3, label %b1, label %b4
@@ -26,14 +25,14 @@ b1:                                               ; preds = %b0
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
-  %v4 = phi i16* [ %v8, %b2 ], [ %v0, %b1 ]
+  %v4 = phi ptr [ %v8, %b2 ], [ %a0, %b1 ]
   %v5 = phi i32 [ %v10, %b2 ], [ undef, %b1 ]
   %v6 = phi i32 [ %v15, %b2 ], [ 0, %b1 ]
   %v7 = phi i64 [ %v14, %b2 ], [ %v2, %b1 ]
-  %v8 = getelementptr inbounds i16, i16* %v4, i32 1
+  %v8 = getelementptr inbounds i16, ptr %v4, i32 1
   %v9 = trunc i64 %v7 to i32
   %v10 = add i32 %v5, %v9
-  %v11 = load i16, i16* %v8, align 2, !tbaa !0
+  %v11 = load i16, ptr %v8, align 2, !tbaa !0
   %v12 = zext i16 %v11 to i64
   %v13 = and i64 %v7, -4294967296
   %v14 = or i64 %v12, %v13
@@ -47,7 +46,7 @@ b3:                                               ; preds = %b2
 b4:                                               ; preds = %b3, %b0
   %v17 = phi i32 [ undef, %b0 ], [ %v10, %b3 ]
   %v18 = phi i64 [ %v2, %b0 ], [ %v14, %b3 ]
-  store volatile i32 %v17, i32* @g0, align 4, !tbaa !4
+  store volatile i32 %v17, ptr @g0, align 4, !tbaa !4
   ret i64 %v18
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/split-const32-const64.ll b/llvm/test/CodeGen/Hexagon/split-const32-const64.ll
index 30bc5ed322560..5766053ae5ead 100644
--- a/llvm/test/CodeGen/Hexagon/split-const32-const64.ll
+++ b/llvm/test/CodeGen/Hexagon/split-const32-const64.ll
@@ -14,9 +14,9 @@ define void @test1() nounwind {
 entry:
   br label %block
 block:
-  store i32 12345670, i32* @a, align 4
-  %q = ptrtoint i8* blockaddress (@test1, %block) to i32
-  store i32 %q, i32* @b, align 4
+  store i32 12345670, ptr @a, align 4
+  %q = ptrtoint ptr blockaddress (@test1, %block) to i32
+  store i32 %q, ptr @b, align 4
   ret void
 }
 
@@ -24,7 +24,7 @@ block:
 ; CHECK-NOT: CONST64
 define void @test2() nounwind {
 entry:
-  store i64 1234567890123, i64* @la, align 8
-  store i64 1234567890123, i64* @lb, align 8
+  store i64 1234567890123, ptr @la, align 8
+  store i64 1234567890123, ptr @lb, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/split-muxii.ll b/llvm/test/CodeGen/Hexagon/split-muxii.ll
index 77c79f571a645..687221b8f8ad2 100644
--- a/llvm/test/CodeGen/Hexagon/split-muxii.ll
+++ b/llvm/test/CodeGen/Hexagon/split-muxii.ll
@@ -5,13 +5,13 @@ target triple = "hexagon"
 
 define void @f0() #0 {
 b0:
-  %v0 = load i32, i32* null, align 4
+  %v0 = load i32, ptr null, align 4
   %v1 = icmp slt i32 undef, %v0
   %v2 = zext i1 %v1 to i32
   %v3 = icmp sgt i32 undef, 0
   %v4 = zext i1 %v3 to i32
   %v5 = add nsw i32 %v2, %v4
-  store i32 %v5, i32* undef, align 4
+  store i32 %v5, ptr undef, align 4
   br i1 undef, label %b1, label %b2
 
 b1:                                               ; preds = %b0

diff  --git a/llvm/test/CodeGen/Hexagon/split-vecpred.ll b/llvm/test/CodeGen/Hexagon/split-vecpred.ll
index c3c0e18b2b261..c68c491d09c9e 100644
--- a/llvm/test/CodeGen/Hexagon/split-vecpred.ll
+++ b/llvm/test/CodeGen/Hexagon/split-vecpred.ll
@@ -55,7 +55,7 @@ b11:                                              ; preds = %b10, %b8
   %v11 = tail call <16 x i32> @llvm.hexagon.V6.vor(<16 x i32> %v10, <16 x i32> undef)
   %v12 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v11, i32 -1)
   %v13 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v12, i32 undef)
-  tail call void @llvm.hexagon.V6.vmaskedstoreq(<64 x i1> undef, i8* undef, <16 x i32> %v13)
+  tail call void @llvm.hexagon.V6.vmaskedstoreq(<64 x i1> undef, ptr undef, <16 x i32> %v13)
   unreachable
 
 b12:                                              ; preds = %b12, %b9
@@ -81,7 +81,7 @@ declare <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32>, <16 x i32>, i32) #1
 declare <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1>, i32) #1
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.hexagon.V6.vmaskedstoreq(<64 x i1>, i8*, <16 x i32>) #2
+declare void @llvm.hexagon.V6.vmaskedstoreq(<64 x i1>, ptr, <16 x i32>) #2
 
 ; Function Attrs: nounwind readnone
 declare <16 x i32> @llvm.hexagon.V6.vaddbq(<64 x i1>, <16 x i32>, <16 x i32>) #1

diff  --git a/llvm/test/CodeGen/Hexagon/stack-align-reset.ll b/llvm/test/CodeGen/Hexagon/stack-align-reset.ll
index f7639c728624b..e258ddeeaa614 100644
--- a/llvm/test/CodeGen/Hexagon/stack-align-reset.ll
+++ b/llvm/test/CodeGen/Hexagon/stack-align-reset.ll
@@ -6,16 +6,16 @@
 target triple = "hexagon-unknown--elf"
 
 %struct.0 = type { [5 x i32] }
-%struct.2 = type { i32, i32, i32, %struct.1* }
-%struct.1 = type { i16*, i32, i32, i32 }
+%struct.2 = type { i32, i32, i32, ptr }
+%struct.1 = type { ptr, i32, i32, i32 }
 
 @g0 = external hidden unnamed_addr constant [52 x i8], align 1
 @g1 = external hidden unnamed_addr constant [3 x i8], align 1
 
-declare extern_weak void @f0(i32, i8*, i32, i8*, ...) #0
-declare void @f1(%struct.0*, i32) #0
+declare extern_weak void @f0(i32, ptr, i32, ptr, ...) #0
+declare void @f1(ptr, i32) #0
 
-define void @fred(i8* %a0) #0 {
+define void @fred(ptr %a0) #0 {
 b1:
   %v2 = alloca %struct.0, align 4
   %v3 = alloca %struct.2, i32 undef, align 8
@@ -25,20 +25,20 @@ b4:                                               ; preds = %b1
   br label %b7
 
 b5:                                               ; preds = %b5, %b1
-  %v6 = getelementptr inbounds %struct.2, %struct.2* %v3, i32 undef, i32 3
-  store %struct.1* undef, %struct.1** %v6, align 4
+  %v6 = getelementptr inbounds %struct.2, ptr %v3, i32 undef, i32 3
+  store ptr undef, ptr %v6, align 4
   br label %b5
 
 b7:                                               ; preds = %b10, %b4
   %v8 = call i32 @llvm.hexagon.V6.extractw(<16 x i32> zeroinitializer, i32 0)
-  br i1 icmp eq (void (i32, i8*, i32, i8*, ...)* @f0, void (i32, i8*, i32, i8*, ...)* null), label %b11, label %b9
+  br i1 icmp eq (ptr @f0, ptr null), label %b11, label %b9
 
 b9:                                               ; preds = %b7
-  call void (i32, i8*, i32, i8*, ...) @f0(i32 2, i8* getelementptr inbounds ([52 x i8], [52 x i8]* @g0, i32 0, i32 0), i32 2346, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @g1, i32 0, i32 0), i32 %v8)
+  call void (i32, ptr, i32, ptr, ...) @f0(i32 2, ptr @g0, i32 2346, ptr @g1, i32 %v8)
   unreachable
 
 b10:                                              ; preds = %b11
-  call void @f1(%struct.0* nonnull %v2, i32 28)
+  call void @f1(ptr nonnull %v2, i32 28)
   br label %b7
 
 b11:                                              ; preds = %b11, %b7

diff  --git a/llvm/test/CodeGen/Hexagon/stack-align1.ll b/llvm/test/CodeGen/Hexagon/stack-align1.ll
index aefd16594f067..b4bd125c687f0 100644
--- a/llvm/test/CodeGen/Hexagon/stack-align1.ll
+++ b/llvm/test/CodeGen/Hexagon/stack-align1.ll
@@ -10,12 +10,10 @@ define void @foo() #0 {
 entry:
   %x = alloca i32, align 4
   %y = alloca i32, align 32
-  %0 = bitcast i32* %x to i8*
-  %1 = bitcast i32* %y to i8*
-  call void @bar(i8* %0, i8* %1)
+  call void @bar(ptr %x, ptr %y)
   ret void
 }
 
-declare void @bar(i8*, i8*) #0
+declare void @bar(ptr, ptr) #0
 
 attributes #0 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/stack-align2.ll b/llvm/test/CodeGen/Hexagon/stack-align2.ll
index 042e4097c56a5..5ebcbbfff5272 100644
--- a/llvm/test/CodeGen/Hexagon/stack-align2.ll
+++ b/llvm/test/CodeGen/Hexagon/stack-align2.ll
@@ -14,14 +14,10 @@ entry:
   %y = alloca i32, align 32
   %z = alloca i32, align 64
   %w = alloca i32, align 128
-  %0 = bitcast i32* %x to i8*
-  %1 = bitcast i32* %y to i8*
-  %2 = bitcast i32* %z to i8*
-  %3 = bitcast i32* %w to i8*
-  call void @bar(i8* %0, i8* %1, i8* %2, i8* %3)
+  call void @bar(ptr %x, ptr %y, ptr %z, ptr %w)
   ret void
 }
 
-declare void @bar(i8*, i8*, i8*, i8*) #0
+declare void @bar(ptr, ptr, ptr, ptr) #0
 
 attributes #0 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/stack-alloca1.ll b/llvm/test/CodeGen/Hexagon/stack-alloca1.ll
index b38b8846d26fc..3c2296b4038d4 100644
--- a/llvm/test/CodeGen/Hexagon/stack-alloca1.ll
+++ b/llvm/test/CodeGen/Hexagon/stack-alloca1.ll
@@ -8,11 +8,10 @@ target triple = "hexagon-unknown-unknown"
 define void @foo(i32 %n) #0 {
 entry:
   %x = alloca i32, i32 %n
-  %0 = bitcast i32* %x to i8*
-  call void @bar(i8* %0)
+  call void @bar(ptr %x)
   ret void
 }
 
-declare void @bar(i8*) #0
+declare void @bar(ptr) #0
 
 attributes #0 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/stack-alloca2.ll b/llvm/test/CodeGen/Hexagon/stack-alloca2.ll
index 103358d55840d..40c6ba8f12fc6 100644
--- a/llvm/test/CodeGen/Hexagon/stack-alloca2.ll
+++ b/llvm/test/CodeGen/Hexagon/stack-alloca2.ll
@@ -12,12 +12,10 @@ define void @foo(i32 %n) #0 {
 entry:
   %x = alloca i32, i32 %n
   %y = alloca i32, align 32
-  %0 = bitcast i32* %x to i8*
-  %1 = bitcast i32* %y to i8*
-  call void @bar(i8* %0, i8* %1)
+  call void @bar(ptr %x, ptr %y)
   ret void
 }
 
-declare void @bar(i8*, i8* %y) #0
+declare void @bar(ptr, ptr %y) #0
 
 attributes #0 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/stack-guard-acceptable-type.ll b/llvm/test/CodeGen/Hexagon/stack-guard-acceptable-type.ll
index da19259631c4f..432699065554d 100644
--- a/llvm/test/CodeGen/Hexagon/stack-guard-acceptable-type.ll
+++ b/llvm/test/CodeGen/Hexagon/stack-guard-acceptable-type.ll
@@ -1,5 +1,5 @@
 ; Check that we accept a user definition/declaration of __stack_chk_guard
-; that is not the expected type (i8*) but one of the same size.
+; that is not the expected type (ptr) but one of the same size.
 ;
 ; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
 ; CHECK: __stack_chk_fail
@@ -14,20 +14,19 @@ define zeroext i8 @f0(i32 %a0) #0 {
 b0:
   %v0 = alloca i32, align 4
   %v1 = alloca [64 x i8], align 8
-  %v2 = alloca i8*, align 4
-  store i32 %a0, i32* %v0, align 4
-  store i8* getelementptr inbounds ([37 x i8], [37 x i8]* @g0, i32 0, i32 0), i8** %v2, align 4
-  %v3 = getelementptr inbounds [64 x i8], [64 x i8]* %v1, i32 0, i32 0
-  %v4 = load i8*, i8** %v2, align 4
-  %v5 = call i8* @f1(i8* %v3, i8* %v4) #1
-  %v6 = load i32, i32* %v0, align 4
-  %v7 = getelementptr inbounds [64 x i8], [64 x i8]* %v1, i32 0, i32 %v6
-  %v8 = load i8, i8* %v7, align 1
+  %v2 = alloca ptr, align 4
+  store i32 %a0, ptr %v0, align 4
+  store ptr @g0, ptr %v2, align 4
+  %v4 = load ptr, ptr %v2, align 4
+  %v5 = call ptr @f1(ptr %v1, ptr %v4) #1
+  %v6 = load i32, ptr %v0, align 4
+  %v7 = getelementptr inbounds [64 x i8], ptr %v1, i32 0, i32 %v6
+  %v8 = load i8, ptr %v7, align 1
   ret i8 %v8
 }
 
 ; Function Attrs: nounwind
-declare i8* @f1(i8*, i8*) #1
+declare ptr @f1(ptr, ptr) #1
 
 attributes #0 = { noinline nounwind ssp }
 attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/static.ll b/llvm/test/CodeGen/Hexagon/static.ll
index 15aab434158c6..ea88580ff8927 100644
--- a/llvm/test/CodeGen/Hexagon/static.ll
+++ b/llvm/test/CodeGen/Hexagon/static.ll
@@ -10,11 +10,11 @@
 
 define void @foo() nounwind {
 entry:
-  %0 = load i32, i32* @num, align 4
-  %1 = load i32, i32* @acc, align 4
+  %0 = load i32, ptr @num, align 4
+  %1 = load i32, ptr @acc, align 4
   %mul = mul nsw i32 %0, %1
-  %2 = load i32, i32* @val, align 4
+  %2 = load i32, ptr @val, align 4
   %add = add nsw i32 %mul, %2
-  store i32 %add, i32* @num, align 4
+  store i32 %add, ptr @num, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/store-AbsSet.ll b/llvm/test/CodeGen/Hexagon/store-AbsSet.ll
index 94c76b309dbff..6ee2c61f44886 100644
--- a/llvm/test/CodeGen/Hexagon/store-AbsSet.ll
+++ b/llvm/test/CodeGen/Hexagon/store-AbsSet.ll
@@ -10,54 +10,53 @@
 %s.4 = type { i32, i32, i32 }
 
 ; Function Attrs: nounwind ssp
-define void @f0(%s.0* nocapture readonly %a0, i32 %a1) #0 {
+define void @f0(ptr nocapture readonly %a0, i32 %a1) #0 {
 b0:
-  %v0 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 0, i32 0
-  %v1 = load i8, i8* %v0, align 1
+  %v1 = load i8, ptr %a0, align 1
   %v2 = and i32 %a1, 1
   %v3 = icmp eq i32 %v2, 0
   br i1 %v3, label %b4, label %b1
 
 b1:                                               ; preds = %b0
-  %v4 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 1, i32 0
-  %v5 = load i8, i8* %v4, align 1
+  %v4 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 1, i32 0
+  %v5 = load i8, ptr %v4, align 1
   %v6 = icmp eq i8 %v5, 0
   br i1 %v6, label %b3, label %b2
 
 b2:                                               ; preds = %b1
-  %v7 = getelementptr %s.0, %s.0* %a0, i32 0, i32 2, i32 0
-  %v8 = load i32, i32* %v7, align 4
-  store volatile i32 %v8, i32* inttoptr (i32 -318766672 to i32*), align 16
-  %v9 = getelementptr %s.0, %s.0* %a0, i32 0, i32 3, i32 0
-  %v10 = load i32, i32* %v9, align 4
-  store volatile i32 %v10, i32* inttoptr (i32 -318766672 to i32*), align 16
+  %v7 = getelementptr %s.0, ptr %a0, i32 0, i32 2, i32 0
+  %v8 = load i32, ptr %v7, align 4
+  store volatile i32 %v8, ptr inttoptr (i32 -318766672 to ptr), align 16
+  %v9 = getelementptr %s.0, ptr %a0, i32 0, i32 3, i32 0
+  %v10 = load i32, ptr %v9, align 4
+  store volatile i32 %v10, ptr inttoptr (i32 -318766672 to ptr), align 16
   br label %b3
 
 b3:                                               ; preds = %b2, %b1
-  %v11 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 4, i32 0
-  %v12 = load i32, i32* %v11, align 4
+  %v11 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 4, i32 0
+  %v12 = load i32, ptr %v11, align 4
   %v13 = zext i8 %v1 to i32
   %v14 = mul nsw i32 %v13, 64
   %v15 = add nsw i32 %v14, -318111684
-  %v16 = inttoptr i32 %v15 to i32*
-  store volatile i32 %v12, i32* %v16, align 4
+  %v16 = inttoptr i32 %v15 to ptr
+  store volatile i32 %v12, ptr %v16, align 4
   %v17 = shl i32 1, %v13
-  %v18 = load volatile i32, i32* inttoptr (i32 -318111596 to i32*), align 4
+  %v18 = load volatile i32, ptr inttoptr (i32 -318111596 to ptr), align 4
   %v19 = and i32 %v17, 3
   %v20 = xor i32 %v19, 3
   %v21 = and i32 %v18, %v20
-  %v22 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 4, i32 1
-  %v23 = load i32, i32* %v22, align 4
+  %v22 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 4, i32 1
+  %v23 = load i32, ptr %v22, align 4
   %v24 = and i32 %v23, 1
   %v25 = shl i32 %v24, %v13
   %v26 = or i32 %v25, %v21
-  store volatile i32 %v26, i32* inttoptr (i32 -318111596 to i32*), align 4
-  %v27 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 4, i32 2
-  %v28 = load i32, i32* %v27, align 4
+  store volatile i32 %v26, ptr inttoptr (i32 -318111596 to ptr), align 4
+  %v27 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 4, i32 2
+  %v28 = load i32, ptr %v27, align 4
   %v29 = mul nsw i32 %v13, 4
   %v30 = add nsw i32 %v29, -318111592
-  %v31 = inttoptr i32 %v30 to i32*
-  store volatile i32 %v28, i32* %v31, align 4
+  %v31 = inttoptr i32 %v30 to ptr
+  store volatile i32 %v28, ptr %v31, align 4
   br label %b4
 
 b4:                                               ; preds = %b3, %b0

diff  --git a/llvm/test/CodeGen/Hexagon/store-abs.ll b/llvm/test/CodeGen/Hexagon/store-abs.ll
index c06d7f2e87124..5ef3b506ab391 100644
--- a/llvm/test/CodeGen/Hexagon/store-abs.ll
+++ b/llvm/test/CodeGen/Hexagon/store-abs.ll
@@ -14,7 +14,7 @@
 ; CHECK: memd(##441656) = r{{[0-9]+}}
 define void @f0(i64 %a0) #0 {
 b0:
-  store volatile i64 %a0, i64* inttoptr (i32 441656 to i64*)
+  store volatile i64 %a0, ptr inttoptr (i32 441656 to ptr)
   ret void
 }
 
@@ -23,7 +23,7 @@ b0:
 define void @f1(i64 %a0) #0 {
 b0:
   %v0 = trunc i64 %a0 to i32
-  store volatile i32 %v0, i32* inttoptr (i32 441656 to i32*)
+  store volatile i32 %v0, ptr inttoptr (i32 441656 to ptr)
   ret void
 }
 
@@ -32,7 +32,7 @@ b0:
 define void @f2(i64 %a0) #0 {
 b0:
   %v0 = trunc i64 %a0 to i16
-  store volatile i16 %v0, i16* inttoptr (i32 441656 to i16*)
+  store volatile i16 %v0, ptr inttoptr (i32 441656 to ptr)
   ret void
 }
 
@@ -41,7 +41,7 @@ b0:
 define void @f3(i64 %a0) #0 {
 b0:
   %v0 = trunc i64 %a0 to i8
-  store volatile i8 %v0, i8* inttoptr (i32 441656 to i8*)
+  store volatile i8 %v0, ptr inttoptr (i32 441656 to ptr)
   ret void
 }
 
@@ -50,7 +50,7 @@ b0:
 define void @f4(i64 %a0) #0 {
 b0:
   %v0 = trunc i64 %a0 to i32
-  store volatile i32 %v0, i32* @g2
+  store volatile i32 %v0, ptr @g2
   ret void
 }
 
@@ -59,7 +59,7 @@ b0:
 define void @f5(i64 %a0) #0 {
 b0:
   %v0 = trunc i64 %a0 to i16
-  store volatile i16 %v0, i16* @g1
+  store volatile i16 %v0, ptr @g1
   ret void
 }
 
@@ -68,7 +68,7 @@ b0:
 define void @f6(i64 %a0) #0 {
 b0:
   %v0 = trunc i64 %a0 to i8
-  store volatile i8 %v0, i8* @g0
+  store volatile i8 %v0, ptr @g0
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/store-constant.ll b/llvm/test/CodeGen/Hexagon/store-constant.ll
index ac01cc6a19758..4599f21ab37ef 100644
--- a/llvm/test/CodeGen/Hexagon/store-constant.ll
+++ b/llvm/test/CodeGen/Hexagon/store-constant.ll
@@ -9,42 +9,42 @@
 ; CHECK: memb{{.*}} = {{.*}}#4
 ; CHECK: memb{{.*}} = {{.*}}#5
 
-define void @f0(i32* nocapture %a0) #0 {
+define void @f0(ptr nocapture %a0) #0 {
 b0:
-  store i32 0, i32* %a0, align 4
+  store i32 0, ptr %a0, align 4
   ret void
 }
 
-define void @f1(i32* nocapture %a0) #0 {
+define void @f1(ptr nocapture %a0) #0 {
 b0:
-  %v0 = getelementptr inbounds i32, i32* %a0, i32 1
-  store i32 1, i32* %v0, align 4
+  %v0 = getelementptr inbounds i32, ptr %a0, i32 1
+  store i32 1, ptr %v0, align 4
   ret void
 }
 
-define void @f2(i16* nocapture %a0) #0 {
+define void @f2(ptr nocapture %a0) #0 {
 b0:
-  store i16 2, i16* %a0, align 2
+  store i16 2, ptr %a0, align 2
   ret void
 }
 
-define void @f3(i16* nocapture %a0) #0 {
+define void @f3(ptr nocapture %a0) #0 {
 b0:
-  %v0 = getelementptr inbounds i16, i16* %a0, i32 2
-  store i16 3, i16* %v0, align 2
+  %v0 = getelementptr inbounds i16, ptr %a0, i32 2
+  store i16 3, ptr %v0, align 2
   ret void
 }
 
-define void @f4(i8* nocapture %a0) #0 {
+define void @f4(ptr nocapture %a0) #0 {
 b0:
-  store i8 4, i8* %a0, align 1
+  store i8 4, ptr %a0, align 1
   ret void
 }
 
-define void @f5(i8* nocapture %a0) #0 {
+define void @f5(ptr nocapture %a0) #0 {
 b0:
-  %v0 = getelementptr inbounds i8, i8* %a0, i32 2
-  store i8 5, i8* %v0, align 1
+  %v0 = getelementptr inbounds i8, ptr %a0, i32 2
+  store i8 5, ptr %v0, align 1
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/store-imm-amode.ll b/llvm/test/CodeGen/Hexagon/store-imm-amode.ll
index 3f475c1d68d2a..8dd26ae86fa21 100644
--- a/llvm/test/CodeGen/Hexagon/store-imm-amode.ll
+++ b/llvm/test/CodeGen/Hexagon/store-imm-amode.ll
@@ -6,18 +6,18 @@
 
 @var_i8 = global [10 x i8] zeroinitializer, align 8
 
-define void @store_imm_i8(i8* %p) nounwind {
+define void @store_imm_i8(ptr %p) nounwind {
 ; CHECK-LABEL: store_imm_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     memb(r0+#0) = #-1
 ; CHECK-NEXT:    }
-  store i8 255, i8* %p, align 4
+  store i8 255, ptr %p, align 4
   ret void
 }
 
-define void @store_rr_i8(i8* %p, i32 %x) nounwind {
+define void @store_rr_i8(ptr %p, i32 %x) nounwind {
 ; CHECK-LABEL: store_rr_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -25,8 +25,8 @@ define void @store_rr_i8(i8* %p, i32 %x) nounwind {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     memb(r0+r1<<#0) = r2.new
 ; CHECK-NEXT:    }
-  %t0 = getelementptr i8, i8* %p, i32 %x
-  store i8 255, i8* %t0, align 4
+  %t0 = getelementptr i8, ptr %p, i32 %x
+  store i8 255, ptr %t0, align 4
   ret void
 }
 
@@ -38,8 +38,8 @@ define void @store_io_i8(i32 %x) nounwind {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     memb(r0+##var_i8) = r1.new
 ; CHECK-NEXT:    }
-  %t0 = getelementptr [10 x i8], [10 x i8]* @var_i8, i32 0, i32 %x
-  store i8 255, i8* %t0, align 4
+  %t0 = getelementptr [10 x i8], ptr @var_i8, i32 0, i32 %x
+  store i8 255, ptr %t0, align 4
   ret void
 }
 
@@ -52,25 +52,25 @@ define void @store_ur_i8(i32 %x) nounwind {
 ; CHECK-NEXT:     memb(r0<<#2+##var_i8) = r1.new
 ; CHECK-NEXT:    }
   %t0 = shl i32 %x, 2
-  %t1 = getelementptr [10 x i8], [10 x i8]* @var_i8, i32 0, i32 %t0
-  store i8 255, i8* %t1, align 4
+  %t1 = getelementptr [10 x i8], ptr @var_i8, i32 0, i32 %t0
+  store i8 255, ptr %t1, align 4
   ret void
 }
 
 @var_i16 = global [10 x i16] zeroinitializer, align 8
 
-define void @store_imm_i16(i16* %p) nounwind {
+define void @store_imm_i16(ptr %p) nounwind {
 ; CHECK-LABEL: store_imm_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     memh(r0+#0) = #-1
 ; CHECK-NEXT:    }
-  store i16 65535, i16* %p, align 4
+  store i16 65535, ptr %p, align 4
   ret void
 }
 
-define void @store_rr_i16(i16* %p, i32 %x) nounwind {
+define void @store_rr_i16(ptr %p, i32 %x) nounwind {
 ; CHECK-LABEL: store_rr_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -78,8 +78,8 @@ define void @store_rr_i16(i16* %p, i32 %x) nounwind {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     memh(r0+r1<<#1) = r2.new
 ; CHECK-NEXT:    }
-  %t0 = getelementptr i16, i16* %p, i32 %x
-  store i16 65535, i16* %t0, align 4
+  %t0 = getelementptr i16, ptr %p, i32 %x
+  store i16 65535, ptr %t0, align 4
   ret void
 }
 
@@ -93,25 +93,25 @@ define void @store_ur_i16(i32 %x) nounwind {
 ; CHECK-NEXT:    {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:    }
-  %t0 = getelementptr [10 x i16], [10 x i16]* @var_i16, i32 0, i32 %x
-  store i16 65535, i16* %t0, align 4
+  %t0 = getelementptr [10 x i16], ptr @var_i16, i32 0, i32 %x
+  store i16 65535, ptr %t0, align 4
   ret void
 }
 
 @var_i32 = global [10 x i32] zeroinitializer, align 8
 
-define void @store_imm_i32(i32* %p) nounwind {
+define void @store_imm_i32(ptr %p) nounwind {
 ; CHECK-LABEL: store_imm_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     memw(r0+#0) = #-1
 ; CHECK-NEXT:    }
-  store i32 4294967295, i32* %p, align 4
+  store i32 4294967295, ptr %p, align 4
   ret void
 }
 
-define void @store_rr_i32(i32* %p, i32 %x) nounwind {
+define void @store_rr_i32(ptr %p, i32 %x) nounwind {
 ; CHECK-LABEL: store_rr_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    {
@@ -119,8 +119,8 @@ define void @store_rr_i32(i32* %p, i32 %x) nounwind {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     memw(r0+r1<<#2) = r2.new
 ; CHECK-NEXT:    }
-  %t0 = getelementptr i32, i32* %p, i32 %x
-  store i32 4294967295, i32* %t0, align 4
+  %t0 = getelementptr i32, ptr %p, i32 %x
+  store i32 4294967295, ptr %t0, align 4
   ret void
 }
 
@@ -132,8 +132,8 @@ define void @store_ur_i32(i32 %x) nounwind {
 ; CHECK-NEXT:     jumpr r31
 ; CHECK-NEXT:     memw(r0<<#2+##var_i32) = r1.new
 ; CHECK-NEXT:    }
-  %t0 = getelementptr [10 x i32], [10 x i32]* @var_i32, i32 0, i32 %x
-  store i32 4294967295, i32* %t0, align 4
+  %t0 = getelementptr [10 x i32], ptr @var_i32, i32 0, i32 %x
+  store i32 4294967295, ptr %t0, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/store-imm-byte.ll b/llvm/test/CodeGen/Hexagon/store-imm-byte.ll
index 8958ce28e3c59..801b8dc7d7c65 100644
--- a/llvm/test/CodeGen/Hexagon/store-imm-byte.ll
+++ b/llvm/test/CodeGen/Hexagon/store-imm-byte.ll
@@ -4,9 +4,9 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @f0(i8* %a0) #0 {
+define void @f0(ptr %a0) #0 {
 b0:
-  store i8 -1, i8* %a0, align 2
+  store i8 -1, ptr %a0, align 2
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/store-imm-halword.ll b/llvm/test/CodeGen/Hexagon/store-imm-halword.ll
index 55d1ced1b8e81..31701adf89142 100644
--- a/llvm/test/CodeGen/Hexagon/store-imm-halword.ll
+++ b/llvm/test/CodeGen/Hexagon/store-imm-halword.ll
@@ -4,9 +4,9 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @f0(i16* %a0) #0 {
+define void @f0(ptr %a0) #0 {
 b0:
-  store i16 -1, i16* %a0, align 2
+  store i16 -1, ptr %a0, align 2
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/store-imm-large-stack.ll b/llvm/test/CodeGen/Hexagon/store-imm-large-stack.ll
index c7e493aaedc4d..f1727e802f620 100644
--- a/llvm/test/CodeGen/Hexagon/store-imm-large-stack.ll
+++ b/llvm/test/CodeGen/Hexagon/store-imm-large-stack.ll
@@ -15,137 +15,137 @@ define void @fred() local_unnamed_addr #0 {
 b0:
   %v1 = alloca i32, align 4
   %v2 = alloca i32, align 4
-  %v3 = load i8, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 9), align 1
-  %v4 = load i8, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 10), align 2
-  store i32 24, i32* %v1, align 4
-  store i8 %v3, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 16), align 8
-  store i8 %v4, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 10), align 2
-  store i32 44, i32* %v2, align 4
-  store i16 0, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 4) to i16*), align 4
-  %v5 = load i16, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 11) to i16*), align 1
-  store i16 %v5, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 18) to i16*), align 2
-  %v6 = load i32, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 13) to i32*), align 1
-  store i32 %v6, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 20) to i32*), align 4
-  %v7 = load i16, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 17) to i16*), align 1
-  store i16 %v7, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 24) to i16*), align 8
-  %v8 = load i16, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 23) to i16*), align 1
-  store i16 %v8, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 32) to i16*), align 8
-  %v9 = load i32, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 25) to i32*), align 1
-  store i32 %v9, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 36) to i32*), align 4
-  %v10 = load i16, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 29) to i16*), align 1
-  store i16 %v10, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 40) to i16*), align 8
-  %v11 = load i32, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 31) to i32*), align 1
-  store i32 %v11, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 44) to i32*), align 4
-  %v12 = load i16, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 35) to i16*), align 1
-  store i16 %v12, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 48) to i16*), align 8
-  %v13 = load i32, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 37) to i32*), align 1
-  store i32 %v13, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 52) to i32*), align 4
-  %v14 = load i16, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 41) to i16*), align 1
-  store i16 %v14, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 56) to i16*), align 8
-  %v15 = load i32, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 43) to i32*), align 1
-  store i32 %v15, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 60) to i32*), align 4
-  %v16 = load i16, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 47) to i16*), align 1
-  store i16 %v16, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 64) to i16*), align 8
-  %v17 = load i32, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 49) to i32*), align 1
-  store i32 %v17, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 68) to i32*), align 4
-  %v18 = load i16, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 53) to i16*), align 1
-  store i16 %v18, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 72) to i16*), align 8
-  %v19 = load i32, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 55) to i32*), align 1
-  store i32 %v19, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 76) to i32*), align 4
-  %v20 = load i32, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 61) to i32*), align 1
-  store i32 %v20, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 84) to i32*), align 4
-  %v21 = load i32, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 73) to i32*), align 1
-  store i32 %v21, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 100) to i32*), align 4
-  store i32 104, i32* %v1, align 4
-  store i8 %v4, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 10), align 2
-  store i16 %v8, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 23) to i16*), align 1
-  store i32 %v9, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 25) to i32*), align 1
-  store i16 %v10, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 29) to i16*), align 1
-  store i32 %v11, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 31) to i32*), align 1
-  store i16 %v12, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 35) to i16*), align 1
-  store i32 %v13, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 37) to i32*), align 1
-  store i16 %v14, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 41) to i16*), align 1
-  store i32 %v15, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 43) to i32*), align 1
-  store i16 %v16, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 47) to i16*), align 1
-  store i32 %v17, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 49) to i32*), align 1
-  store i32 %v19, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 55) to i32*), align 1
-  store i32 %v20, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 61) to i32*), align 1
-  store i32 %v21, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 73) to i32*), align 1
+  %v3 = load i8, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 9), align 1
+  %v4 = load i8, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 10), align 2
+  store i32 24, ptr %v1, align 4
+  store i8 %v3, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 16), align 8
+  store i8 %v4, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 10), align 2
+  store i32 44, ptr %v2, align 4
+  store i16 0, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 4), align 4
+  %v5 = load i16, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 11), align 1
+  store i16 %v5, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 18), align 2
+  %v6 = load i32, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 13), align 1
+  store i32 %v6, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 20), align 4
+  %v7 = load i16, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 17), align 1
+  store i16 %v7, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 24), align 8
+  %v8 = load i16, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 23), align 1
+  store i16 %v8, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 32), align 8
+  %v9 = load i32, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 25), align 1
+  store i32 %v9, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 36), align 4
+  %v10 = load i16, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 29), align 1
+  store i16 %v10, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 40), align 8
+  %v11 = load i32, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 31), align 1
+  store i32 %v11, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 44), align 4
+  %v12 = load i16, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 35), align 1
+  store i16 %v12, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 48), align 8
+  %v13 = load i32, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 37), align 1
+  store i32 %v13, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 52), align 4
+  %v14 = load i16, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 41), align 1
+  store i16 %v14, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 56), align 8
+  %v15 = load i32, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 43), align 1
+  store i32 %v15, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 60), align 4
+  %v16 = load i16, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 47), align 1
+  store i16 %v16, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 64), align 8
+  %v17 = load i32, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 49), align 1
+  store i32 %v17, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 68), align 4
+  %v18 = load i16, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 53), align 1
+  store i16 %v18, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 72), align 8
+  %v19 = load i32, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 55), align 1
+  store i32 %v19, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 76), align 4
+  %v20 = load i32, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 61), align 1
+  store i32 %v20, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 84), align 4
+  %v21 = load i32, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 73), align 1
+  store i32 %v21, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 100), align 4
+  store i32 104, ptr %v1, align 4
+  store i8 %v4, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 10), align 2
+  store i16 %v8, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 23), align 1
+  store i32 %v9, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 25), align 1
+  store i16 %v10, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 29), align 1
+  store i32 %v11, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 31), align 1
+  store i16 %v12, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 35), align 1
+  store i32 %v13, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 37), align 1
+  store i16 %v14, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 41), align 1
+  store i32 %v15, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 43), align 1
+  store i16 %v16, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 47), align 1
+  store i32 %v17, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 49), align 1
+  store i32 %v19, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 55), align 1
+  store i32 %v20, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 61), align 1
+  store i32 %v21, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 73), align 1
   %v22 = trunc i32 %v6 to i8
-  store i8 %v22, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 20), align 4
-  store i32 24, i32* %v1, align 4
-  store i16 0, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 4) to i16*), align 4
-  store i8 %v3, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 9), align 1
-  store i16 %v5, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 11) to i16*), align 1
-  store i8 %v22, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 13), align 1
-  store i32 14, i32* %v2, align 4
-  store i8 %v4, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 17), align 1
-  %v23 = load i64, i64* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 11) to i64*), align 1
-  store i64 %v23, i64* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 24) to i64*), align 8
-  %v24 = load i16, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 19) to i16*), align 1
-  store i16 %v24, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 32) to i16*), align 8
-  %v25 = load i32, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 21) to i32*), align 1
-  store i32 %v25, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 36) to i32*), align 4
-  %v26 = load i32, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 25) to i32*), align 1
-  store i32 %v26, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 40) to i32*), align 8
-  %v27 = load i16, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 29) to i16*), align 1
-  store i16 %v27, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 44) to i16*), align 4
-  %v28 = load i16, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 31) to i16*), align 1
-  store i16 %v28, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 46) to i16*), align 2
-  %v29 = load i8, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 33), align 1
-  store i8 %v29, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 48), align 8
-  %v30 = load i8, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 34), align 2
-  store i8 %v30, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 56), align 8
-  %v31 = load i32, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 35) to i32*), align 1
-  store i32 %v31, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 60) to i32*), align 4
-  %v32 = load i32, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 39) to i32*), align 1
-  store i32 72, i32* %v1, align 4
-  store i32 0, i32* bitcast ([1024 x i8]* @g2 to i32*), align 8
-  store i16 0, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 4) to i16*), align 4
-  store i8 %v3, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 9), align 1
-  store i32 %v25, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 21) to i32*), align 1
-  store i32 %v26, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 25) to i32*), align 1
-  store i16 %v27, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 29) to i16*), align 1
-  store i16 %v28, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 31) to i16*), align 1
-  store i8 %v29, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 33), align 1
-  store i8 %v30, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 34), align 2
-  store i32 %v31, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 35) to i32*), align 1
-  store i32 %v32, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 39) to i32*), align 1
-  store i32 43, i32* %v2, align 4
-  %v33 = load i8, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g1, i32 0, i32 0), align 8
+  store i8 %v22, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 20), align 4
+  store i32 24, ptr %v1, align 4
+  store i16 0, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 4), align 4
+  store i8 %v3, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 9), align 1
+  store i16 %v5, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 11), align 1
+  store i8 %v22, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 13), align 1
+  store i32 14, ptr %v2, align 4
+  store i8 %v4, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 17), align 1
+  %v23 = load i64, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 11), align 1
+  store i64 %v23, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 24), align 8
+  %v24 = load i16, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 19), align 1
+  store i16 %v24, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 32), align 8
+  %v25 = load i32, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 21), align 1
+  store i32 %v25, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 36), align 4
+  %v26 = load i32, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 25), align 1
+  store i32 %v26, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 40), align 8
+  %v27 = load i16, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 29), align 1
+  store i16 %v27, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 44), align 4
+  %v28 = load i16, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 31), align 1
+  store i16 %v28, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 46), align 2
+  %v29 = load i8, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 33), align 1
+  store i8 %v29, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 48), align 8
+  %v30 = load i8, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 34), align 2
+  store i8 %v30, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 56), align 8
+  %v31 = load i32, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 35), align 1
+  store i32 %v31, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 60), align 4
+  %v32 = load i32, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 39), align 1
+  store i32 72, ptr %v1, align 4
+  store i32 0, ptr @g2, align 8
+  store i16 0, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 4), align 4
+  store i8 %v3, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 9), align 1
+  store i32 %v25, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 21), align 1
+  store i32 %v26, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 25), align 1
+  store i16 %v27, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 29), align 1
+  store i16 %v28, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 31), align 1
+  store i8 %v29, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 33), align 1
+  store i8 %v30, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 34), align 2
+  store i32 %v31, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 35), align 1
+  store i32 %v32, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 39), align 1
+  store i32 43, ptr %v2, align 4
+  %v33 = load i8, ptr @g1, align 8
   %v34 = zext i8 %v33 to i32
-  tail call void (i8*, ...) @printf(i8* getelementptr inbounds ([40 x i8], [40 x i8]* @g4, i32 0, i32 0), i32 %v34, i32 0) #0
-  %v35 = load i8, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 7), align 1
-  store i8 %v35, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 7), align 1
-  %v36 = load i16, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 17) to i16*), align 1
-  store i16 %v36, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 24) to i16*), align 8
-  %v37 = load i32, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 19) to i32*), align 1
-  %v38 = load i32, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 31) to i32*), align 1
-  store i32 %v38, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 44) to i32*), align 4
-  %v39 = load i16, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 35) to i16*), align 1
-  %v40 = load i32, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 37) to i32*), align 1
-  store i32 %v40, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 52) to i32*), align 4
-  %v41 = load i32, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 43) to i32*), align 1
-  store i32 %v41, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 60) to i32*), align 4
-  %v42 = load i16, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 47) to i16*), align 1
-  store i16 %v42, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 64) to i16*), align 8
-  %v43 = load i32, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 49) to i32*), align 1
-  store i32 %v43, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 68) to i32*), align 4
-  %v44 = load i16, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 59) to i16*), align 1
-  store i16 %v44, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 80) to i16*), align 8
-  %v45 = load i32, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 67) to i32*), align 1
-  store i32 %v45, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 92) to i32*), align 4
-  store i32 96, i32* %v1, align 4
-  store i8 %v35, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 7), align 1
-  store i16 %v36, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 17) to i16*), align 1
-  store i32 %v37, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 19) to i32*), align 1
-  store i32 %v38, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 31) to i32*), align 1
-  store i16 %v39, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 35) to i16*), align 1
-  call void (i8*, ...) @printf(i8* getelementptr inbounds ([40 x i8], [40 x i8]* @g4, i32 0, i32 0), i32 0, i32 0) #0
-  call void (i8*, ...) @printf(i8* getelementptr inbounds ([40 x i8], [40 x i8]* @g4, i32 0, i32 0), i32 undef, i32 0) #0
+  tail call void (ptr, ...) @printf(ptr @g4, i32 %v34, i32 0) #0
+  %v35 = load i8, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 7), align 1
+  store i8 %v35, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 7), align 1
+  %v36 = load i16, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 17), align 1
+  store i16 %v36, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 24), align 8
+  %v37 = load i32, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 19), align 1
+  %v38 = load i32, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 31), align 1
+  store i32 %v38, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 44), align 4
+  %v39 = load i16, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 35), align 1
+  %v40 = load i32, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 37), align 1
+  store i32 %v40, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 52), align 4
+  %v41 = load i32, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 43), align 1
+  store i32 %v41, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 60), align 4
+  %v42 = load i16, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 47), align 1
+  store i16 %v42, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 64), align 8
+  %v43 = load i32, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 49), align 1
+  store i32 %v43, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 68), align 4
+  %v44 = load i16, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 59), align 1
+  store i16 %v44, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 80), align 8
+  %v45 = load i32, ptr getelementptr inbounds ([1024 x i8], ptr @g0, i32 0, i32 67), align 1
+  store i32 %v45, ptr getelementptr inbounds ([1024 x i8], ptr @g3, i32 0, i32 92), align 4
+  store i32 96, ptr %v1, align 4
+  store i8 %v35, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 7), align 1
+  store i16 %v36, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 17), align 1
+  store i32 %v37, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 19), align 1
+  store i32 %v38, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 31), align 1
+  store i16 %v39, ptr getelementptr inbounds ([1024 x i8], ptr @g2, i32 0, i32 35), align 1
+  call void (ptr, ...) @printf(ptr @g4, i32 0, i32 0) #0
+  call void (ptr, ...) @printf(ptr @g4, i32 undef, i32 0) #0
   unreachable
 }
 
-declare void @printf(i8* nocapture readonly, ...) local_unnamed_addr #0
+declare void @printf(ptr nocapture readonly, ...) local_unnamed_addr #0
 
 attributes #0 = { nounwind "target-cpu"="hexagonv5" }

diff  --git a/llvm/test/CodeGen/Hexagon/store-imm-stack-object.ll b/llvm/test/CodeGen/Hexagon/store-imm-stack-object.ll
index 5566bda7683dd..8c5b11d5f1552 100644
--- a/llvm/test/CodeGen/Hexagon/store-imm-stack-object.ll
+++ b/llvm/test/CodeGen/Hexagon/store-imm-stack-object.ll
@@ -13,23 +13,19 @@ b0:
   %v2 = alloca i16, align 2
   %v3 = alloca i32, align 4
   %v4 = alloca i32, align 4
-  %v5 = getelementptr inbounds [1 x i8], [1 x i8]* %v1, i32 0, i32 0
-  call void @llvm.lifetime.start(i64 1, i8* %v5)
-  store i8 49, i8* %v5, align 1
-  %v6 = bitcast i16* %v2 to i8*
-  call void @llvm.lifetime.start(i64 2, i8* %v6)
-  store i16 50, i16* %v2, align 2
-  %v7 = bitcast i32* %v3 to i8*
-  call void @llvm.lifetime.start(i64 4, i8* %v7)
-  store i32 51, i32* %v3, align 4
-  %v8 = bitcast i32* %v4 to i8*
-  call void @llvm.lifetime.start(i64 4, i8* %v8)
-  store i32 875770417, i32* %v4, align 4
-  call void @test4(i8* %v5, i8* %v6, i8* %v7, i8* %v8)
-  call void @llvm.lifetime.end(i64 4, i8* %v8)
-  call void @llvm.lifetime.end(i64 4, i8* %v7)
-  call void @llvm.lifetime.end(i64 2, i8* %v6)
-  call void @llvm.lifetime.end(i64 1, i8* %v5)
+  call void @llvm.lifetime.start(i64 1, ptr %v1)
+  store i8 49, ptr %v1, align 1
+  call void @llvm.lifetime.start(i64 2, ptr %v2)
+  store i16 50, ptr %v2, align 2
+  call void @llvm.lifetime.start(i64 4, ptr %v3)
+  store i32 51, ptr %v3, align 4
+  call void @llvm.lifetime.start(i64 4, ptr %v4)
+  store i32 875770417, ptr %v4, align 4
+  call void @test4(ptr %v1, ptr %v2, ptr %v3, ptr %v4)
+  call void @llvm.lifetime.end(i64 4, ptr %v4)
+  call void @llvm.lifetime.end(i64 4, ptr %v3)
+  call void @llvm.lifetime.end(i64 2, ptr %v2)
+  call void @llvm.lifetime.end(i64 1, ptr %v1)
   ret void
 }
 
@@ -45,41 +41,35 @@ b0:
   %v4 = alloca i32, align 4
   %v5 = alloca [100 x i8], align 8
   %v6 = alloca [101 x i8], align 8
-  %v7 = getelementptr inbounds [1 x i8], [1 x i8]* %v1, i32 0, i32 0
-  call void @llvm.lifetime.start(i64 1, i8* %v7)
-  store i8 49, i8* %v7, align 1
-  %v8 = bitcast i16* %v2 to i8*
-  call void @llvm.lifetime.start(i64 2, i8* %v8)
-  store i16 50, i16* %v2, align 2
-  %v9 = bitcast i32* %v3 to i8*
-  call void @llvm.lifetime.start(i64 4, i8* %v9)
-  store i32 51, i32* %v3, align 4
-  %v10 = bitcast i32* %v4 to i8*
-  call void @llvm.lifetime.start(i64 4, i8* %v10)
-  store i32 875770417, i32* %v4, align 4
-  %v11 = getelementptr inbounds [100 x i8], [100 x i8]* %v5, i32 0, i32 0
-  call void @llvm.lifetime.start(i64 100, i8* %v11)
-  call void @llvm.memset.p0i8.i32(i8* align 8 %v11, i8 0, i32 100, i1 false)
-  store i8 50, i8* %v11, align 8
-  %v12 = getelementptr inbounds [101 x i8], [101 x i8]* %v6, i32 0, i32 0
-  call void @llvm.lifetime.start(i64 101, i8* %v12)
-  call void @llvm.memset.p0i8.i32(i8* align 8 %v12, i8 0, i32 101, i1 false)
-  store i8 49, i8* %v12, align 8
-  call void @test3(i8* %v7, i8* %v8, i8* %v9, i8* %v10, i8* %v11, i8* %v12)
-  call void @llvm.lifetime.end(i64 101, i8* %v12)
-  call void @llvm.lifetime.end(i64 100, i8* %v11)
-  call void @llvm.lifetime.end(i64 4, i8* %v10)
-  call void @llvm.lifetime.end(i64 4, i8* %v9)
-  call void @llvm.lifetime.end(i64 2, i8* %v8)
-  call void @llvm.lifetime.end(i64 1, i8* %v7)
+  call void @llvm.lifetime.start(i64 1, ptr %v1)
+  store i8 49, ptr %v1, align 1
+  call void @llvm.lifetime.start(i64 2, ptr %v2)
+  store i16 50, ptr %v2, align 2
+  call void @llvm.lifetime.start(i64 4, ptr %v3)
+  store i32 51, ptr %v3, align 4
+  call void @llvm.lifetime.start(i64 4, ptr %v4)
+  store i32 875770417, ptr %v4, align 4
+  call void @llvm.lifetime.start(i64 100, ptr %v5)
+  call void @llvm.memset.p0.i32(ptr align 8 %v5, i8 0, i32 100, i1 false)
+  store i8 50, ptr %v5, align 8
+  call void @llvm.lifetime.start(i64 101, ptr %v6)
+  call void @llvm.memset.p0.i32(ptr align 8 %v6, i8 0, i32 101, i1 false)
+  store i8 49, ptr %v6, align 8
+  call void @test3(ptr %v1, ptr %v2, ptr %v3, ptr %v4, ptr %v5, ptr %v6)
+  call void @llvm.lifetime.end(i64 101, ptr %v6)
+  call void @llvm.lifetime.end(i64 100, ptr %v5)
+  call void @llvm.lifetime.end(i64 4, ptr %v4)
+  call void @llvm.lifetime.end(i64 4, ptr %v3)
+  call void @llvm.lifetime.end(i64 2, ptr %v2)
+  call void @llvm.lifetime.end(i64 1, ptr %v1)
   ret void
 }
 
-declare void @llvm.lifetime.start(i64, i8* nocapture) #0
-declare void @llvm.lifetime.end(i64, i8* nocapture) #0
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1) #0
+declare void @llvm.lifetime.start(i64, ptr nocapture) #0
+declare void @llvm.lifetime.end(i64, ptr nocapture) #0
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1) #0
 
-declare void @test3(i8*, i8*, i8*, i8*, i8*, i8*)
-declare void @test4(i8*, i8*, i8*, i8*)
+declare void @test3(ptr, ptr, ptr, ptr, ptr, ptr)
+declare void @test4(ptr, ptr, ptr, ptr)
 
 attributes #0 = { argmemonly nounwind "target-cpu"="hexagonv60" }

diff  --git a/llvm/test/CodeGen/Hexagon/store-imm-word.ll b/llvm/test/CodeGen/Hexagon/store-imm-word.ll
index a8bc84d15c054..16006ae27848e 100644
--- a/llvm/test/CodeGen/Hexagon/store-imm-word.ll
+++ b/llvm/test/CodeGen/Hexagon/store-imm-word.ll
@@ -4,9 +4,9 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @f0(i32* %a0) #0 {
+define void @f0(ptr %a0) #0 {
 b0:
-  store i32 -1, i32* %a0, align 4
+  store i32 -1, ptr %a0, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/store-shift.ll b/llvm/test/CodeGen/Hexagon/store-shift.ll
index f92e23f4bc41b..b979c6d470fc0 100644
--- a/llvm/test/CodeGen/Hexagon/store-shift.ll
+++ b/llvm/test/CodeGen/Hexagon/store-shift.ll
@@ -13,32 +13,32 @@ target triple = "hexagon"
 @G = external global i32, align 4
 
 ; Function Attrs: norecurse nounwind
-define void @fred(i32* nocapture %A, [50 x i32]* nocapture %B, i32 %N, i32 %M) #0 {
+define void @fred(ptr nocapture %A, ptr nocapture %B, i32 %N, i32 %M) #0 {
 entry:
   %add = add nsw i32 %N, 5
-  %arrayidx = getelementptr inbounds i32, i32* %A, i32 %add
-  store i32 %M, i32* %arrayidx, align 4, !tbaa !1
+  %arrayidx = getelementptr inbounds i32, ptr %A, i32 %add
+  store i32 %M, ptr %arrayidx, align 4, !tbaa !1
   %add2 = add nsw i32 %N, 6
-  %arrayidx3 = getelementptr inbounds i32, i32* %A, i32 %add2
-  store i32 %M, i32* %arrayidx3, align 4, !tbaa !1
+  %arrayidx3 = getelementptr inbounds i32, ptr %A, i32 %add2
+  store i32 %M, ptr %arrayidx3, align 4, !tbaa !1
   %add4 = add nsw i32 %N, 35
-  %arrayidx5 = getelementptr inbounds i32, i32* %A, i32 %add4
-  store i32 %add, i32* %arrayidx5, align 4, !tbaa !1
-  %arrayidx8 = getelementptr inbounds [50 x i32], [50 x i32]* %B, i32 %add, i32 %add
-  store i32 %add, i32* %arrayidx8, align 4, !tbaa !1
+  %arrayidx5 = getelementptr inbounds i32, ptr %A, i32 %add4
+  store i32 %add, ptr %arrayidx5, align 4, !tbaa !1
+  %arrayidx8 = getelementptr inbounds [50 x i32], ptr %B, i32 %add, i32 %add
+  store i32 %add, ptr %arrayidx8, align 4, !tbaa !1
   %inc = add nsw i32 %N, 6
-  %arrayidx8.1 = getelementptr inbounds [50 x i32], [50 x i32]* %B, i32 %add, i32 %inc
-  store i32 %add, i32* %arrayidx8.1, align 4, !tbaa !1
+  %arrayidx8.1 = getelementptr inbounds [50 x i32], ptr %B, i32 %add, i32 %inc
+  store i32 %add, ptr %arrayidx8.1, align 4, !tbaa !1
   %sub = add nsw i32 %N, 4
-  %arrayidx10 = getelementptr inbounds [50 x i32], [50 x i32]* %B, i32 %add, i32 %sub
-  %0 = load i32, i32* %arrayidx10, align 4, !tbaa !1
+  %arrayidx10 = getelementptr inbounds [50 x i32], ptr %B, i32 %add, i32 %sub
+  %0 = load i32, ptr %arrayidx10, align 4, !tbaa !1
   %add11 = add nsw i32 %0, 1
-  store i32 %add11, i32* %arrayidx10, align 4, !tbaa !1
-  %1 = load i32, i32* %arrayidx, align 4, !tbaa !1
+  store i32 %add11, ptr %arrayidx10, align 4, !tbaa !1
+  %1 = load i32, ptr %arrayidx, align 4, !tbaa !1
   %add13 = add nsw i32 %N, 25
-  %arrayidx15 = getelementptr inbounds [50 x i32], [50 x i32]* %B, i32 %add13, i32 %add
-  store i32 %1, i32* %arrayidx15, align 4, !tbaa !1
-  store i32 5, i32* @G, align 4, !tbaa !1
+  %arrayidx15 = getelementptr inbounds [50 x i32], ptr %B, i32 %add13, i32 %add
+  store i32 %1, ptr %arrayidx15, align 4, !tbaa !1
+  store i32 5, ptr @G, align 4, !tbaa !1
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/store-vector-pred.ll b/llvm/test/CodeGen/Hexagon/store-vector-pred.ll
index d9d841cacc5bb..6146bf55b0edb 100644
--- a/llvm/test/CodeGen/Hexagon/store-vector-pred.ll
+++ b/llvm/test/CodeGen/Hexagon/store-vector-pred.ll
@@ -11,13 +11,13 @@ b0:
   br i1 undef, label %b2, label %b1
 
 b1:                                               ; preds = %b0
-  %v0 = load i8, i8* undef, align 1
+  %v0 = load i8, ptr undef, align 1
   %v1 = zext i8 %v0 to i32
   %v2 = add nsw i32 %v1, -1
   %v3 = insertelement <128 x i32> undef, i32 %v2, i32 0
   %v4 = shufflevector <128 x i32> %v3, <128 x i32> undef, <128 x i32> zeroinitializer
   %v5 = icmp ule <128 x i32> undef, %v4
-  %v6 = call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* nonnull undef, i32 1, <128 x i1> %v5, <128 x i8> undef)
+  %v6 = call <128 x i8> @llvm.masked.load.v128i8.p0(ptr nonnull undef, i32 1, <128 x i1> %v5, <128 x i8> undef)
   %v7 = lshr <128 x i8> %v6, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
   %v8 = and <128 x i8> %v7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %v9 = zext <128 x i8> %v8 to <128 x i32>
@@ -31,9 +31,8 @@ b1:                                               ; preds = %b0
   %v17 = add <128 x i32> %v16, undef
   %v18 = add <128 x i32> %v17, undef
   %v19 = extractelement <128 x i32> %v18, i32 0
-  %v20 = getelementptr inbounds i8, i8* null, i32 2160
-  %v21 = bitcast i8* %v20 to i32*
-  store i32 %v19, i32* %v21, align 4
+  %v20 = getelementptr inbounds i8, ptr null, i32 2160
+  store i32 %v19, ptr %v20, align 4
   br label %b2
 
 b2:                                               ; preds = %b1, %b0
@@ -41,7 +40,7 @@ b2:                                               ; preds = %b1, %b0
 }
 
 ; Function Attrs: argmemonly nounwind readonly willreturn
-declare <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>*, i32 immarg, <128 x i1>, <128 x i8>) #1
+declare <128 x i8> @llvm.masked.load.v128i8.p0(ptr, i32 immarg, <128 x i1>, <128 x i8>) #1
 
 attributes #0 = { "target-features"="+hvx-length128b,+hvxv67,+v67,-long-calls" }
 attributes #1 = { argmemonly nounwind readonly willreturn }

diff  --git a/llvm/test/CodeGen/Hexagon/store-widen-aliased-load.ll b/llvm/test/CodeGen/Hexagon/store-widen-aliased-load.ll
index d0bd6850bb825..6c04e7a1e6ea4 100644
--- a/llvm/test/CodeGen/Hexagon/store-widen-aliased-load.ll
+++ b/llvm/test/CodeGen/Hexagon/store-widen-aliased-load.ll
@@ -7,13 +7,12 @@ target triple = "hexagon"
 
 %struct.type_t = type { i8, i8, [2 x i8] }
 
-define zeroext i8 @foo(%struct.type_t* nocapture %p) nounwind {
+define zeroext i8 @foo(ptr nocapture %p) nounwind {
 entry:
-  %a = getelementptr inbounds %struct.type_t, %struct.type_t* %p, i32 0, i32 0
-  store i8 0, i8* %a, align 2, !tbaa !0
-  %b = getelementptr inbounds %struct.type_t, %struct.type_t* %p, i32 0, i32 1
-  %0 = load i8, i8* %b, align 1, !tbaa !0
-  store i8 0, i8* %b, align 1, !tbaa !0
+  store i8 0, ptr %p, align 2, !tbaa !0
+  %b = getelementptr inbounds %struct.type_t, ptr %p, i32 0, i32 1
+  %0 = load i8, ptr %b, align 1, !tbaa !0
+  store i8 0, ptr %b, align 1, !tbaa !0
   ret i8 %0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/store-widen-negv.ll b/llvm/test/CodeGen/Hexagon/store-widen-negv.ll
index 50a633d82be46..5cdf250aead01 100644
--- a/llvm/test/CodeGen/Hexagon/store-widen-negv.ll
+++ b/llvm/test/CodeGen/Hexagon/store-widen-negv.ll
@@ -1,11 +1,11 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s
 ; We shouldn't see a 32-bit expansion of -120, just the uint8 value.
 ; CHECK: #136
-define i32 @foo([4 x i8]* %ptr) {
+define i32 @foo(ptr %ptr) {
 entry:
-  %msb = getelementptr inbounds [4 x i8], [4 x i8]* %ptr, i32 0, i32 3
-  %lsb = getelementptr inbounds [4 x i8], [4 x i8]* %ptr, i32 0, i32 2
-  store i8 0, i8* %msb
-  store i8 -120, i8* %lsb, align 2
+  %msb = getelementptr inbounds [4 x i8], ptr %ptr, i32 0, i32 3
+  %lsb = getelementptr inbounds [4 x i8], ptr %ptr, i32 0, i32 2
+  store i8 0, ptr %msb
+  store i8 -120, ptr %lsb, align 2
   ret i32 0
 }

diff  --git a/llvm/test/CodeGen/Hexagon/store-widen-negv2.ll b/llvm/test/CodeGen/Hexagon/store-widen-negv2.ll
index 6abe01a6ed9fc..e6b0483c1f6fe 100644
--- a/llvm/test/CodeGen/Hexagon/store-widen-negv2.ll
+++ b/llvm/test/CodeGen/Hexagon/store-widen-negv2.ll
@@ -7,12 +7,11 @@ target datalayout = "e-m:e-p:32:32-i1:32-i64:64-a:0-v32:32-n16:32"
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @foo(i16* nocapture %s) #0 {
+define void @foo(ptr nocapture %s) #0 {
 entry:
-  %0 = bitcast i16* %s to i8*
-  store i8 -2, i8* %0, align 2
-  %add.ptr = getelementptr inbounds i8, i8* %0, i32 1
-  store i8 -1, i8* %add.ptr, align 1
+  store i8 -2, ptr %s, align 2
+  %add.ptr = getelementptr inbounds i8, ptr %s, i32 1
+  store i8 -1, ptr %add.ptr, align 1
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/store-widen-subreg.ll b/llvm/test/CodeGen/Hexagon/store-widen-subreg.ll
index a21196c6806a2..86a7db4849152 100644
--- a/llvm/test/CodeGen/Hexagon/store-widen-subreg.ll
+++ b/llvm/test/CodeGen/Hexagon/store-widen-subreg.ll
@@ -6,10 +6,10 @@
 
 ; CHECK: memh({{r[0-9]+}}+#{{[0-9]+}}) =
 
-%s.0 = type { %s.1, %s.2, %s.3*, %s.0*, i32, i8, i8, i32, i8, i8, i32, i32, i8, i32, %s.4*, [2 x %s.4*], %s.13, i8*, %s.15*, %s.26, i32, i32, i32 }
+%s.0 = type { %s.1, %s.2, ptr, ptr, i32, i8, i8, i32, i8, i8, i32, i32, i8, i32, ptr, [2 x ptr], %s.13, ptr, ptr, %s.26, i32, i32, i32 }
 %s.1 = type { i64, [8 x i8] }
-%s.2 = type { i64*, i32, i8 }
-%s.3 = type { %s.1, %s.26, %s.26, i32, i32, i32, void (%s.1*)*, void (%s.1*)*, i32 (%s.1*)*, void (%s.1*)*, i32, i64* }
+%s.2 = type { ptr, i32, i8 }
+%s.3 = type { %s.1, %s.26, %s.26, i32, i32, i32, ptr, ptr, ptr, ptr, i32, ptr }
 %s.4 = type { %s.5, %s.12 }
 %s.5 = type { i32, i32, i32, i32, i32, i32, i32, i32, %s.6 }
 %s.6 = type { %s.7 }
@@ -19,13 +19,13 @@
 %s.10 = type { i32, i32 }
 %s.11 = type { i32, i32, i32, i32, i32, i32, i32, i32 }
 %s.12 = type { i32, i32, i32, i32, i32, i32, i32 }
-%s.13 = type { i32, i32, i32, %s.14*, %s.14*, %s.14*, %s.14*, i32 }
-%s.14 = type { %s.14*, i8, i32, %s.4*, i32, %s.4* }
+%s.13 = type { i32, i32, i32, ptr, ptr, ptr, ptr, i32 }
+%s.14 = type { ptr, i8, i32, ptr, i32, ptr }
 %s.15 = type { %s.16, %s.17, %s.19, %s.20, %s.21, %s.24 }
 %s.16 = type { i64, i64, i64, i32 }
 %s.17 = type { i16, i16, i8, [4 x %s.18], i8, i8 }
 %s.18 = type { i32, i32 }
-%s.19 = type { i32*, i32, i32* }
+%s.19 = type { ptr, i32, ptr }
 %s.20 = type { i8, i8, i32, i32, i8, i32, i32, i32, i32, i32 }
 %s.21 = type { i32, %s.22 }
 %s.22 = type { %s.23 }
@@ -36,20 +36,20 @@
 %s.27 = type { i16, i16, i32, i32, i32 }
 
 ; Function Attrs: nounwind
-define void @f0(i64* %a0, i1 %a1) #0 {
+define void @f0(ptr %a0, i1 %a1) #0 {
 b0:
-  %v0 = load i64, i64* %a0, align 8
+  %v0 = load i64, ptr %a0, align 8
   br i1 %a1, label %b1, label %b2
 
 b1:                                               ; preds = %b0
   %v1 = trunc i64 %v0 to i32
-  %v2 = inttoptr i32 %v1 to %s.0*
-  %v3 = getelementptr inbounds %s.0, %s.0* %v2, i32 0, i32 8
-  store i8 0, i8* %v3, align 8
-  %v4 = getelementptr inbounds %s.0, %s.0* %v2, i32 0, i32 9
-  store i8 1, i8* %v4, align 1
-  %v5 = getelementptr inbounds %s.0, %s.0* %v2, i32 0, i32 6
-  store i8 1, i8* %v5, align 1
+  %v2 = inttoptr i32 %v1 to ptr
+  %v3 = getelementptr inbounds %s.0, ptr %v2, i32 0, i32 8
+  store i8 0, ptr %v3, align 8
+  %v4 = getelementptr inbounds %s.0, ptr %v2, i32 0, i32 9
+  store i8 1, ptr %v4, align 1
+  %v5 = getelementptr inbounds %s.0, ptr %v2, i32 0, i32 6
+  store i8 1, ptr %v5, align 1
   ret void
 
 b2:                                               ; preds = %b0

diff  --git a/llvm/test/CodeGen/Hexagon/store-widen.ll b/llvm/test/CodeGen/Hexagon/store-widen.ll
index 9428093901c58..9691ec3161358 100644
--- a/llvm/test/CodeGen/Hexagon/store-widen.ll
+++ b/llvm/test/CodeGen/Hexagon/store-widen.ll
@@ -2,14 +2,14 @@
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
 target triple = "hexagon"
 
-define void @foo(i16* nocapture %a) nounwind {
+define void @foo(ptr nocapture %a) nounwind {
 entry:
 ; There should be a memw, not memh.
 ; CHECK: memw
   ; Cheated on the alignment, just to trigger the widening.
-  store i16 0, i16* %a, align 4, !tbaa !0
-  %arrayidx1 = getelementptr inbounds i16, i16* %a, i32 1
-  store i16 0, i16* %arrayidx1, align 2, !tbaa !0
+  store i16 0, ptr %a, align 4, !tbaa !0
+  %arrayidx1 = getelementptr inbounds i16, ptr %a, i32 1
+  store i16 0, ptr %arrayidx1, align 2, !tbaa !0
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/store1.ll b/llvm/test/CodeGen/Hexagon/store1.ll
index f6329c35f46a6..7cc7a278fe86d 100644
--- a/llvm/test/CodeGen/Hexagon/store1.ll
+++ b/llvm/test/CodeGen/Hexagon/store1.ll
@@ -6,34 +6,34 @@
 ; CHECK: memw(r{{[0-9]+}}+#160) = ##g0+144
 ; CHECK: memw(r{{[0-9]+}}+#172) = ##f3
 
-%s.0 = type { [156 x i8], i8*, i8*, i8, i8*, void (i8*)*, i8 }
+%s.0 = type { [156 x i8], ptr, ptr, i8, ptr, ptr, i8 }
 
 @g0 = common global %s.0 zeroinitializer, align 4
 
 ; Function Attrs: nounwind
-define void @f0(%s.0* %a0) #0 {
+define void @f0(ptr %a0) #0 {
 b0:
-  %v0 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 1
-  store i8* null, i8** %v0, align 4
+  %v0 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 1
+  store ptr null, ptr %v0, align 4
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @f1(%s.0* %a0) #0 {
+define void @f1(ptr %a0) #0 {
 b0:
-  %v0 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 2
-  store i8* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 0, i32 144), i8** %v0, align 4
+  %v0 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 2
+  store ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 0, i32 144), ptr %v0, align 4
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @f2(%s.0* %a0) #0 {
+define void @f2(ptr %a0) #0 {
 b0:
-  %v0 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 5
-  store void (i8*)* @f3, void (i8*)** %v0, align 4
+  %v0 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 5
+  store ptr @f3, ptr %v0, align 4
   ret void
 }
 
-declare void @f3(i8*)
+declare void @f3(ptr)
 
 attributes #0 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/store_abs.ll b/llvm/test/CodeGen/Hexagon/store_abs.ll
index 5860b4de997a9..8a5f7eaf86b50 100644
--- a/llvm/test/CodeGen/Hexagon/store_abs.ll
+++ b/llvm/test/CodeGen/Hexagon/store_abs.ll
@@ -8,10 +8,10 @@
 target triple = "hexagon-unknown--elf"
 
 %s.0 = type { %s.1, %s.2 }
-%s.1 = type { %s.1*, %s.1* }
+%s.1 = type { ptr, ptr }
 %s.2 = type { %s.3 }
 %s.3 = type { %s.4 }
-%s.4 = type { %s.5, i32, i32, i8* }
+%s.4 = type { %s.5, i32, i32, ptr }
 %s.5 = type { i32 }
 
 @g0 = external global %s.0, align 4
@@ -19,8 +19,8 @@ target triple = "hexagon-unknown--elf"
 ; Function Attrs: nounwind
 define void @f0() #0 section ".init.text" {
 b0:
-  store %s.1* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 0), %s.1** getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 0, i32 0), align 4
-  store %s.1* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 0), %s.1** getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 0, i32 1), align 4
+  store ptr @g0, ptr @g0, align 4
+  store ptr @g0, ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 0, i32 1), align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/storerd-io-over-rr.ll b/llvm/test/CodeGen/Hexagon/storerd-io-over-rr.ll
index 8727330ca5bdf..3b67117f0cd34 100644
--- a/llvm/test/CodeGen/Hexagon/storerd-io-over-rr.ll
+++ b/llvm/test/CodeGen/Hexagon/storerd-io-over-rr.ll
@@ -4,8 +4,8 @@
 
 define void @fred(i32 %p, i64 %v) #0 {
   %t0 = add i32 %p, 4
-  %t1 = inttoptr i32 %t0 to i64*
-  store i64 %v, i64* %t1
+  %t1 = inttoptr i32 %t0 to ptr
+  store i64 %v, ptr %t1
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/storerinewabs.ll b/llvm/test/CodeGen/Hexagon/storerinewabs.ll
index 73e513a8bceee..1d40459a6e6a6 100644
--- a/llvm/test/CodeGen/Hexagon/storerinewabs.ll
+++ b/llvm/test/CodeGen/Hexagon/storerinewabs.ll
@@ -9,7 +9,7 @@
 define void @foo(i32 %x) #0 {
 entry:
   %add = add nsw i32 %x, 1
-  store i32 %add, i32* @global, align 4
+  store i32 %add, ptr @global, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/struct-const.ll b/llvm/test/CodeGen/Hexagon/struct-const.ll
index 69a003fe8c6c3..273fc571bdd0a 100644
--- a/llvm/test/CodeGen/Hexagon/struct-const.ll
+++ b/llvm/test/CodeGen/Hexagon/struct-const.ll
@@ -6,63 +6,63 @@
 
 target triple = "hexagon"
 
-%s.8 = type { %s.9, i8*, i8* }
+%s.8 = type { %s.9, ptr, ptr }
 %s.9 = type { i16, i16, i32 }
-%s.0 = type { i32, %s.1*, %s.1*, i32, i32, i32, i32, i32, {}*, void (i8*)*, void (i8*)*, i8*, [32 x %s.4], i32, i16, i16, i16, i16, [16 x %s.7], i16 }
+%s.0 = type { i32, ptr, ptr, i32, i32, i32, i32, i32, ptr, ptr, ptr, ptr, [32 x %s.4], i32, i16, i16, i16, i16, [16 x %s.7], i16 }
 %s.1 = type { i16, i8, i8, i32, %s.2 }
-%s.2 = type { %s.3, i8* }
-%s.3 = type { i8* }
-%s.4 = type { %s.5*, %s.5*, i16, i32 }
-%s.5 = type { %s.6, %s.5* }
+%s.2 = type { %s.3, ptr }
+%s.3 = type { ptr }
+%s.4 = type { ptr, ptr, i16, i32 }
+%s.5 = type { %s.6, ptr }
 %s.6 = type { i16, i8, i8, i32 }
-%s.7 = type { %s.1*, i32 }
-%s.11 = type { i32, %s.12* }
+%s.7 = type { ptr, i32 }
+%s.11 = type { i32, ptr }
 %s.12 = type opaque
 
- at g0 = internal constant %s.8 { %s.9 { i16 531, i16 0, i32 16 }, i8* getelementptr inbounds ([48 x i8], [48 x i8]* @g1, i32 0, i32 0), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @g2, i32 0, i32 0) }, align 4
+ at g0 = internal constant %s.8 { %s.9 { i16 531, i16 0, i32 16 }, ptr @g1, ptr @g2 }, align 4
 @g1 = private unnamed_addr constant [48 x i8] c"In task 0x%x, Assertion heap_ptr != NULL failed\00", align 8
 @g2 = private unnamed_addr constant [10 x i8] c"xxxxxxx.c\00", align 8
 
 ; Function Attrs: nounwind
-define void @f0(%s.0* %a0) #0 {
+define void @f0(ptr %a0) #0 {
 b0:
-  %v0 = icmp eq %s.0* %a0, null
+  %v0 = icmp eq ptr %a0, null
   br i1 %v0, label %b1, label %b4
 
 b1:                                               ; preds = %b0
-  %v1 = tail call %s.11* @f1() #0
-  %v2 = icmp eq %s.11* %v1, null
+  %v1 = tail call ptr @f1() #0
+  %v2 = icmp eq ptr %v1, null
   br i1 %v2, label %b3, label %b2
 
 b2:                                               ; preds = %b1
-  %v3 = ptrtoint %s.11* %v1 to i32
-  tail call void @f2(%s.8* @g0, i32 %v3, i32 0, i32 0) #0
+  %v3 = ptrtoint ptr %v1 to i32
+  tail call void @f2(ptr @g0, i32 %v3, i32 0, i32 0) #0
   br label %b5
 
 b3:                                               ; preds = %b1
-  tail call void @f3(%s.8* @g0) #0
+  tail call void @f3(ptr @g0) #0
   br label %b5
 
 b4:                                               ; preds = %b0
-  %v4 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 9
-  store void (i8*)* @f4, void (i8*)** %v4, align 4, !tbaa !0
-  %v5 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 10
-  store void (i8*)* @f5, void (i8*)** %v5, align 4, !tbaa !0
+  %v4 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 9
+  store ptr @f4, ptr %v4, align 4, !tbaa !0
+  %v5 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 10
+  store ptr @f5, ptr %v5, align 4, !tbaa !0
   br label %b5
 
 b5:                                               ; preds = %b4, %b3, %b2
   ret void
 }
 
-declare %s.11* @f1()
+declare ptr @f1()
 
-declare void @f2(%s.8*, i32, i32, i32)
+declare void @f2(ptr, i32, i32, i32)
 
-declare void @f3(%s.8*)
+declare void @f3(ptr)
 
-declare void @f4(i8*)
+declare void @f4(ptr)
 
-declare void @f5(i8*)
+declare void @f5(ptr)
 
 attributes #0 = { nounwind }
 

diff  --git a/llvm/test/CodeGen/Hexagon/struct_args.ll b/llvm/test/CodeGen/Hexagon/struct_args.ll
index 11c23b82ec4a2..ef53fbb6bb685 100644
--- a/llvm/test/CodeGen/Hexagon/struct_args.ll
+++ b/llvm/test/CodeGen/Hexagon/struct_args.ll
@@ -8,7 +8,7 @@
 
 define void @foo() nounwind {
 entry:
-  %0 = load i64, i64* bitcast (%struct.small* @s1 to i64*), align 4
+  %0 = load i64, ptr @s1, align 4
   call void @bar(i64 %0)
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/struct_args_large.ll b/llvm/test/CodeGen/Hexagon/struct_args_large.ll
index 86550cce0cbee..567c0c3f6538d 100644
--- a/llvm/test/CodeGen/Hexagon/struct_args_large.ll
+++ b/llvm/test/CodeGen/Hexagon/struct_args_large.ll
@@ -10,8 +10,8 @@
 
 define void @foo() nounwind {
 entry:
-  call void @bar(%struct.large* byval(%struct.large) @s2)
+  call void @bar(ptr byval(%struct.large) @s2)
   ret void
 }
 
-declare void @bar(%struct.large* byval(%struct.large))
+declare void @bar(ptr byval(%struct.large))

diff  --git a/llvm/test/CodeGen/Hexagon/struct_copy.ll b/llvm/test/CodeGen/Hexagon/struct_copy.ll
index 076ad7845831f..eb2368b1bfca6 100644
--- a/llvm/test/CodeGen/Hexagon/struct_copy.ll
+++ b/llvm/test/CodeGen/Hexagon/struct_copy.ll
@@ -21,13 +21,12 @@
 define i32 @f0() #0 {
 b0:
   %v0 = alloca %s.0, align 4
-  %v1 = bitcast %s.0* %v0 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %v1, i8* align 4 bitcast (%s.0* @g0 to i8*), i32 24, i1 false)
-  call void @f1(%s.0* %v0) #0
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %v0, ptr align 4 @g0, i32 24, i1 false)
+  call void @f1(ptr %v0) #0
   ret i32 0
 }
 
-declare void @f1(%s.0*)
+declare void @f1(ptr)
 
 ; CHECK-LABEL: f2:
 ; CHECK: [[REG2:(r[0-9]+)]] = {{[#]+}}g1
@@ -36,13 +35,12 @@ declare void @f1(%s.0*)
 define i32 @f2() #0 {
 b0:
   %v0 = alloca %s.1, align 8
-  %v1 = bitcast %s.1* %v0 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v1, i8* align 8 bitcast (%s.1* @g1 to i8*), i32 48, i1 false)
-  call void @f3(%s.1* %v0) #0
+  call void @llvm.memcpy.p0.p0.i32(ptr align 8 %v0, ptr align 8 @g1, i32 48, i1 false)
+  call void @f3(ptr %v0) #0
   ret i32 0
 }
 
-declare void @f3(%s.1*)
+declare void @f3(ptr)
 
 ; CHECK-LABEL: f4:
 ; CHECK: [[REG1:(r[0-9]+)]] = {{[#]+}}g2
@@ -52,13 +50,12 @@ declare void @f3(%s.1*)
 define i32 @f4() #0 {
 b0:
   %v0 = alloca %s.2, align 2
-  %v1 = bitcast %s.2* %v0 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 %v1, i8* align 2 bitcast (%s.2* @g2 to i8*), i32 12, i1 false)
-  call void @f5(%s.2* %v0) #0
+  call void @llvm.memcpy.p0.p0.i32(ptr align 2 %v0, ptr align 2 @g2, i32 12, i1 false)
+  call void @f5(ptr %v0) #0
   ret i32 0
 }
 
-declare void @f5(%s.2*)
+declare void @f5(ptr)
 
 ; CHECK-LABEL: f6:
 ; CHECK: [[REG1:(r[0-9]+)]] = {{[#]+}}g3
@@ -68,15 +65,14 @@ declare void @f5(%s.2*)
 define i32 @f6() #0 {
 b0:
   %v0 = alloca %s.3, align 1
-  %v1 = getelementptr inbounds %s.3, %s.3* %v0, i32 0, i32 0
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %v1, i8* align 1 getelementptr inbounds (%s.3, %s.3* @g3, i32 0, i32 0), i32 6, i1 false)
-  call void @f7(%s.3* %v0) #0
+  call void @llvm.memcpy.p0.p0.i32(ptr align 1 %v0, ptr align 1 @g3, i32 6, i1 false)
+  call void @f7(ptr %v0) #0
   ret i32 0
 }
 
-declare void @f7(%s.3*)
+declare void @f7(ptr)
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #1
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1) #1
 
 attributes #0 = { nounwind }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/struct_copy_sched_r16.ll b/llvm/test/CodeGen/Hexagon/struct_copy_sched_r16.ll
index 4b075163e83a7..9f594207a1869 100644
--- a/llvm/test/CodeGen/Hexagon/struct_copy_sched_r16.ll
+++ b/llvm/test/CodeGen/Hexagon/struct_copy_sched_r16.ll
@@ -13,16 +13,15 @@
 define i32 @f0() #0 {
 b0:
   %v0 = alloca %s.3, align 1
-  %v1 = getelementptr inbounds %s.3, %s.3* %v0, i32 0, i32 0
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %v1, i8* align 1 getelementptr inbounds (%s.3, %s.3* @g0, i32 0, i32 0), i32 6, i1 false)
-  call void @f1(%s.3* %v0) #0
+  call void @llvm.memcpy.p0.p0.i32(ptr align 1 %v0, ptr align 1 @g0, i32 6, i1 false)
+  call void @f1(ptr %v0) #0
   ret i32 0
 }
 
-declare void @f1(%s.3*)
+declare void @f1(ptr)
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #1
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1) #1
 
 attributes #0 = { nounwind }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/sub-add.ll b/llvm/test/CodeGen/Hexagon/sub-add.ll
index 44cb38b75f87b..83ade9cec0b20 100644
--- a/llvm/test/CodeGen/Hexagon/sub-add.ll
+++ b/llvm/test/CodeGen/Hexagon/sub-add.ll
@@ -9,38 +9,38 @@
 
 target triple = "hexagon"
 
-%s.0 = type { i16, i16, i16, i16*, i16, i16, i16, i16, i16, i16, i32, i32, i16, %s.1*, i16, %s.2*, i16, %s.3*, i16*, i16*, i16, i16*, i16*, i16, i16*, i16, i16*, i8*, %s.5, %s.4, %s.5, %s.5, i32, i32, i32, %s.6, i32, i32, i16, %s.7, %s.7 }
+%s.0 = type { i16, i16, i16, ptr, i16, i16, i16, i16, i16, i16, i32, i32, i16, ptr, i16, ptr, i16, ptr, ptr, ptr, i16, ptr, ptr, i16, ptr, i16, ptr, ptr, %s.5, %s.4, %s.5, %s.5, i32, i32, i32, %s.6, i32, i32, i16, %s.7, %s.7 }
 %s.1 = type { i16, i16, i16, i16 }
 %s.2 = type { i16, i16 }
 %s.3 = type { i16, i16, i16, i16 }
-%s.4 = type { i32, i32, i16* }
-%s.5 = type { i32, i32, i32* }
+%s.4 = type { i32, i32, ptr }
+%s.5 = type { i32, i32, ptr }
 %s.6 = type { i16, i16, i16, i16 }
-%s.7 = type { i16, i16, i16, i16, i16, i16*, i16*, i16*, i8*, i16*, i16*, i16*, i8* }
+%s.7 = type { i16, i16, i16, i16, i16, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
 
 ; Function Attrs: nounwind
-define i32 @f0(%s.0* %a0) #0 {
+define i32 @f0(ptr %a0) #0 {
 b0:
   %v0 = alloca i16, align 2
-  %v1 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 12
-  %v2 = load i16, i16* %v1, align 2, !tbaa !0
+  %v1 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 12
+  %v2 = load i16, ptr %v1, align 2, !tbaa !0
   %v3 = icmp sgt i16 %v2, 0
   br i1 %v3, label %b1, label %b9
 
 b1:                                               ; preds = %b0
-  %v4 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 17
-  %v5 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 29
+  %v4 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 17
+  %v5 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 29
   br label %b2
 
 b2:                                               ; preds = %b7, %b1
   %v6 = phi i16 [ %v2, %b1 ], [ %v23, %b7 ]
   %v7 = phi i32 [ 0, %b1 ], [ %v25, %b7 ]
   %v8 = phi i16 [ 1, %b1 ], [ %v26, %b7 ]
-  %v9 = load %s.3*, %s.3** %v4, align 4, !tbaa !4
-  %v10 = getelementptr inbounds %s.3, %s.3* %v9, i32 %v7, i32 0
-  %v11 = load i16, i16* %v10, align 2, !tbaa !0
-  %v12 = getelementptr inbounds %s.3, %s.3* %v9, i32 %v7, i32 1
-  %v13 = load i16, i16* %v12, align 2, !tbaa !0
+  %v9 = load ptr, ptr %v4, align 4, !tbaa !4
+  %v10 = getelementptr inbounds %s.3, ptr %v9, i32 %v7, i32 0
+  %v11 = load i16, ptr %v10, align 2, !tbaa !0
+  %v12 = getelementptr inbounds %s.3, ptr %v9, i32 %v7, i32 1
+  %v13 = load i16, ptr %v12, align 2, !tbaa !0
   %v14 = icmp sgt i16 %v11, %v13
   br i1 %v14, label %b6, label %b3
 
@@ -52,13 +52,13 @@ b3:                                               ; preds = %b2
 
 b4:                                               ; preds = %b4, %b3
   %v18 = phi i32 [ %v15, %b3 ], [ %v20, %b4 ]
-  %v19 = call i32 bitcast (i32 (...)* @f1 to i32 (%s.4*, i32, i32, i16*)*)(%s.4* %v5, i32 %v7, i32 undef, i16* %v0) #0
+  %v19 = call i32 @f1(ptr %v5, i32 %v7, i32 undef, ptr %v0) #0
   %v20 = add i32 %v18, 1
   %v21 = icmp eq i32 %v20, %v17
   br i1 %v21, label %b5, label %b4
 
 b5:                                               ; preds = %b4
-  %v22 = load i16, i16* %v1, align 2, !tbaa !0
+  %v22 = load i16, ptr %v1, align 2, !tbaa !0
   br label %b6
 
 b6:                                               ; preds = %b5, %b2

diff  --git a/llvm/test/CodeGen/Hexagon/subi-asl.ll b/llvm/test/CodeGen/Hexagon/subi-asl.ll
index 0fd88384b89df..eb564c399bbe8 100644
--- a/llvm/test/CodeGen/Hexagon/subi-asl.ll
+++ b/llvm/test/CodeGen/Hexagon/subi-asl.ll
@@ -17,51 +17,47 @@
 @this_insn_number = external global i32, align 4
 
 ; Function Attrs: nounwind
-define void @yes_sub_asl(%struct.rtx_def* %reg, %struct.rtx_def* nocapture readonly %setter) #0 {
+define void @yes_sub_asl(ptr %reg, ptr nocapture readonly %setter) #0 {
 entry:
-  %code = getelementptr inbounds %struct.rtx_def, %struct.rtx_def* %reg, i32 0, i32 0
-  %0 = load i16, i16* %code, align 4
+  %0 = load i16, ptr %reg, align 4
   switch i16 %0, label %return [
     i16 2, label %if.end
     i16 5, label %if.end
   ]
 
 if.end:
-  %code6 = getelementptr inbounds %struct.rtx_def, %struct.rtx_def* %setter, i32 0, i32 0
-  %1 = load i16, i16* %code6, align 4
+  %1 = load i16, ptr %setter, align 4
   %cmp8 = icmp eq i16 %1, 56
   %conv9 = zext i1 %cmp8 to i32
-  %2 = load i32, i32* @this_insn_number, align 4
+  %2 = load i32, ptr @this_insn_number, align 4
   %3 = mul i32 %2, -2
   %sub = add nsw i32 %conv9, %3
-  tail call void @reg_is_born(%struct.rtx_def* nonnull %reg, i32 %sub) #2
+  tail call void @reg_is_born(ptr nonnull %reg, i32 %sub) #2
   br label %return
 
 return:
   ret void
 }
 
-declare void @reg_is_born(%struct.rtx_def*, i32) #1
+declare void @reg_is_born(ptr, i32) #1
 
 ; Function Attrs: nounwind
-define void @no_sub_asl(%struct.rtx_def* %reg, %struct.rtx_def* nocapture readonly %setter) #0 {
+define void @no_sub_asl(ptr %reg, ptr nocapture readonly %setter) #0 {
 entry:
-  %code = getelementptr inbounds %struct.rtx_def, %struct.rtx_def* %reg, i32 0, i32 0
-  %0 = load i16, i16* %code, align 4
+  %0 = load i16, ptr %reg, align 4
   switch i16 %0, label %return [
     i16 2, label %if.end
     i16 5, label %if.end
   ]
 
 if.end:
-  %1 = load i32, i32* @this_insn_number, align 4
+  %1 = load i32, ptr @this_insn_number, align 4
   %mul = mul nsw i32 %1, 2
-  %code6 = getelementptr inbounds %struct.rtx_def, %struct.rtx_def* %setter, i32 0, i32 0
-  %2 = load i16, i16* %code6, align 4
+  %2 = load i16, ptr %setter, align 4
   %cmp8 = icmp eq i16 %2, 56
   %conv9 = zext i1 %cmp8 to i32
   %sub = sub nsw i32 %mul, %conv9
-  tail call void @reg_is_born(%struct.rtx_def* nonnull %reg, i32 %sub) #2
+  tail call void @reg_is_born(ptr nonnull %reg, i32 %sub) #2
   br label %return
 
 return:

diff  --git a/llvm/test/CodeGen/Hexagon/switch-lut-explicit-section.ll b/llvm/test/CodeGen/Hexagon/switch-lut-explicit-section.ll
index b542c9c60c22c..66a2c1eb9d45d 100644
--- a/llvm/test/CodeGen/Hexagon/switch-lut-explicit-section.ll
+++ b/llvm/test/CodeGen/Hexagon/switch-lut-explicit-section.ll
@@ -21,8 +21,8 @@ entry:
   br i1 %0, label %switch.lookup, label %return
 
 switch.lookup:                                    ; preds = %entry
-  %switch.gep = getelementptr inbounds [9 x i32], [9 x i32]* @switch.table, i32 0, i32 %x
-  %switch.load = load i32, i32* %switch.gep, align 4
+  %switch.gep = getelementptr inbounds [9 x i32], ptr @switch.table, i32 0, i32 %x
+  %switch.load = load i32, ptr %switch.gep, align 4
   ret i32 %switch.load
 
 return:                                           ; preds = %entry

diff  --git a/llvm/test/CodeGen/Hexagon/switch-lut-function-section.ll b/llvm/test/CodeGen/Hexagon/switch-lut-function-section.ll
index 42043530b9267..0b2afa75cedf3 100644
--- a/llvm/test/CodeGen/Hexagon/switch-lut-function-section.ll
+++ b/llvm/test/CodeGen/Hexagon/switch-lut-function-section.ll
@@ -19,8 +19,8 @@ entry:
   br i1 %0, label %switch.lookup, label %return
 
 switch.lookup:                                    ; preds = %entry
-  %switch.gep = getelementptr inbounds [9 x i32], [9 x i32]* @switch.table, i32 0, i32 %x
-  %switch.load = load i32, i32* %switch.gep, align 4
+  %switch.gep = getelementptr inbounds [9 x i32], ptr @switch.table, i32 0, i32 %x
+  %switch.load = load i32, ptr %switch.gep, align 4
   ret i32 %switch.load
 
 return:                                           ; preds = %entry

diff  --git a/llvm/test/CodeGen/Hexagon/switch-lut-multiple-functions.ll b/llvm/test/CodeGen/Hexagon/switch-lut-multiple-functions.ll
index be0b6817af9ef..d0365f8cf8fa5 100644
--- a/llvm/test/CodeGen/Hexagon/switch-lut-multiple-functions.ll
+++ b/llvm/test/CodeGen/Hexagon/switch-lut-multiple-functions.ll
@@ -17,8 +17,8 @@ entry:
   br i1 %0, label %switch.lookup, label %return
 
 switch.lookup:                                    ; preds = %entry
-  %switch.gep = getelementptr inbounds [9 x i32], [9 x i32]* @switch.table, i32 0, i32 %x
-  %switch.load = load i32, i32* %switch.gep, align 4
+  %switch.gep = getelementptr inbounds [9 x i32], ptr @switch.table, i32 0, i32 %x
+  %switch.load = load i32, ptr %switch.gep, align 4
   ret i32 %switch.load
 
 return:                                           ; preds = %entry
@@ -31,8 +31,8 @@ entry:
   br i1 %0, label %switch.lookup, label %return
 
 switch.lookup:                                    ; preds = %entry
-  %switch.gep = getelementptr inbounds [9 x i32], [9 x i32]* @switch.table, i32 0, i32 %x
-  %switch.load = load i32, i32* %switch.gep, align 4
+  %switch.gep = getelementptr inbounds [9 x i32], ptr @switch.table, i32 0, i32 %x
+  %switch.load = load i32, ptr %switch.gep, align 4
   ret i32 %switch.load
 
 return:                                           ; preds = %entry

diff  --git a/llvm/test/CodeGen/Hexagon/switch-lut-text-section.ll b/llvm/test/CodeGen/Hexagon/switch-lut-text-section.ll
index bca7036db69f1..17d4aa0e6dd5d 100644
--- a/llvm/test/CodeGen/Hexagon/switch-lut-text-section.ll
+++ b/llvm/test/CodeGen/Hexagon/switch-lut-text-section.ll
@@ -16,8 +16,8 @@ entry:
   br i1 %0, label %switch.lookup, label %return
 
 switch.lookup:                                    ; preds = %entry
-  %switch.gep = getelementptr inbounds [9 x i32], [9 x i32]* @switch.table, i32 0, i32 %x
-  %switch.load = load i32, i32* %switch.gep, align 4
+  %switch.gep = getelementptr inbounds [9 x i32], ptr @switch.table, i32 0, i32 %x
+  %switch.load = load i32, ptr %switch.gep, align 4
   ret i32 %switch.load
 
 return:                                           ; preds = %entry

diff  --git a/llvm/test/CodeGen/Hexagon/swp-art-deps-rec.ll b/llvm/test/CodeGen/Hexagon/swp-art-deps-rec.ll
index 36ee3da4a6fbc..dee2695cdae9f 100644
--- a/llvm/test/CodeGen/Hexagon/swp-art-deps-rec.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-art-deps-rec.ll
@@ -46,24 +46,24 @@ L57.us.ur:
   %R8.0469.us.ur = phi i32 [ %sub34.us.ur, %L57.us.ur ], [ undef, %entry ], [ undef, %for.cond22.for.end_crit_edge.us.ur-lcssa ]
   %1 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 %R15_14.0478.us.ur, i64 %R1_0.0472.us.ur, i64 %R3_2.0473.us.ur)
   %2 = tail call i64 @llvm.hexagon.S2.shuffeh(i64 %R5_4.2474.us.ur, i64 %R7_6.0475.us.ur)
-  %3 = inttoptr i32 %R9.0470.us.ur to i16*
-  %4 = load i16, i16* %3, align 2
+  %3 = inttoptr i32 %R9.0470.us.ur to ptr
+  %4 = load i16, ptr %3, align 2
   %conv27.us.ur = sext i16 %4 to i32
   %sub28.us.ur = add i32 %R9.0470.us.ur, -8
-  %5 = inttoptr i32 %R8.0469.us.ur to i16*
-  %6 = load i16, i16* %5, align 2
+  %5 = inttoptr i32 %R8.0469.us.ur to ptr
+  %6 = load i16, ptr %5, align 2
   %conv30.us.ur = sext i16 %6 to i32
   %sub31.us.ur = add i32 %R8.0469.us.ur, -8
   %7 = tail call i64 @llvm.hexagon.A2.combinew(i32 %conv27.us.ur, i32 %conv30.us.ur)
   %8 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 %R11_10.0476.us.ur, i64 %R1_0.0472.us.ur, i64 %2)
   %9 = tail call i64 @llvm.hexagon.S2.shuffeh(i64 %7, i64 %R5_4.2474.us.ur)
-  %10 = inttoptr i32 %sub31.us.ur to i16*
-  %11 = load i16, i16* %10, align 2
+  %10 = inttoptr i32 %sub31.us.ur to ptr
+  %11 = load i16, ptr %10, align 2
   %conv33.us.ur = sext i16 %11 to i32
   %sub34.us.ur = add i32 %R8.0469.us.ur, -16
   %conv35.us.ur = trunc i64 %9 to i32
-  %12 = inttoptr i32 %sub28.us.ur to i16*
-  %13 = load i16, i16* %12, align 2
+  %12 = inttoptr i32 %sub28.us.ur to ptr
+  %13 = load i16, ptr %12, align 2
   %conv39.us.ur = sext i16 %13 to i32
   %sub40.us.ur = add i32 %R9.0470.us.ur, -16
   %14 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 %R13_12.0477.us.ur, i64 %R1_0.0472.us.ur, i64 %9)
@@ -87,13 +87,11 @@ for.cond22.for.end_crit_edge.us:
   %.lcssa551.off0 = phi i32 [ undef, %for.cond22.for.end_crit_edge.us.ur-lcssa ], [ %extract.t652, %for.cond22.for.end_crit_edge.us.ur-lcssa572 ]
   %.lcssa550.off32 = phi i32 [ undef, %for.cond22.for.end_crit_edge.us.ur-lcssa ], [ %extract.t662, %for.cond22.for.end_crit_edge.us.ur-lcssa572 ]
   %.lcssa549.off0 = phi i32 [ undef, %for.cond22.for.end_crit_edge.us.ur-lcssa ], [ %extract.t664, %for.cond22.for.end_crit_edge.us.ur-lcssa572 ]
-  %17 = inttoptr i32 %add to i32*
-  store i32 %.lcssa549.off0, i32* %17, align 4
-  %add.ptr61.us = getelementptr inbounds i8, i8* null, i32 32
-  %18 = bitcast i8* %add.ptr61.us to i32*
-  store i32 %.lcssa551.off0, i32* %18, align 4
-  %19 = bitcast i8* undef to i32*
-  store i32 %.lcssa550.off32, i32* %19, align 4
+  %17 = inttoptr i32 %add to ptr
+  store i32 %.lcssa549.off0, ptr %17, align 4
+  %add.ptr61.us = getelementptr inbounds i8, ptr null, i32 32
+  store i32 %.lcssa551.off0, ptr %add.ptr61.us, align 4
+  store i32 %.lcssa550.off32, ptr undef, align 4
   call void @llvm.trap()
   unreachable
 }

diff  --git a/llvm/test/CodeGen/Hexagon/swp-bad-sched.ll b/llvm/test/CodeGen/Hexagon/swp-bad-sched.ll
index ee93e8b3468e2..e4e4bcda4ceee 100644
--- a/llvm/test/CodeGen/Hexagon/swp-bad-sched.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-bad-sched.ll
@@ -14,7 +14,7 @@
 ; CHECK-NEXT: }{{[ \t]*}}:endloop0
 
 ; Function Attrs: nounwind
-define void @f0([576 x i32]* nocapture %a0, i32 %a1, i32* nocapture %a2) #0 {
+define void @f0(ptr nocapture %a0, i32 %a1, ptr nocapture %a2) #0 {
 b0:
   %v0 = icmp sgt i32 %a1, 0
   br i1 %v0, label %b1, label %b9
@@ -31,53 +31,53 @@ b3:                                               ; preds = %b3, %b2
   %v3 = phi i32 [ %v48, %b3 ], [ 0, %b2 ]
   %v4 = phi i32 [ %v46, %b3 ], [ 0, %b2 ]
   %v5 = phi i32 [ %v49, %b3 ], [ 0, %b2 ]
-  %v6 = getelementptr inbounds [576 x i32], [576 x i32]* %a0, i32 0, i32 %v5
-  %v7 = load i32, i32* %v6, align 4, !tbaa !0
-  %v8 = getelementptr inbounds [576 x i32], [576 x i32]* %a0, i32 1, i32 %v5
-  %v9 = load i32, i32* %v8, align 4, !tbaa !0
+  %v6 = getelementptr inbounds [576 x i32], ptr %a0, i32 0, i32 %v5
+  %v7 = load i32, ptr %v6, align 4, !tbaa !0
+  %v8 = getelementptr inbounds [576 x i32], ptr %a0, i32 1, i32 %v5
+  %v9 = load i32, ptr %v8, align 4, !tbaa !0
   %v10 = add nsw i32 %v9, %v7
-  store i32 %v10, i32* %v6, align 4, !tbaa !0
+  store i32 %v10, ptr %v6, align 4, !tbaa !0
   %v11 = sub nsw i32 %v7, %v9
-  store i32 %v11, i32* %v8, align 4, !tbaa !0
+  store i32 %v11, ptr %v8, align 4, !tbaa !0
   %v12 = tail call i32 @llvm.hexagon.A2.abs(i32 %v10)
   %v13 = or i32 %v12, %v4
   %v14 = tail call i32 @llvm.hexagon.A2.abs(i32 %v11)
   %v15 = or i32 %v14, %v3
   %v16 = add nsw i32 %v5, 1
-  %v17 = getelementptr inbounds [576 x i32], [576 x i32]* %a0, i32 0, i32 %v16
-  %v18 = load i32, i32* %v17, align 4, !tbaa !0
-  %v19 = getelementptr inbounds [576 x i32], [576 x i32]* %a0, i32 1, i32 %v16
-  %v20 = load i32, i32* %v19, align 4, !tbaa !0
+  %v17 = getelementptr inbounds [576 x i32], ptr %a0, i32 0, i32 %v16
+  %v18 = load i32, ptr %v17, align 4, !tbaa !0
+  %v19 = getelementptr inbounds [576 x i32], ptr %a0, i32 1, i32 %v16
+  %v20 = load i32, ptr %v19, align 4, !tbaa !0
   %v21 = add nsw i32 %v20, %v18
-  store i32 %v21, i32* %v17, align 4, !tbaa !0
+  store i32 %v21, ptr %v17, align 4, !tbaa !0
   %v22 = sub nsw i32 %v18, %v20
-  store i32 %v22, i32* %v19, align 4, !tbaa !0
+  store i32 %v22, ptr %v19, align 4, !tbaa !0
   %v23 = tail call i32 @llvm.hexagon.A2.abs(i32 %v21)
   %v24 = or i32 %v23, %v13
   %v25 = tail call i32 @llvm.hexagon.A2.abs(i32 %v22)
   %v26 = or i32 %v25, %v15
   %v27 = add nsw i32 %v5, 2
-  %v28 = getelementptr inbounds [576 x i32], [576 x i32]* %a0, i32 0, i32 %v27
-  %v29 = load i32, i32* %v28, align 4, !tbaa !0
-  %v30 = getelementptr inbounds [576 x i32], [576 x i32]* %a0, i32 1, i32 %v27
-  %v31 = load i32, i32* %v30, align 4, !tbaa !0
+  %v28 = getelementptr inbounds [576 x i32], ptr %a0, i32 0, i32 %v27
+  %v29 = load i32, ptr %v28, align 4, !tbaa !0
+  %v30 = getelementptr inbounds [576 x i32], ptr %a0, i32 1, i32 %v27
+  %v31 = load i32, ptr %v30, align 4, !tbaa !0
   %v32 = add nsw i32 %v31, %v29
-  store i32 %v32, i32* %v28, align 4, !tbaa !0
+  store i32 %v32, ptr %v28, align 4, !tbaa !0
   %v33 = sub nsw i32 %v29, %v31
-  store i32 %v33, i32* %v30, align 4, !tbaa !0
+  store i32 %v33, ptr %v30, align 4, !tbaa !0
   %v34 = tail call i32 @llvm.hexagon.A2.abs(i32 %v32)
   %v35 = or i32 %v34, %v24
   %v36 = tail call i32 @llvm.hexagon.A2.abs(i32 %v33)
   %v37 = or i32 %v36, %v26
   %v38 = add nsw i32 %v5, 3
-  %v39 = getelementptr inbounds [576 x i32], [576 x i32]* %a0, i32 0, i32 %v38
-  %v40 = load i32, i32* %v39, align 4, !tbaa !0
-  %v41 = getelementptr inbounds [576 x i32], [576 x i32]* %a0, i32 1, i32 %v38
-  %v42 = load i32, i32* %v41, align 4, !tbaa !0
+  %v39 = getelementptr inbounds [576 x i32], ptr %a0, i32 0, i32 %v38
+  %v40 = load i32, ptr %v39, align 4, !tbaa !0
+  %v41 = getelementptr inbounds [576 x i32], ptr %a0, i32 1, i32 %v38
+  %v42 = load i32, ptr %v41, align 4, !tbaa !0
   %v43 = add nsw i32 %v42, %v40
-  store i32 %v43, i32* %v39, align 4, !tbaa !0
+  store i32 %v43, ptr %v39, align 4, !tbaa !0
   %v44 = sub nsw i32 %v40, %v42
-  store i32 %v44, i32* %v41, align 4, !tbaa !0
+  store i32 %v44, ptr %v41, align 4, !tbaa !0
   %v45 = tail call i32 @llvm.hexagon.A2.abs(i32 %v43)
   %v46 = or i32 %v45, %v35
   %v47 = tail call i32 @llvm.hexagon.A2.abs(i32 %v44)
@@ -103,14 +103,14 @@ b7:                                               ; preds = %b7, %b6
   %v55 = phi i32 [ %v67, %b7 ], [ %v52, %b6 ]
   %v56 = phi i32 [ %v65, %b7 ], [ %v53, %b6 ]
   %v57 = phi i32 [ %v68, %b7 ], [ %v51, %b6 ]
-  %v58 = getelementptr inbounds [576 x i32], [576 x i32]* %a0, i32 0, i32 %v57
-  %v59 = load i32, i32* %v58, align 4, !tbaa !0
-  %v60 = getelementptr inbounds [576 x i32], [576 x i32]* %a0, i32 1, i32 %v57
-  %v61 = load i32, i32* %v60, align 4, !tbaa !0
+  %v58 = getelementptr inbounds [576 x i32], ptr %a0, i32 0, i32 %v57
+  %v59 = load i32, ptr %v58, align 4, !tbaa !0
+  %v60 = getelementptr inbounds [576 x i32], ptr %a0, i32 1, i32 %v57
+  %v61 = load i32, ptr %v60, align 4, !tbaa !0
   %v62 = add nsw i32 %v61, %v59
-  store i32 %v62, i32* %v58, align 4, !tbaa !0
+  store i32 %v62, ptr %v58, align 4, !tbaa !0
   %v63 = sub nsw i32 %v59, %v61
-  store i32 %v63, i32* %v60, align 4, !tbaa !0
+  store i32 %v63, ptr %v60, align 4, !tbaa !0
   %v64 = tail call i32 @llvm.hexagon.A2.abs(i32 %v62)
   %v65 = or i32 %v64, %v56
   %v66 = tail call i32 @llvm.hexagon.A2.abs(i32 %v63)
@@ -125,13 +125,13 @@ b8:                                               ; preds = %b7
 b9:                                               ; preds = %b8, %b5, %b0
   %v70 = phi i32 [ 0, %b0 ], [ %v52, %b5 ], [ %v67, %b8 ]
   %v71 = phi i32 [ 0, %b0 ], [ %v53, %b5 ], [ %v65, %b8 ]
-  %v72 = load i32, i32* %a2, align 4, !tbaa !0
+  %v72 = load i32, ptr %a2, align 4, !tbaa !0
   %v73 = or i32 %v72, %v71
-  store i32 %v73, i32* %a2, align 4, !tbaa !0
-  %v74 = getelementptr inbounds i32, i32* %a2, i32 1
-  %v75 = load i32, i32* %v74, align 4, !tbaa !0
+  store i32 %v73, ptr %a2, align 4, !tbaa !0
+  %v74 = getelementptr inbounds i32, ptr %a2, i32 1
+  %v75 = load i32, ptr %v74, align 4, !tbaa !0
   %v76 = or i32 %v75, %v70
-  store i32 %v76, i32* %v74, align 4, !tbaa !0
+  store i32 %v76, ptr %v74, align 4, !tbaa !0
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/swp-badorder.ll b/llvm/test/CodeGen/Hexagon/swp-badorder.ll
index 568c7642052d1..07f394d9f85db 100644
--- a/llvm/test/CodeGen/Hexagon/swp-badorder.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-badorder.ll
@@ -2,17 +2,17 @@
 ; REQUIRES: asserts
 
 ; Function Attrs: nounwind
-define void @f0(i32* nocapture %a0) #0 {
+define void @f0(ptr nocapture %a0) #0 {
 b0:
   br i1 undef, label %b1, label %b2
 
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i64 [ %v9, %b1 ], [ 0, %b0 ]
   %v1 = phi i32 [ %v10, %b1 ], [ 0, %b0 ]
-  %v2 = getelementptr inbounds i32, i32* %a0, i32 %v1
-  %v3 = load i32, i32* %v2, align 4, !tbaa !0
+  %v2 = getelementptr inbounds i32, ptr %a0, i32 %v1
+  %v3 = load i32, ptr %v2, align 4, !tbaa !0
   %v4 = zext i32 %v3 to i64
-  %v5 = load i32, i32* undef, align 4, !tbaa !0
+  %v5 = load i32, ptr undef, align 4, !tbaa !0
   %v6 = zext i32 %v5 to i64
   %v7 = shl nuw i64 %v6, 32
   %v8 = or i64 %v7, %v4

diff  --git a/llvm/test/CodeGen/Hexagon/swp-carried-1.ll b/llvm/test/CodeGen/Hexagon/swp-carried-1.ll
index e5b5be4d43004..60b1d9b8c5b35 100644
--- a/llvm/test/CodeGen/Hexagon/swp-carried-1.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-carried-1.ll
@@ -25,8 +25,8 @@ b2:                                               ; preds = %b1
   br label %b3
 
 b3:                                               ; preds = %b3, %b2
-  %v0 = phi i32* [ getelementptr inbounds ([256 x i32], [256 x i32]* @g0, i32 0, i32 0), %b2 ], [ %v1, %b3 ]
-  %v1 = getelementptr i32, i32* %v0, i32 6
+  %v0 = phi ptr [ @g0, %b2 ], [ %v1, %b3 ]
+  %v1 = getelementptr i32, ptr %v0, i32 6
   br i1 undef, label %b4, label %b3
 
 b4:                                               ; preds = %b3
@@ -34,15 +34,15 @@ b4:                                               ; preds = %b3
 
 b5:                                               ; preds = %b5, %b4
   %v2 = phi i64 [ %v19, %b5 ], [ undef, %b4 ]
-  %v3 = phi i32* [ %v8, %b5 ], [ %v1, %b4 ]
+  %v3 = phi ptr [ %v8, %b5 ], [ %v1, %b4 ]
   %v4 = phi i32 [ %v9, %b5 ], [ undef, %b4 ]
   %v5 = phi i32 [ %v11, %b5 ], [ undef, %b4 ]
   %v6 = phi i32 [ %v5, %b5 ], [ undef, %b4 ]
   %v7 = phi i32 [ %v10, %b5 ], [ 0, %b4 ]
-  %v8 = getelementptr i32, i32* %v3, i32 1
+  %v8 = getelementptr i32, ptr %v3, i32 1
   %v9 = add nsw i32 %v4, 1
-  %v10 = load i32, i32* %v8, align 4
-  %v11 = load i32, i32* null, align 4
+  %v10 = load i32, ptr %v8, align 4
+  %v11 = load i32, ptr null, align 4
   %v12 = sext i32 %v6 to i64
   %v13 = sext i32 %v10 to i64
   %v14 = sext i32 %v7 to i64

diff  --git a/llvm/test/CodeGen/Hexagon/swp-chain-refs.ll b/llvm/test/CodeGen/Hexagon/swp-chain-refs.ll
index 5695f3d61b740..966ff2398ec24 100644
--- a/llvm/test/CodeGen/Hexagon/swp-chain-refs.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-chain-refs.ll
@@ -33,11 +33,11 @@ b6:                                               ; preds = %b5
 
 b7:                                               ; preds = %b7, %b1
   %v0 = phi i32 [ 0, %b1 ], [ %v4, %b7 ]
-  %v1 = load i16, i16* undef, align 8, !tbaa !0
+  %v1 = load i16, ptr undef, align 8, !tbaa !0
   %v2 = icmp sgt i16 %v1, undef
   %v3 = select i1 %v2, i16 4, i16 undef
-  store i16 %v3, i16* undef, align 2, !tbaa !0
-  store i16 -32768, i16* undef, align 2, !tbaa !0
+  store i16 %v3, ptr undef, align 2, !tbaa !0
+  store i16 -32768, ptr undef, align 2, !tbaa !0
   %v4 = add i32 %v0, 1
   %v5 = icmp eq i32 %v4, 5
   br i1 %v5, label %b8, label %b7

diff  --git a/llvm/test/CodeGen/Hexagon/swp-change-dep-cycle.ll b/llvm/test/CodeGen/Hexagon/swp-change-dep-cycle.ll
index f7217d7549664..9a0551eac9a89 100644
--- a/llvm/test/CodeGen/Hexagon/swp-change-dep-cycle.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-change-dep-cycle.ll
@@ -4,33 +4,33 @@
 ; Don't change the dependences if it's going to cause a cycle.
 
 ; Function Attrs: nounwind
-define void @f0(i8* nocapture %a0, i32 %a1) #0 {
+define void @f0(ptr nocapture %a0, i32 %a1) #0 {
 b0:
   br i1 undef, label %b1, label %b2
 
 b1:                                               ; preds = %b1, %b0
-  %v0 = phi i8* [ undef, %b1 ], [ undef, %b0 ]
+  %v0 = phi ptr [ undef, %b1 ], [ undef, %b0 ]
   %v1 = phi i32 [ %v20, %b1 ], [ 1, %b0 ]
-  %v2 = phi i8* [ %v6, %b1 ], [ %a0, %b0 ]
-  %v3 = load i8, i8* %v2, align 1
+  %v2 = phi ptr [ %v6, %b1 ], [ %a0, %b0 ]
+  %v3 = load i8, ptr %v2, align 1
   %v4 = zext i8 %v3 to i32
   %v5 = mul nsw i32 %v4, 3
-  %v6 = getelementptr inbounds i8, i8* %v2, i32 1
-  %v7 = load i8, i8* %v6, align 1
+  %v6 = getelementptr inbounds i8, ptr %v2, i32 1
+  %v7 = load i8, ptr %v6, align 1
   %v8 = zext i8 %v7 to i32
   %v9 = add i32 %v8, 2
   %v10 = add i32 %v9, %v5
   %v11 = lshr i32 %v10, 2
   %v12 = trunc i32 %v11 to i8
-  %v13 = getelementptr inbounds i8, i8* undef, i32 2
-  store i8 %v12, i8* %v0, align 1
-  %v14 = load i8, i8* %v2, align 1
+  %v13 = getelementptr inbounds i8, ptr undef, i32 2
+  store i8 %v12, ptr %v0, align 1
+  %v14 = load i8, ptr %v2, align 1
   %v15 = zext i8 %v14 to i32
   %v16 = add i32 %v15, 2
   %v17 = add i32 %v16, 0
   %v18 = lshr i32 %v17, 2
   %v19 = trunc i32 %v18 to i8
-  store i8 %v19, i8* %v13, align 1
+  store i8 %v19, ptr %v13, align 1
   %v20 = add i32 %v1, 1
   %v21 = icmp eq i32 %v20, %a1
   br i1 %v21, label %b2, label %b1

diff  --git a/llvm/test/CodeGen/Hexagon/swp-change-dep.ll b/llvm/test/CodeGen/Hexagon/swp-change-dep.ll
index bd1f1b6eb59d3..d75b8459589ab 100644
--- a/llvm/test/CodeGen/Hexagon/swp-change-dep.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-change-dep.ll
@@ -44,12 +44,12 @@ b12:                                              ; preds = %b12, %b11
 
 b13:                                              ; preds = %b13, %b12
   %v0 = phi i32 [ %v5, %b13 ], [ 0, %b12 ]
-  %v1 = getelementptr inbounds [11 x i32], [11 x i32]* undef, i32 0, i32 %v0
-  %v2 = load i32, i32* %v1, align 4
+  %v1 = getelementptr inbounds [11 x i32], ptr undef, i32 0, i32 %v0
+  %v2 = load i32, ptr %v1, align 4
   %v3 = add i32 %v2, 1
   %v4 = lshr i32 %v3, 1
-  store i32 %v4, i32* %v1, align 4
-  store i32 0, i32* %v1, align 4
+  store i32 %v4, ptr %v1, align 4
+  store i32 0, ptr %v1, align 4
   %v5 = add nsw i32 %v0, 1
   %v6 = icmp eq i32 %v5, 11
   br i1 %v6, label %b14, label %b13

diff  --git a/llvm/test/CodeGen/Hexagon/swp-change-dep1.ll b/llvm/test/CodeGen/Hexagon/swp-change-dep1.ll
index 157bdd069f921..8c7fa707b35e7 100644
--- a/llvm/test/CodeGen/Hexagon/swp-change-dep1.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-change-dep1.ll
@@ -23,20 +23,18 @@ b1:                                               ; preds = %b2
 ; CHECK: }{{[ \t]*}}:endloop
 
 b2:                                               ; preds = %b2, %b0
-  %v0 = phi i32* [ getelementptr inbounds ([400 x i32], [400 x i32]* @g0, i32 0, i32 0), %b0 ], [ %v11, %b2 ]
-  %v1 = phi i32* [ getelementptr inbounds ([400 x i32], [400 x i32]* @g1, i32 0, i32 0), %b0 ], [ %v12, %b2 ]
+  %v0 = phi ptr [ @g0, %b0 ], [ %v11, %b2 ]
+  %v1 = phi ptr [ @g1, %b0 ], [ %v12, %b2 ]
   %v2 = phi i32 [ 0, %b0 ], [ %v9, %b2 ]
-  %v3 = bitcast i32* %v0 to <2 x i32>*
-  %v4 = load <2 x i32>, <2 x i32>* %v3, align 8
+  %v4 = load <2 x i32>, ptr %v0, align 8
   %v5 = mul <2 x i32> %v4, <i32 7, i32 7>
-  %v6 = bitcast i32* %v1 to <2 x i32>*
-  %v7 = load <2 x i32>, <2 x i32>* %v6, align 8
+  %v7 = load <2 x i32>, ptr %v1, align 8
   %v8 = add <2 x i32> %v7, %v5
-  store <2 x i32> %v8, <2 x i32>* %v6, align 8
+  store <2 x i32> %v8, ptr %v1, align 8
   %v9 = add nsw i32 %v2, 2
   %v10 = icmp slt i32 %v2, 398
-  %v11 = getelementptr i32, i32* %v0, i32 2
-  %v12 = getelementptr i32, i32* %v1, i32 2
+  %v11 = getelementptr i32, ptr %v0, i32 2
+  %v12 = getelementptr i32, ptr %v1, i32 2
   br i1 %v10, label %b2, label %b1
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/swp-change-deps.ll b/llvm/test/CodeGen/Hexagon/swp-change-deps.ll
index 1b35c633c52de..704a5b2b8f3c4 100644
--- a/llvm/test/CodeGen/Hexagon/swp-change-deps.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-change-deps.ll
@@ -31,9 +31,9 @@ b1:                                               ; preds = %b0
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
-  %v0 = phi i16* [ undef, %b1 ], [ %v14, %b2 ]
+  %v0 = phi ptr [ undef, %b1 ], [ %v14, %b2 ]
   %v1 = phi i32 [ 0, %b1 ], [ %v12, %b2 ]
-  %v2 = load i16, i16* %v0, align 2
+  %v2 = load i16, ptr %v0, align 2
   %v3 = sext i16 %v2 to i32
   %v4 = call i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32 undef, i32 %v3)
   %v5 = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v4, i32 undef)
@@ -42,12 +42,12 @@ b2:                                               ; preds = %b2, %b1
   %v8 = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v7, i32 undef)
   %v9 = call i32 @llvm.hexagon.A2.sath(i32 %v8)
   %v10 = trunc i32 %v9 to i16
-  store i16 %v10, i16* null, align 2
+  store i16 %v10, ptr null, align 2
   %v11 = trunc i32 %v7 to i16
-  store i16 %v11, i16* %v0, align 2
+  store i16 %v11, ptr %v0, align 2
   %v12 = add nsw i32 %v1, 1
   %v13 = icmp slt i32 %v12, %a0
-  %v14 = getelementptr i16, i16* %v0, i32 1
+  %v14 = getelementptr i16, ptr %v0, i32 1
   br i1 %v13, label %b2, label %b3
 
 b3:                                               ; preds = %b2

diff  --git a/llvm/test/CodeGen/Hexagon/swp-check-offset.ll b/llvm/test/CodeGen/Hexagon/swp-check-offset.ll
index 6a7211df12d8f..5fe2f36723011 100644
--- a/llvm/test/CodeGen/Hexagon/swp-check-offset.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-check-offset.ll
@@ -15,20 +15,18 @@
 ; CHECK-V65: }{{[ \t]*}}:mem_noshuf
 
 ; Function Attrs: nounwind
-define i32 @f0(i8** %a0) #0 {
+define i32 @f0(ptr %a0) #0 {
 b0:
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ %v7, %b1 ], [ 0, %b0 ]
-  %v1 = getelementptr inbounds i8*, i8** %a0, i32 %v0
-  %v2 = load i8*, i8** %v1, align 4
-  %v3 = bitcast i8* %v2 to i32*
-  store i32 0, i32* %v3, align 4
-  %v4 = load i8*, i8** %v1, align 4
-  %v5 = getelementptr inbounds i8, i8* %v4, i32 8
-  %v6 = bitcast i8* %v5 to i32*
-  store i32 0, i32* %v6, align 4
+  %v1 = getelementptr inbounds ptr, ptr %a0, i32 %v0
+  %v2 = load ptr, ptr %v1, align 4
+  store i32 0, ptr %v2, align 4
+  %v4 = load ptr, ptr %v1, align 4
+  %v5 = getelementptr inbounds i8, ptr %v4, i32 8
+  store i32 0, ptr %v5, align 4
   %v7 = add nsw i32 %v0, 1
   %v8 = icmp eq i32 %v7, 2
   br i1 %v8, label %b2, label %b1

diff  --git a/llvm/test/CodeGen/Hexagon/swp-const-tc.ll b/llvm/test/CodeGen/Hexagon/swp-const-tc.ll
index 8fac705995b1b..44e0a8983366f 100644
--- a/llvm/test/CodeGen/Hexagon/swp-const-tc.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-const-tc.ll
@@ -6,19 +6,19 @@
 ; CHECK-LABEL: @test
 ; CHECK: loop0(.LBB0_1,#999)
 
-define i32 @test(i32* %A, i32* %B, i32 %count) {
+define i32 @test(ptr %A, ptr %B, i32 %count) {
 entry:
   br label %for.body
 
 for.body:
   %sum.02 = phi i32 [ 0, %entry ], [ %add, %for.body ]
-  %arrayidx.phi = phi i32* [ %A, %entry ], [ %arrayidx.inc, %for.body ]
+  %arrayidx.phi = phi ptr [ %A, %entry ], [ %arrayidx.inc, %for.body ]
   %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
-  %0 = load i32, i32* %arrayidx.phi, align 4
+  %0 = load i32, ptr %arrayidx.phi, align 4
   %add = add nsw i32 %0, %sum.02
   %inc = add nsw i32 %i.01, 1
   %exitcond = icmp eq i32 %inc, 1000
-  %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1
+  %arrayidx.inc = getelementptr i32, ptr %arrayidx.phi, i32 1
   br i1 %exitcond, label %for.end, label %for.body
 
 for.end:
@@ -30,19 +30,19 @@ for.end:
 ; CHECK-LABEL: @test1
 ; CHECK-NOT: loop0(
 
-define i32 @test1(i32* %A, i32* %B, i32 %count) {
+define i32 @test1(ptr %A, ptr %B, i32 %count) {
 entry:
   br label %for.body
 
 for.body:
   %sum.02 = phi i32 [ 0, %entry ], [ %add, %for.body ]
-  %arrayidx.phi = phi i32* [ %A, %entry ], [ %arrayidx.inc, %for.body ]
+  %arrayidx.phi = phi ptr [ %A, %entry ], [ %arrayidx.inc, %for.body ]
   %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
-  %0 = load i32, i32* %arrayidx.phi, align 4
+  %0 = load i32, ptr %arrayidx.phi, align 4
   %add = add nsw i32 %0, %sum.02
   %inc = add nsw i32 %i.01, 1
   %exitcond = icmp eq i32 %inc, 1
-  %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1
+  %arrayidx.inc = getelementptr i32, ptr %arrayidx.phi, i32 1
   br i1 %exitcond, label %for.end, label %for.body
 
 for.end:

diff  --git a/llvm/test/CodeGen/Hexagon/swp-const-tc1.ll b/llvm/test/CodeGen/Hexagon/swp-const-tc1.ll
index c785ee74513a9..9b6c91f6326ee 100644
--- a/llvm/test/CodeGen/Hexagon/swp-const-tc1.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-const-tc1.ll
@@ -18,9 +18,9 @@ b0:
   br label %b1
 
 b1:                                               ; preds = %b5, %b0
-  %v0 = load i16, i16* undef, align 2, !tbaa !0
+  %v0 = load i16, ptr undef, align 2, !tbaa !0
   %v1 = sext i16 %v0 to i32
-  %v2 = load i16, i16* undef, align 2, !tbaa !0
+  %v2 = load i16, ptr undef, align 2, !tbaa !0
   %v3 = sext i16 %v2 to i32
   %v4 = and i32 %v1, 7
   %v5 = and i32 %v3, 7
@@ -36,8 +36,8 @@ b3:                                               ; preds = %b3, %b2
   %v9 = add nsw i32 %v7, 1
   %v10 = select i1 %x, i32 1, i32 %v9
   %v11 = add i32 %v10, 0
-  %v12 = getelementptr inbounds i8, i8* null, i32 %v11
-  %v13 = load i8, i8* %v12, align 1, !tbaa !4
+  %v12 = getelementptr inbounds i8, ptr null, i32 %v11
+  %v13 = load i8, ptr %v12, align 1, !tbaa !4
   %v14 = zext i8 %v13 to i32
   %v15 = mul i32 %v14, %v4
   %v16 = add i32 %v15, 0
@@ -46,7 +46,7 @@ b3:                                               ; preds = %b3, %b2
   %v19 = add i32 %v18, 0
   %v20 = lshr i32 %v19, 6
   %v21 = trunc i32 %v20 to i8
-  store i8 %v21, i8* undef, align 1, !tbaa !4
+  store i8 %v21, ptr undef, align 1, !tbaa !4
   %v22 = add i32 %v6, 1
   %v23 = icmp eq i32 %v22, 2
   br i1 %v23, label %b4, label %b3

diff  --git a/llvm/test/CodeGen/Hexagon/swp-const-tc2.ll b/llvm/test/CodeGen/Hexagon/swp-const-tc2.ll
index 29d12bd14390d..73268f160fdd6 100644
--- a/llvm/test/CodeGen/Hexagon/swp-const-tc2.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-const-tc2.ll
@@ -9,14 +9,14 @@
 ; CHECK: r{{[0-9]+}} = mpyi
 ; CHECK-NOT: r{{[0-9]+}} = mpyi
 
-define i32 @f0(i32* %a0) {
+define i32 @f0(ptr %a0) {
 b0:
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ 0, %b0 ], [ %v9, %b1 ]
   %v1 = phi i32 [ 0, %b0 ], [ %v8, %b1 ]
-  %v2 = load i32, i32* %a0, align 4
+  %v2 = load i32, ptr %a0, align 4
   %v3 = add nsw i32 %v1, 1
   %v4 = srem i32 %v2, 3
   %v5 = icmp ne i32 %v4, 0

diff  --git a/llvm/test/CodeGen/Hexagon/swp-const-tc3.ll b/llvm/test/CodeGen/Hexagon/swp-const-tc3.ll
index 48a61428538c2..c7fdf18d116de 100644
--- a/llvm/test/CodeGen/Hexagon/swp-const-tc3.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-const-tc3.ll
@@ -11,15 +11,15 @@
 ; CHECK: r{{[0-9]+}} = sxth(r{{[0-9]+}})
 
 ; Function Attrs: nounwind readonly
-define signext i16 @f0(i16* nocapture readonly %a0, i16* nocapture readnone %a1, i16* nocapture readonly %a2, i16* nocapture readonly %a3, i16 signext %a4, i16 signext %a5, i16 signext %a6) #0 {
+define signext i16 @f0(ptr nocapture readonly %a0, ptr nocapture readnone %a1, ptr nocapture readonly %a2, ptr nocapture readonly %a3, i16 signext %a4, i16 signext %a5, i16 signext %a6) #0 {
 b0:
   %v0 = icmp sgt i16 %a5, 0
   br i1 %v0, label %b1, label %b7
 
 b1:                                               ; preds = %b0
-  %v1 = load i16, i16* %a0, align 2
+  %v1 = load i16, ptr %a0, align 2
   %v2 = sext i16 %v1 to i32
-  %v3 = load i16, i16* %a3, align 2
+  %v3 = load i16, ptr %a3, align 2
   %v4 = sext i16 %v3 to i32
   br label %b2
 
@@ -27,8 +27,8 @@ b2:                                               ; preds = %b6, %b1
   %v5 = phi i32 [ 2147483647, %b1 ], [ %v44, %b6 ]
   %v6 = phi i16 [ 0, %b1 ], [ %v45, %b6 ]
   %v7 = phi i16 [ 0, %b1 ], [ %v43, %b6 ]
-  %v8 = phi i16* [ %a2, %b1 ], [ %v38, %b6 ]
-  %v9 = load i16, i16* %v8, align 2
+  %v8 = phi ptr [ %a2, %b1 ], [ %v38, %b6 ]
+  %v9 = load i16, ptr %v8, align 2
   %v10 = sext i16 %v9 to i32
   %v11 = tail call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %v2, i32 %v10)
   %v12 = shl i32 %v11, 16
@@ -36,31 +36,31 @@ b2:                                               ; preds = %b6, %b1
   %v14 = tail call i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32 %v13, i32 %v4)
   %v15 = tail call i32 @llvm.hexagon.M2.hmmpyl.s1(i32 %v14, i32 %v13)
   %v16 = tail call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v15, i32 10)
-  %v17 = getelementptr inbounds i16, i16* %v8, i32 1
+  %v17 = getelementptr inbounds i16, ptr %v8, i32 1
   br label %b3
 
 b3:                                               ; preds = %b3, %b2
-  %v18 = phi i16* [ %v8, %b2 ], [ %v19, %b3 ]
-  %v19 = phi i16* [ %v17, %b2 ], [ %v38, %b3 ]
+  %v18 = phi ptr [ %v8, %b2 ], [ %v19, %b3 ]
+  %v19 = phi ptr [ %v17, %b2 ], [ %v38, %b3 ]
   %v20 = phi i32 [ %v16, %b2 ], [ %v36, %b3 ]
   %v21 = phi i32 [ 1, %b2 ], [ %v37, %b3 ]
-  %v22 = getelementptr inbounds i16, i16* %a0, i32 %v21
-  %v23 = load i16, i16* %v22, align 2
+  %v22 = getelementptr inbounds i16, ptr %a0, i32 %v21
+  %v23 = load i16, ptr %v22, align 2
   %v24 = sext i16 %v23 to i32
-  %v25 = load i16, i16* %v19, align 2
+  %v25 = load i16, ptr %v19, align 2
   %v26 = sext i16 %v25 to i32
   %v27 = tail call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %v24, i32 %v26)
   %v28 = shl i32 %v27, 16
   %v29 = ashr exact i32 %v28, 16
-  %v30 = getelementptr inbounds i16, i16* %a3, i32 %v21
-  %v31 = load i16, i16* %v30, align 2
+  %v30 = getelementptr inbounds i16, ptr %a3, i32 %v21
+  %v31 = load i16, ptr %v30, align 2
   %v32 = sext i16 %v31 to i32
   %v33 = tail call i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32 %v29, i32 %v32)
   %v34 = tail call i32 @llvm.hexagon.M2.hmmpyl.s1(i32 %v33, i32 %v29)
   %v35 = tail call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v34, i32 10)
   %v36 = tail call i32 @llvm.hexagon.A2.addsat(i32 %v20, i32 %v35)
   %v37 = add i32 %v21, 1
-  %v38 = getelementptr inbounds i16, i16* %v18, i32 2
+  %v38 = getelementptr inbounds i16, ptr %v18, i32 2
   %v39 = icmp eq i32 %v37, 3
   br i1 %v39, label %b4, label %b3
 

diff  --git a/llvm/test/CodeGen/Hexagon/swp-conv3x3-nested.ll b/llvm/test/CodeGen/Hexagon/swp-conv3x3-nested.ll
index 2b69c3da8d0d9..91b9ff36d29ab 100644
--- a/llvm/test/CodeGen/Hexagon/swp-conv3x3-nested.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-conv3x3-nested.ll
@@ -31,23 +31,20 @@ declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #0
 declare <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32>, <16 x i32>) #0
 declare <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32>, <16 x i32>, i32) #0
 
-define void @f0(i8* noalias nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, i8* noalias nocapture readonly %a4, i32 %a5, i8* noalias nocapture %a6) local_unnamed_addr #1 {
+define void @f0(ptr noalias nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, ptr noalias nocapture readonly %a4, i32 %a5, ptr noalias nocapture %a6) local_unnamed_addr #1 {
 b0:
   %v0 = add nsw i32 %a3, -1
   %v1 = icmp sgt i32 %a3, 2
   br i1 %v1, label %b1, label %b6
 
 b1:                                               ; preds = %b0
-  %v2 = getelementptr inbounds i8, i8* %a6, i32 %a1
-  %v3 = getelementptr inbounds i8, i8* %a0, i32 %a1
-  %v4 = bitcast i8* %a4 to i32*
-  %v5 = load i32, i32* %v4, align 4, !tbaa !1, !alias.scope !5, !noalias !8
-  %v6 = getelementptr inbounds i8, i8* %a4, i32 4
-  %v7 = bitcast i8* %v6 to i32*
-  %v8 = load i32, i32* %v7, align 4, !tbaa !1, !alias.scope !5, !noalias !8
-  %v9 = getelementptr inbounds i8, i8* %a4, i32 8
-  %v10 = bitcast i8* %v9 to i32*
-  %v11 = load i32, i32* %v10, align 4, !tbaa !1, !alias.scope !5, !noalias !8
+  %v2 = getelementptr inbounds i8, ptr %a6, i32 %a1
+  %v3 = getelementptr inbounds i8, ptr %a0, i32 %a1
+  %v5 = load i32, ptr %a4, align 4, !tbaa !1, !alias.scope !5, !noalias !8
+  %v6 = getelementptr inbounds i8, ptr %a4, i32 4
+  %v8 = load i32, ptr %v6, align 4, !tbaa !1, !alias.scope !5, !noalias !8
+  %v9 = getelementptr inbounds i8, ptr %a4, i32 8
+  %v11 = load i32, ptr %v9, align 4, !tbaa !1, !alias.scope !5, !noalias !8
   %v12 = sub i32 0, %a1
   %v13 = shl nsw i32 %a1, 1
   %v14 = tail call <16 x i32> @llvm.hexagon.V6.vd0() #2
@@ -55,43 +52,33 @@ b1:                                               ; preds = %b0
   br label %b2
 
 b2:                                               ; preds = %b5, %b1
-  %v16 = phi i8* [ %v2, %b1 ], [ %v102, %b5 ]
-  %v17 = phi i8* [ %v3, %b1 ], [ %v21, %b5 ]
+  %v16 = phi ptr [ %v2, %b1 ], [ %v102, %b5 ]
+  %v17 = phi ptr [ %v3, %b1 ], [ %v21, %b5 ]
   %v18 = phi i32 [ 1, %b1 ], [ %v103, %b5 ]
-  %v19 = getelementptr inbounds i8, i8* %v17, i32 %v12
-  %v20 = getelementptr inbounds i8, i8* %v17, i32 %a1
-  %v21 = getelementptr inbounds i8, i8* %v17, i32 %v13
+  %v19 = getelementptr inbounds i8, ptr %v17, i32 %v12
+  %v20 = getelementptr inbounds i8, ptr %v17, i32 %a1
+  %v21 = getelementptr inbounds i8, ptr %v17, i32 %v13
   br i1 %v15, label %b3, label %b5
 
 b3:                                               ; preds = %b2
-  %v22 = bitcast i8* %v21 to <16 x i32>*
-  %v23 = load <16 x i32>, <16 x i32>* %v22, align 64, !tbaa !11, !alias.scope !12, !noalias !13
-  %v24 = getelementptr inbounds i8, i8* %v21, i32 64
-  %v25 = bitcast i8* %v24 to <16 x i32>*
-  %v26 = bitcast i8* %v20 to <16 x i32>*
-  %v27 = load <16 x i32>, <16 x i32>* %v26, align 64, !tbaa !11, !alias.scope !12, !noalias !13
-  %v28 = getelementptr inbounds i8, i8* %v20, i32 64
-  %v29 = bitcast i8* %v28 to <16 x i32>*
-  %v30 = bitcast i8* %v17 to <16 x i32>*
-  %v31 = load <16 x i32>, <16 x i32>* %v30, align 64, !tbaa !11, !alias.scope !12, !noalias !13
-  %v32 = getelementptr inbounds i8, i8* %v17, i32 64
-  %v33 = bitcast i8* %v32 to <16 x i32>*
-  %v34 = bitcast i8* %v19 to <16 x i32>*
-  %v35 = load <16 x i32>, <16 x i32>* %v34, align 64, !tbaa !11, !alias.scope !12, !noalias !13
-  %v36 = getelementptr inbounds i8, i8* %v19, i32 64
-  %v37 = bitcast i8* %v36 to <16 x i32>*
-  %v38 = getelementptr inbounds i8, i8* %v16, i32 %a1
-  %v39 = bitcast i8* %v38 to <16 x i32>*
-  %v40 = bitcast i8* %v16 to <16 x i32>*
+  %v23 = load <16 x i32>, ptr %v21, align 64, !tbaa !11, !alias.scope !12, !noalias !13
+  %v24 = getelementptr inbounds i8, ptr %v21, i32 64
+  %v27 = load <16 x i32>, ptr %v20, align 64, !tbaa !11, !alias.scope !12, !noalias !13
+  %v28 = getelementptr inbounds i8, ptr %v20, i32 64
+  %v31 = load <16 x i32>, ptr %v17, align 64, !tbaa !11, !alias.scope !12, !noalias !13
+  %v32 = getelementptr inbounds i8, ptr %v17, i32 64
+  %v35 = load <16 x i32>, ptr %v19, align 64, !tbaa !11, !alias.scope !12, !noalias !13
+  %v36 = getelementptr inbounds i8, ptr %v19, i32 64
+  %v38 = getelementptr inbounds i8, ptr %v16, i32 %a1
   br label %b4
 
 b4:                                               ; preds = %b4, %b3
-  %v41 = phi <16 x i32>* [ %v39, %b3 ], [ %v99, %b4 ]
-  %v42 = phi <16 x i32>* [ %v40, %b3 ], [ %v84, %b4 ]
-  %v43 = phi <16 x i32>* [ %v25, %b3 ], [ %v60, %b4 ]
-  %v44 = phi <16 x i32>* [ %v29, %b3 ], [ %v58, %b4 ]
-  %v45 = phi <16 x i32>* [ %v33, %b3 ], [ %v56, %b4 ]
-  %v46 = phi <16 x i32>* [ %v37, %b3 ], [ %v54, %b4 ]
+  %v41 = phi ptr [ %v38, %b3 ], [ %v99, %b4 ]
+  %v42 = phi ptr [ %v16, %b3 ], [ %v84, %b4 ]
+  %v43 = phi ptr [ %v24, %b3 ], [ %v60, %b4 ]
+  %v44 = phi ptr [ %v28, %b3 ], [ %v58, %b4 ]
+  %v45 = phi ptr [ %v32, %b3 ], [ %v56, %b4 ]
+  %v46 = phi ptr [ %v36, %b3 ], [ %v54, %b4 ]
   %v47 = phi i32 [ %a2, %b3 ], [ %v100, %b4 ]
   %v48 = phi <16 x i32> [ %v35, %b3 ], [ %v55, %b4 ]
   %v49 = phi <16 x i32> [ %v31, %b3 ], [ %v57, %b4 ]
@@ -99,14 +86,14 @@ b4:                                               ; preds = %b4, %b3
   %v51 = phi <16 x i32> [ %v23, %b3 ], [ %v61, %b4 ]
   %v52 = phi <16 x i32> [ %v14, %b3 ], [ %v82, %b4 ]
   %v53 = phi <16 x i32> [ %v14, %b3 ], [ %v97, %b4 ]
-  %v54 = getelementptr inbounds <16 x i32>, <16 x i32>* %v46, i32 1
-  %v55 = load <16 x i32>, <16 x i32>* %v46, align 64, !tbaa !11, !alias.scope !12, !noalias !13
-  %v56 = getelementptr inbounds <16 x i32>, <16 x i32>* %v45, i32 1
-  %v57 = load <16 x i32>, <16 x i32>* %v45, align 64, !tbaa !11, !alias.scope !12, !noalias !13
-  %v58 = getelementptr inbounds <16 x i32>, <16 x i32>* %v44, i32 1
-  %v59 = load <16 x i32>, <16 x i32>* %v44, align 64, !tbaa !11, !alias.scope !12, !noalias !13
-  %v60 = getelementptr inbounds <16 x i32>, <16 x i32>* %v43, i32 1
-  %v61 = load <16 x i32>, <16 x i32>* %v43, align 64, !tbaa !11, !alias.scope !12, !noalias !13
+  %v54 = getelementptr inbounds <16 x i32>, ptr %v46, i32 1
+  %v55 = load <16 x i32>, ptr %v46, align 64, !tbaa !11, !alias.scope !12, !noalias !13
+  %v56 = getelementptr inbounds <16 x i32>, ptr %v45, i32 1
+  %v57 = load <16 x i32>, ptr %v45, align 64, !tbaa !11, !alias.scope !12, !noalias !13
+  %v58 = getelementptr inbounds <16 x i32>, ptr %v44, i32 1
+  %v59 = load <16 x i32>, ptr %v44, align 64, !tbaa !11, !alias.scope !12, !noalias !13
+  %v60 = getelementptr inbounds <16 x i32>, ptr %v43, i32 1
+  %v61 = load <16 x i32>, ptr %v43, align 64, !tbaa !11, !alias.scope !12, !noalias !13
   %v62 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v55, <16 x i32> %v48, i32 4) #2
   %v63 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v57, <16 x i32> %v49, i32 4) #2
   %v64 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v59, <16 x i32> %v50, i32 4) #2
@@ -129,8 +116,8 @@ b4:                                               ; preds = %b4, %b3
   %v81 = tail call <16 x i32> @llvm.hexagon.V6.vasrwhsat(<16 x i32> %v79, <16 x i32> %v80, i32 %a5) #2
   %v82 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v78, <16 x i32> %v81) #2
   %v83 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v82, <16 x i32> %v52, i32 1) #2
-  %v84 = getelementptr inbounds <16 x i32>, <16 x i32>* %v42, i32 1
-  store <16 x i32> %v83, <16 x i32>* %v42, align 64, !tbaa !11, !alias.scope !14, !noalias !15
+  %v84 = getelementptr inbounds <16 x i32>, ptr %v42, i32 1
+  store <16 x i32> %v83, ptr %v42, align 64, !tbaa !11, !alias.scope !14, !noalias !15
   %v85 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %v67, i32 %v5, i32 0) #2
   %v86 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %v67, i32 %v5, i32 1) #2
   %v87 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %v85, <32 x i32> %v68, i32 %v8, i32 0) #2
@@ -145,14 +132,14 @@ b4:                                               ; preds = %b4, %b3
   %v96 = tail call <16 x i32> @llvm.hexagon.V6.vasrwhsat(<16 x i32> %v94, <16 x i32> %v95, i32 %a5) #2
   %v97 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v93, <16 x i32> %v96) #2
   %v98 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v97, <16 x i32> %v53, i32 1) #2
-  %v99 = getelementptr inbounds <16 x i32>, <16 x i32>* %v41, i32 1
-  store <16 x i32> %v98, <16 x i32>* %v41, align 64, !tbaa !11, !alias.scope !14, !noalias !15
+  %v99 = getelementptr inbounds <16 x i32>, ptr %v41, i32 1
+  store <16 x i32> %v98, ptr %v41, align 64, !tbaa !11, !alias.scope !14, !noalias !15
   %v100 = add nsw i32 %v47, -64
   %v101 = icmp sgt i32 %v47, 64
   br i1 %v101, label %b4, label %b5
 
 b5:                                               ; preds = %b4, %b2
-  %v102 = getelementptr inbounds i8, i8* %v16, i32 %v13
+  %v102 = getelementptr inbounds i8, ptr %v16, i32 %v13
   %v103 = add nuw nsw i32 %v18, 2
   %v104 = icmp slt i32 %v103, %v0
   br i1 %v104, label %b2, label %b6

diff  --git a/llvm/test/CodeGen/Hexagon/swp-copytophi-dag.ll b/llvm/test/CodeGen/Hexagon/swp-copytophi-dag.ll
index f511241a7c73b..4ee05a8e7c848 100644
--- a/llvm/test/CodeGen/Hexagon/swp-copytophi-dag.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-copytophi-dag.ll
@@ -9,14 +9,14 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @foo(i64* nocapture readonly %r64, i16 zeroext %n, i16 zeroext %s, i64* nocapture %p64) #0 {
+define void @foo(ptr nocapture readonly %r64, i16 zeroext %n, i16 zeroext %s, ptr nocapture %p64) #0 {
 entry:
   %conv = zext i16 %n to i32
   %cmp = icmp eq i16 %n, 0
   br i1 %cmp, label %for.end, label %for.body.preheader
 
 for.body.preheader:                               ; preds = %entry
-  %tmp = load i64, i64* %r64, align 8
+  %tmp = load i64, ptr %r64, align 8
   %v.sroa.0.0.extract.trunc = trunc i64 %tmp to i16
   %v.sroa.4.0.extract.shift = lshr i64 %tmp, 16
   %v.sroa.4.0.extract.trunc = trunc i64 %v.sroa.4.0.extract.shift to i16
@@ -24,34 +24,33 @@ for.body.preheader:                               ; preds = %entry
   %v.sroa.5.0.extract.trunc = trunc i64 %v.sroa.5.0.extract.shift to i16
   %v.sroa.6.0.extract.shift = lshr i64 %tmp, 48
   %v.sroa.6.0.extract.trunc = trunc i64 %v.sroa.6.0.extract.shift to i16
-  %tmp1 = bitcast i64* %p64 to i16*
   %conv2 = zext i16 %s to i32
-  %add.ptr = getelementptr inbounds i16, i16* %tmp1, i32 %conv2
+  %add.ptr = getelementptr inbounds i16, ptr %p64, i32 %conv2
   %add.ptr.sum = add nuw nsw i32 %conv2, 1
-  %add.ptr3 = getelementptr inbounds i16, i16* %tmp1, i32 %add.ptr.sum
+  %add.ptr3 = getelementptr inbounds i16, ptr %p64, i32 %add.ptr.sum
   %add.ptr.sum50 = add nuw nsw i32 %conv2, 2
-  %add.ptr4 = getelementptr inbounds i16, i16* %tmp1, i32 %add.ptr.sum50
+  %add.ptr4 = getelementptr inbounds i16, ptr %p64, i32 %add.ptr.sum50
   %add.ptr.sum51 = add nuw nsw i32 %conv2, 3
-  %add.ptr5 = getelementptr inbounds i16, i16* %tmp1, i32 %add.ptr.sum51
+  %add.ptr5 = getelementptr inbounds i16, ptr %p64, i32 %add.ptr.sum51
   br label %for.body
 
 for.body:                                         ; preds = %for.body, %for.body.preheader
-  %add.ptr11.phi = phi i16* [ %add.ptr11.inc, %for.body ], [ %add.ptr, %for.body.preheader ]
-  %add.ptr16.phi = phi i16* [ %add.ptr16.inc, %for.body ], [ %add.ptr3, %for.body.preheader ]
-  %add.ptr21.phi = phi i16* [ %add.ptr21.inc, %for.body ], [ %add.ptr4, %for.body.preheader ]
-  %add.ptr26.phi = phi i16* [ %add.ptr26.inc, %for.body ], [ %add.ptr5, %for.body.preheader ]
+  %add.ptr11.phi = phi ptr [ %add.ptr11.inc, %for.body ], [ %add.ptr, %for.body.preheader ]
+  %add.ptr16.phi = phi ptr [ %add.ptr16.inc, %for.body ], [ %add.ptr3, %for.body.preheader ]
+  %add.ptr21.phi = phi ptr [ %add.ptr21.inc, %for.body ], [ %add.ptr4, %for.body.preheader ]
+  %add.ptr26.phi = phi ptr [ %add.ptr26.inc, %for.body ], [ %add.ptr5, %for.body.preheader ]
   %i.058.pmt = phi i32 [ %inc.pmt, %for.body ], [ 0, %for.body.preheader ]
   %v.sroa.0.157 = phi i16 [ %v.sroa.0.0.extract.trunc34, %for.body ], [ %v.sroa.0.0.extract.trunc, %for.body.preheader ]
   %v.sroa.4.156 = phi i16 [ %v.sroa.4.0.extract.trunc36, %for.body ], [ %v.sroa.4.0.extract.trunc, %for.body.preheader ]
   %v.sroa.5.155 = phi i16 [ %v.sroa.5.0.extract.trunc38, %for.body ], [ %v.sroa.5.0.extract.trunc, %for.body.preheader ]
   %v.sroa.6.154 = phi i16 [ %v.sroa.6.0.extract.trunc40, %for.body ], [ %v.sroa.6.0.extract.trunc, %for.body.preheader ]
-  %q64.153.pn = phi i64* [ %q64.153, %for.body ], [ %r64, %for.body.preheader ]
-  %q64.153 = getelementptr inbounds i64, i64* %q64.153.pn, i32 1
-  store i16 %v.sroa.0.157, i16* %add.ptr11.phi, align 2
-  store i16 %v.sroa.4.156, i16* %add.ptr16.phi, align 2
-  store i16 %v.sroa.5.155, i16* %add.ptr21.phi, align 2
-  store i16 %v.sroa.6.154, i16* %add.ptr26.phi, align 2
-  %tmp2 = load i64, i64* %q64.153, align 8
+  %q64.153.pn = phi ptr [ %q64.153, %for.body ], [ %r64, %for.body.preheader ]
+  %q64.153 = getelementptr inbounds i64, ptr %q64.153.pn, i32 1
+  store i16 %v.sroa.0.157, ptr %add.ptr11.phi, align 2
+  store i16 %v.sroa.4.156, ptr %add.ptr16.phi, align 2
+  store i16 %v.sroa.5.155, ptr %add.ptr21.phi, align 2
+  store i16 %v.sroa.6.154, ptr %add.ptr26.phi, align 2
+  %tmp2 = load i64, ptr %q64.153, align 8
   %v.sroa.0.0.extract.trunc34 = trunc i64 %tmp2 to i16
   %v.sroa.4.0.extract.shift35 = lshr i64 %tmp2, 16
   %v.sroa.4.0.extract.trunc36 = trunc i64 %v.sroa.4.0.extract.shift35 to i16
@@ -61,10 +60,10 @@ for.body:                                         ; preds = %for.body, %for.body
   %v.sroa.6.0.extract.trunc40 = trunc i64 %v.sroa.6.0.extract.shift39 to i16
   %inc.pmt = add i32 %i.058.pmt, 1
   %cmp8 = icmp slt i32 %inc.pmt, %conv
-  %add.ptr11.inc = getelementptr i16, i16* %add.ptr11.phi, i32 4
-  %add.ptr16.inc = getelementptr i16, i16* %add.ptr16.phi, i32 4
-  %add.ptr21.inc = getelementptr i16, i16* %add.ptr21.phi, i32 4
-  %add.ptr26.inc = getelementptr i16, i16* %add.ptr26.phi, i32 4
+  %add.ptr11.inc = getelementptr i16, ptr %add.ptr11.phi, i32 4
+  %add.ptr16.inc = getelementptr i16, ptr %add.ptr16.phi, i32 4
+  %add.ptr21.inc = getelementptr i16, ptr %add.ptr21.phi, i32 4
+  %add.ptr26.inc = getelementptr i16, ptr %add.ptr26.phi, i32 4
   br i1 %cmp8, label %for.body, label %for.end
 
 for.end:                                          ; preds = %for.body, %entry

diff  --git a/llvm/test/CodeGen/Hexagon/swp-cse-phi.ll b/llvm/test/CodeGen/Hexagon/swp-cse-phi.ll
index 4f30701aa8828..b6d9bb344ae69 100644
--- a/llvm/test/CodeGen/Hexagon/swp-cse-phi.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-cse-phi.ll
@@ -18,7 +18,7 @@ b3:                                               ; preds = %b1
   br i1 undef, label %b4, label %b6
 
 b4:                                               ; preds = %b3
-  %v0 = load i16, i16* undef, align 2
+  %v0 = load i16, ptr undef, align 2
   br label %b7
 
 b5:                                               ; preds = %b7

diff  --git a/llvm/test/CodeGen/Hexagon/swp-dag-phi.ll b/llvm/test/CodeGen/Hexagon/swp-dag-phi.ll
index 54d9492ebac64..40511ea097ef9 100644
--- a/llvm/test/CodeGen/Hexagon/swp-dag-phi.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-dag-phi.ll
@@ -4,7 +4,7 @@
 ; This tests check that a dependence is created between a Phi and it's uses.
 ; An assert occurs if the Phi dependences are not correct.
 
-define void @test1(i32* %f2, i32 %nc) {
+define void @test1(ptr %f2, i32 %nc) {
 entry:
   %i.011 = add i32 %nc, -1
   %cmp12 = icmp sgt i32 %i.011, 1
@@ -12,10 +12,10 @@ entry:
 
 for.body.preheader:
   %0 = add i32 %nc, -2
-  %scevgep = getelementptr i32, i32* %f2, i32 %0
-  %sri = load i32, i32* %scevgep, align 4
-  %scevgep15 = getelementptr i32, i32* %f2, i32 %i.011
-  %sri16 = load i32, i32* %scevgep15, align 4
+  %scevgep = getelementptr i32, ptr %f2, i32 %0
+  %sri = load i32, ptr %scevgep, align 4
+  %scevgep15 = getelementptr i32, ptr %f2, i32 %i.011
+  %sri16 = load i32, ptr %scevgep15, align 4
   br label %for.body
 
 for.body:
@@ -23,12 +23,12 @@ for.body:
   %i.0.in13 = phi i32 [ %i.014, %for.body ], [ %nc, %for.body.preheader ]
   %sr = phi i32 [ %1, %for.body ], [ %sri, %for.body.preheader ]
   %sr17 = phi i32 [ %sr, %for.body ], [ %sri16, %for.body.preheader ]
-  %arrayidx = getelementptr inbounds i32, i32* %f2, i32 %i.014
+  %arrayidx = getelementptr inbounds i32, ptr %f2, i32 %i.014
   %sub1 = add nsw i32 %i.0.in13, -3
-  %arrayidx2 = getelementptr inbounds i32, i32* %f2, i32 %sub1
-  %1 = load i32, i32* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds i32, ptr %f2, i32 %sub1
+  %1 = load i32, ptr %arrayidx2, align 4
   %sub3 = sub nsw i32 %sr17, %1
-  store i32 %sub3, i32* %arrayidx, align 4
+  store i32 %sub3, ptr %arrayidx, align 4
   %i.0 = add nsw i32 %i.014, -1
   %cmp = icmp sgt i32 %i.0, 1
   br i1 %cmp, label %for.body, label %for.end.loopexit

diff  --git a/llvm/test/CodeGen/Hexagon/swp-dag-phi1.ll b/llvm/test/CodeGen/Hexagon/swp-dag-phi1.ll
index f9cb01e1e0d24..4a67ca62513e6 100644
--- a/llvm/test/CodeGen/Hexagon/swp-dag-phi1.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-dag-phi1.ll
@@ -4,17 +4,17 @@
 ; This test check that a dependence is created between a Phi and it's uses.
 ; An assert occurs if the Phi dependences are not correct.
 
-define void @f0(float* nocapture %a0, i32 %a1) #0 {
+define void @f0(ptr nocapture %a0, i32 %a1) #0 {
 b0:
   br i1 undef, label %b1, label %b2
 
 b1:                                               ; preds = %b1, %b0
   %v0 = phi float [ %v1, %b1 ], [ undef, %b0 ]
   %v1 = phi float [ %v13, %b1 ], [ undef, %b0 ]
-  %v2 = phi float* [ null, %b1 ], [ %a0, %b0 ]
+  %v2 = phi ptr [ null, %b1 ], [ %a0, %b0 ]
   %v3 = phi i32 [ %v14, %b1 ], [ 0, %b0 ]
   %v4 = phi float [ %v5, %b1 ], [ undef, %b0 ]
-  %v5 = load float, float* %v2, align 4
+  %v5 = load float, ptr %v2, align 4
   %v6 = fmul float %v1, 0x3FFFA98000000000
   %v7 = fmul float %v0, 0xBFEF550000000000
   %v8 = fadd float %v6, %v7
@@ -23,7 +23,7 @@ b1:                                               ; preds = %b1, %b0
   %v11 = fmul float %v4, 0xBFFFAA0000000000
   %v12 = fadd float %v11, %v10
   %v13 = fadd float undef, %v12
-  store float %v13, float* %v2, align 4
+  store float %v13, ptr %v2, align 4
   %v14 = add nsw i32 %v3, 1
   %v15 = icmp eq i32 %v14, %a1
   br i1 %v15, label %b2, label %b1

diff  --git a/llvm/test/CodeGen/Hexagon/swp-dead-regseq.ll b/llvm/test/CodeGen/Hexagon/swp-dead-regseq.ll
index 180a12744d909..3e30bcd241d26 100644
--- a/llvm/test/CodeGen/Hexagon/swp-dead-regseq.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-dead-regseq.ll
@@ -4,7 +4,7 @@
 ; Check that a dead REG_SEQUENCE doesn't ICE.
 
 ; Function Attrs: nounwind
-define void @f0(i32* nocapture %a0, i32 %a1) #0 {
+define void @f0(ptr nocapture %a0, i32 %a1) #0 {
 b0:
   %v0 = mul nsw i32 %a1, 4
   %v1 = icmp sgt i32 %v0, 0
@@ -12,10 +12,9 @@ b0:
 
 b1:                                               ; preds = %b1, %b0
   %v2 = phi i32 [ %v11, %b1 ], [ 0, %b0 ]
-  %v3 = load i32, i32* null, align 4
+  %v3 = load i32, ptr null, align 4
   %v4 = zext i32 %v3 to i64
-  %v5 = getelementptr inbounds i32, i32* %a0, i32 0
-  %v6 = load i32, i32* %v5, align 4
+  %v6 = load i32, ptr %a0, align 4
   %v7 = zext i32 %v6 to i64
   %v8 = shl nuw i64 %v7, 32
   %v9 = or i64 %v8, %v4
@@ -27,7 +26,7 @@ b1:                                               ; preds = %b1, %b0
 b2:                                               ; preds = %b1, %b0
   %v13 = phi i64 [ 0, %b0 ], [ %v10, %b1 ]
   %v14 = tail call i64 @llvm.hexagon.S2.asr.r.vw(i64 %v13, i32 6)
-  store i64 %v14, i64* null, align 8
+  store i64 %v14, ptr null, align 8
   unreachable
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/swp-dep-neg-offset.ll b/llvm/test/CodeGen/Hexagon/swp-dep-neg-offset.ll
index cc19ce1ae4417..109f7c410da88 100644
--- a/llvm/test/CodeGen/Hexagon/swp-dep-neg-offset.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-dep-neg-offset.ll
@@ -14,7 +14,7 @@
 @g0 = external global [1000000 x i8], align 8
 
 ; Function Attrs: nounwind
-define void @f0(i32 %a0, [1000 x i8]* %a1, [1000 x i8]* %a2) #0 {
+define void @f0(i32 %a0, ptr %a1, ptr %a2) #0 {
 b0:
   br i1 undef, label %b1, label %b7
 
@@ -28,20 +28,20 @@ b3:                                               ; preds = %b3, %b2
   %v0 = phi i32 [ %v17, %b3 ], [ 1, %b2 ]
   %v1 = phi i32 [ %v16, %b3 ], [ 0, %b2 ]
   %v2 = add nsw i32 %v0, -1
-  %v3 = getelementptr inbounds [1000 x i8], [1000 x i8]* %a1, i32 undef, i32 %v2
-  %v4 = load i8, i8* %v3, align 1, !tbaa !0
+  %v3 = getelementptr inbounds [1000 x i8], ptr %a1, i32 undef, i32 %v2
+  %v4 = load i8, ptr %v3, align 1, !tbaa !0
   %v5 = zext i8 %v4 to i32
-  %v6 = getelementptr inbounds [1000000 x i8], [1000000 x i8]* @g0, i32 0, i32 %v1
-  %v7 = load i8, i8* %v6, align 1, !tbaa !0
+  %v6 = getelementptr inbounds [1000000 x i8], ptr @g0, i32 0, i32 %v1
+  %v7 = load i8, ptr %v6, align 1, !tbaa !0
   %v8 = sext i8 %v7 to i32
-  %v9 = getelementptr inbounds [1000 x i8], [1000 x i8]* %a2, i32 undef, i32 %v0
-  %v10 = load i8, i8* %v9, align 1, !tbaa !0
+  %v9 = getelementptr inbounds [1000 x i8], ptr %a2, i32 undef, i32 %v0
+  %v10 = load i8, ptr %v9, align 1, !tbaa !0
   %v11 = sext i8 %v10 to i32
   %v12 = mul nsw i32 %v11, %v8
   %v13 = add nsw i32 %v12, %v5
   %v14 = trunc i32 %v13 to i8
-  %v15 = getelementptr inbounds [1000 x i8], [1000 x i8]* %a1, i32 undef, i32 %v0
-  store i8 %v14, i8* %v15, align 1, !tbaa !0
+  %v15 = getelementptr inbounds [1000 x i8], ptr %a1, i32 undef, i32 %v0
+  store i8 %v14, ptr %v15, align 1, !tbaa !0
   %v16 = add nsw i32 %v1, 1
   %v17 = add nsw i32 %v0, 1
   %v18 = icmp eq i32 %v17, %a0

diff  --git a/llvm/test/CodeGen/Hexagon/swp-disable-Os.ll b/llvm/test/CodeGen/Hexagon/swp-disable-Os.ll
index 5698d37cb23ba..838202e71c50d 100644
--- a/llvm/test/CodeGen/Hexagon/swp-disable-Os.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-disable-Os.ll
@@ -4,7 +4,7 @@
 target triple = "hexagon"
 
 ; Function Attrs: norecurse nounwind optsize readonly
-define i32 @f0(i32 %a0, i8* nocapture readonly %a1, i32 %a2) local_unnamed_addr #0 {
+define i32 @f0(i32 %a0, ptr nocapture readonly %a1, i32 %a2) local_unnamed_addr #0 {
 b0:
   %v0 = lshr i32 %a0, 16
   %v1 = and i32 %a0, 65535
@@ -14,102 +14,102 @@ b0:
 b1:                                               ; preds = %b0, %b3
   %v3 = phi i32 [ %v96, %b3 ], [ %v0, %b0 ]
   %v4 = phi i32 [ %v7, %b3 ], [ %a2, %b0 ]
-  %v5 = phi i8* [ %v94, %b3 ], [ %a1, %b0 ]
+  %v5 = phi ptr [ %v94, %b3 ], [ %a1, %b0 ]
   %v6 = phi i32 [ %v95, %b3 ], [ %v1, %b0 ]
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
   %v8 = phi i32 [ %v6, %b1 ], [ %v89, %b2 ]
-  %v9 = phi i8* [ %v5, %b1 ], [ %v91, %b2 ]
+  %v9 = phi ptr [ %v5, %b1 ], [ %v91, %b2 ]
   %v10 = phi i32 [ %v3, %b1 ], [ %v90, %b2 ]
   %v11 = phi i32 [ 347, %b1 ], [ %v92, %b2 ]
-  %v12 = load i8, i8* %v9, align 1, !tbaa !0
+  %v12 = load i8, ptr %v9, align 1, !tbaa !0
   %v13 = zext i8 %v12 to i32
   %v14 = add i32 %v8, %v13
   %v15 = add i32 %v14, %v10
-  %v16 = getelementptr inbounds i8, i8* %v9, i32 1
-  %v17 = load i8, i8* %v16, align 1, !tbaa !0
+  %v16 = getelementptr inbounds i8, ptr %v9, i32 1
+  %v17 = load i8, ptr %v16, align 1, !tbaa !0
   %v18 = zext i8 %v17 to i32
   %v19 = add i32 %v14, %v18
   %v20 = add i32 %v15, %v19
-  %v21 = getelementptr inbounds i8, i8* %v9, i32 2
-  %v22 = load i8, i8* %v21, align 1, !tbaa !0
+  %v21 = getelementptr inbounds i8, ptr %v9, i32 2
+  %v22 = load i8, ptr %v21, align 1, !tbaa !0
   %v23 = zext i8 %v22 to i32
   %v24 = add i32 %v19, %v23
   %v25 = add i32 %v20, %v24
-  %v26 = getelementptr inbounds i8, i8* %v9, i32 3
-  %v27 = load i8, i8* %v26, align 1, !tbaa !0
+  %v26 = getelementptr inbounds i8, ptr %v9, i32 3
+  %v27 = load i8, ptr %v26, align 1, !tbaa !0
   %v28 = zext i8 %v27 to i32
   %v29 = add i32 %v24, %v28
   %v30 = add i32 %v25, %v29
-  %v31 = getelementptr inbounds i8, i8* %v9, i32 4
-  %v32 = load i8, i8* %v31, align 1, !tbaa !0
+  %v31 = getelementptr inbounds i8, ptr %v9, i32 4
+  %v32 = load i8, ptr %v31, align 1, !tbaa !0
   %v33 = zext i8 %v32 to i32
   %v34 = add i32 %v29, %v33
   %v35 = add i32 %v30, %v34
-  %v36 = getelementptr inbounds i8, i8* %v9, i32 5
-  %v37 = load i8, i8* %v36, align 1, !tbaa !0
+  %v36 = getelementptr inbounds i8, ptr %v9, i32 5
+  %v37 = load i8, ptr %v36, align 1, !tbaa !0
   %v38 = zext i8 %v37 to i32
   %v39 = add i32 %v34, %v38
   %v40 = add i32 %v35, %v39
-  %v41 = getelementptr inbounds i8, i8* %v9, i32 6
-  %v42 = load i8, i8* %v41, align 1, !tbaa !0
+  %v41 = getelementptr inbounds i8, ptr %v9, i32 6
+  %v42 = load i8, ptr %v41, align 1, !tbaa !0
   %v43 = zext i8 %v42 to i32
   %v44 = add i32 %v39, %v43
   %v45 = add i32 %v40, %v44
-  %v46 = getelementptr inbounds i8, i8* %v9, i32 7
-  %v47 = load i8, i8* %v46, align 1, !tbaa !0
+  %v46 = getelementptr inbounds i8, ptr %v9, i32 7
+  %v47 = load i8, ptr %v46, align 1, !tbaa !0
   %v48 = zext i8 %v47 to i32
   %v49 = add i32 %v44, %v48
   %v50 = add i32 %v45, %v49
-  %v51 = getelementptr inbounds i8, i8* %v9, i32 8
-  %v52 = load i8, i8* %v51, align 1, !tbaa !0
+  %v51 = getelementptr inbounds i8, ptr %v9, i32 8
+  %v52 = load i8, ptr %v51, align 1, !tbaa !0
   %v53 = zext i8 %v52 to i32
   %v54 = add i32 %v49, %v53
   %v55 = add i32 %v50, %v54
-  %v56 = getelementptr inbounds i8, i8* %v9, i32 9
-  %v57 = load i8, i8* %v56, align 1, !tbaa !0
+  %v56 = getelementptr inbounds i8, ptr %v9, i32 9
+  %v57 = load i8, ptr %v56, align 1, !tbaa !0
   %v58 = zext i8 %v57 to i32
   %v59 = add i32 %v54, %v58
   %v60 = add i32 %v55, %v59
-  %v61 = getelementptr inbounds i8, i8* %v9, i32 10
-  %v62 = load i8, i8* %v61, align 1, !tbaa !0
+  %v61 = getelementptr inbounds i8, ptr %v9, i32 10
+  %v62 = load i8, ptr %v61, align 1, !tbaa !0
   %v63 = zext i8 %v62 to i32
   %v64 = add i32 %v59, %v63
   %v65 = add i32 %v60, %v64
-  %v66 = getelementptr inbounds i8, i8* %v9, i32 11
-  %v67 = load i8, i8* %v66, align 1, !tbaa !0
+  %v66 = getelementptr inbounds i8, ptr %v9, i32 11
+  %v67 = load i8, ptr %v66, align 1, !tbaa !0
   %v68 = zext i8 %v67 to i32
   %v69 = add i32 %v64, %v68
   %v70 = add i32 %v65, %v69
-  %v71 = getelementptr inbounds i8, i8* %v9, i32 12
-  %v72 = load i8, i8* %v71, align 1, !tbaa !0
+  %v71 = getelementptr inbounds i8, ptr %v9, i32 12
+  %v72 = load i8, ptr %v71, align 1, !tbaa !0
   %v73 = zext i8 %v72 to i32
   %v74 = add i32 %v69, %v73
   %v75 = add i32 %v70, %v74
-  %v76 = getelementptr inbounds i8, i8* %v9, i32 13
-  %v77 = load i8, i8* %v76, align 1, !tbaa !0
+  %v76 = getelementptr inbounds i8, ptr %v9, i32 13
+  %v77 = load i8, ptr %v76, align 1, !tbaa !0
   %v78 = zext i8 %v77 to i32
   %v79 = add i32 %v74, %v78
   %v80 = add i32 %v75, %v79
-  %v81 = getelementptr inbounds i8, i8* %v9, i32 14
-  %v82 = load i8, i8* %v81, align 1, !tbaa !0
+  %v81 = getelementptr inbounds i8, ptr %v9, i32 14
+  %v82 = load i8, ptr %v81, align 1, !tbaa !0
   %v83 = zext i8 %v82 to i32
   %v84 = add i32 %v79, %v83
   %v85 = add i32 %v80, %v84
-  %v86 = getelementptr inbounds i8, i8* %v9, i32 15
-  %v87 = load i8, i8* %v86, align 1, !tbaa !0
+  %v86 = getelementptr inbounds i8, ptr %v9, i32 15
+  %v87 = load i8, ptr %v86, align 1, !tbaa !0
   %v88 = zext i8 %v87 to i32
   %v89 = add i32 %v84, %v88
   %v90 = add i32 %v85, %v89
-  %v91 = getelementptr inbounds i8, i8* %v9, i32 16
+  %v91 = getelementptr inbounds i8, ptr %v9, i32 16
   %v92 = add nsw i32 %v11, -1
   %v93 = icmp eq i32 %v92, 0
   br i1 %v93, label %b3, label %b2
 
 b3:                                               ; preds = %b2
   %v7 = add i32 %v4, -5552
-  %v94 = getelementptr i8, i8* %v5, i32 5552
+  %v94 = getelementptr i8, ptr %v5, i32 5552
   %v95 = urem i32 %v89, 65521
   %v96 = urem i32 %v90, 65521
   %v97 = icmp ugt i32 %v7, 5551

diff  --git a/llvm/test/CodeGen/Hexagon/swp-epilog-numphis.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-numphis.ll
index f57f94bf03cec..030ef52c823af 100644
--- a/llvm/test/CodeGen/Hexagon/swp-epilog-numphis.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-epilog-numphis.ll
@@ -13,17 +13,17 @@ b0:
 
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ %v33, %b1 ], [ %a0, %b0 ]
-  %v1 = phi <16 x i32>* [ %v32, %b1 ], [ undef, %b0 ]
-  %v2 = phi <16 x i32>* [ %v23, %b1 ], [ undef, %b0 ]
-  %v3 = phi <16 x i32>* [ %v10, %b1 ], [ undef, %b0 ]
-  %v4 = phi <16 x i32>* [ %v8, %b1 ], [ null, %b0 ]
+  %v1 = phi ptr [ %v32, %b1 ], [ undef, %b0 ]
+  %v2 = phi ptr [ %v23, %b1 ], [ undef, %b0 ]
+  %v3 = phi ptr [ %v10, %b1 ], [ undef, %b0 ]
+  %v4 = phi ptr [ %v8, %b1 ], [ null, %b0 ]
   %v5 = phi <32 x i32> [ %v12, %b1 ], [ undef, %b0 ]
   %v6 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v5)
   %v7 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v6, <16 x i32> undef, i32 6)
-  %v8 = getelementptr inbounds <16 x i32>, <16 x i32>* %v4, i32 1
-  %v9 = load <16 x i32>, <16 x i32>* %v4, align 64
-  %v10 = getelementptr inbounds <16 x i32>, <16 x i32>* %v3, i32 1
-  %v11 = load <16 x i32>, <16 x i32>* %v3, align 64
+  %v8 = getelementptr inbounds <16 x i32>, ptr %v4, i32 1
+  %v9 = load <16 x i32>, ptr %v4, align 64
+  %v10 = getelementptr inbounds <16 x i32>, ptr %v3, i32 1
+  %v11 = load <16 x i32>, ptr %v3, align 64
   %v12 = tail call <32 x i32> @llvm.hexagon.V6.vsububh(<16 x i32> %v11, <16 x i32> %v9)
   %v13 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v12)
   %v14 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v13, <16 x i32> undef)
@@ -33,21 +33,21 @@ b1:                                               ; preds = %b1, %b0
   %v18 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v16, <16 x i32> undef, i32 2)
   %v19 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> undef, <16 x i32> %v17)
   %v20 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v18, <16 x i32> %v19)
-  %v21 = getelementptr inbounds <16 x i32>, <16 x i32>* %v2, i32 1
-  %v22 = load <16 x i32>, <16 x i32>* %v2, align 64
-  %v23 = getelementptr inbounds <16 x i32>, <16 x i32>* %v2, i32 2
-  %v24 = load <16 x i32>, <16 x i32>* %v21, align 64
+  %v21 = getelementptr inbounds <16 x i32>, ptr %v2, i32 1
+  %v22 = load <16 x i32>, ptr %v2, align 64
+  %v23 = getelementptr inbounds <16 x i32>, ptr %v2, i32 2
+  %v24 = load <16 x i32>, ptr %v21, align 64
   %v25 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v22, <16 x i32> %v7)
   %v26 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v24, <16 x i32> undef)
   %v27 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v25, <16 x i32> %v20)
   %v28 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v26, <16 x i32> %v20)
-  store <16 x i32> %v27, <16 x i32>* %v2, align 64
-  store <16 x i32> %v28, <16 x i32>* %v21, align 64
+  store <16 x i32> %v27, ptr %v2, align 64
+  store <16 x i32> %v28, ptr %v21, align 64
   %v29 = tail call <16 x i32> @llvm.hexagon.V6.vmpyhsrs(<16 x i32> %v27, i32 17760527)
   %v30 = tail call <16 x i32> @llvm.hexagon.V6.vmpyhsrs(<16 x i32> %v28, i32 17760527)
   %v31 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v30, <16 x i32> %v29)
-  %v32 = getelementptr inbounds <16 x i32>, <16 x i32>* %v1, i32 1
-  store <16 x i32> %v31, <16 x i32>* %v1, align 64
+  %v32 = getelementptr inbounds <16 x i32>, ptr %v1, i32 1
+  store <16 x i32> %v31, ptr %v1, align 64
   %v33 = add nsw i32 %v0, -64
   %v34 = icmp sgt i32 %v0, 192
   br i1 %v34, label %b1, label %b2

diff  --git a/llvm/test/CodeGen/Hexagon/swp-epilog-phi10.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-phi10.ll
index 5b5b45d13210c..92ef455ee00fc 100644
--- a/llvm/test/CodeGen/Hexagon/swp-epilog-phi10.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-epilog-phi10.ll
@@ -1,11 +1,11 @@
 ; RUN: llc -march=hexagon -mcpu=hexagonv5 -simplifycfg-require-and-preserve-domtree=1 < %s
 ; REQUIRES: asserts
 
-define void @test(i8* noalias nocapture readonly %src, i32 %srcStride) local_unnamed_addr #0 {
+define void @test(ptr noalias nocapture readonly %src, i32 %srcStride) local_unnamed_addr #0 {
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %src, i32 %srcStride
-  %add.ptr2 = getelementptr inbounds i8, i8* %add.ptr, i32 %srcStride
-  %add.ptr3 = getelementptr inbounds i8, i8* %add.ptr2, i32 %srcStride
+  %add.ptr = getelementptr inbounds i8, ptr %src, i32 %srcStride
+  %add.ptr2 = getelementptr inbounds i8, ptr %add.ptr, i32 %srcStride
+  %add.ptr3 = getelementptr inbounds i8, ptr %add.ptr2, i32 %srcStride
   br label %for.body9.epil
 
 for.body9.epil:
@@ -18,33 +18,33 @@ for.body9.epil:
   %add17.epil = add nuw i32 %inc.sink385.epil, 1
   %conv19.epil = zext i8 %sr.epil to i32
   %add21.epil = add i32 %inc.sink385.epil, 2
-  %arrayidx22.epil = getelementptr inbounds i8, i8* %src, i32 %add21.epil
-  %0 = load i8, i8* %arrayidx22.epil, align 1
+  %arrayidx22.epil = getelementptr inbounds i8, ptr %src, i32 %add21.epil
+  %0 = load i8, ptr %arrayidx22.epil, align 1
   %conv23.epil = zext i8 %0 to i32
-  %1 = load i8, i8* undef, align 1
+  %1 = load i8, ptr undef, align 1
   %conv42.epil = zext i8 %1 to i32
   %conv53.epil = zext i8 %sr432.epil to i32
-  %2 = load i8, i8* undef, align 1
+  %2 = load i8, ptr undef, align 1
   %conv61.epil = zext i8 %2 to i32
-  %3 = load i8, i8* undef, align 1
+  %3 = load i8, ptr undef, align 1
   %conv65.epil = zext i8 %3 to i32
-  %4 = load i8, i8* null, align 1
+  %4 = load i8, ptr null, align 1
   %conv69.epil = zext i8 %4 to i32
-  %5 = load i8, i8* undef, align 1
+  %5 = load i8, ptr undef, align 1
   %conv72.epil = zext i8 %5 to i32
-  %6 = load i8, i8* undef, align 1
+  %6 = load i8, ptr undef, align 1
   %conv76.epil = zext i8 %6 to i32
-  %7 = load i8, i8* undef, align 1
+  %7 = load i8, ptr undef, align 1
   %conv80.epil = zext i8 %7 to i32
-  %8 = load i8, i8* undef, align 1
+  %8 = load i8, ptr undef, align 1
   %conv84.epil = zext i8 %8 to i32
-  %9 = load i8, i8* undef, align 1
+  %9 = load i8, ptr undef, align 1
   %conv88.epil = zext i8 %9 to i32
-  %10 = load i8, i8* undef, align 1
+  %10 = load i8, ptr undef, align 1
   %conv91.epil = zext i8 %10 to i32
-  %11 = load i8, i8* undef, align 1
+  %11 = load i8, ptr undef, align 1
   %conv95.epil = zext i8 %11 to i32
-  %12 = load i8, i8* undef, align 1
+  %12 = load i8, ptr undef, align 1
   %conv99.epil = zext i8 %12 to i32
   %add.epil = add nuw nsw i32 0, %conv19.epil
   %add16.epil = add nuw nsw i32 %add.epil, 0
@@ -74,8 +74,8 @@ for.body9.epil:
   %add101.epil = add nsw i32 %mul.epil, 32768
   %shr369.epil = lshr i32 %add101.epil, 16
   %conv102.epil = trunc i32 %shr369.epil to i8
-  %arrayidx103.epil = getelementptr inbounds i8, i8* undef, i32 %inc.sink385.epil
-  store i8 %conv102.epil, i8* %arrayidx103.epil, align 1
+  %arrayidx103.epil = getelementptr inbounds i8, ptr undef, i32 %inc.sink385.epil
+  store i8 %conv102.epil, ptr %arrayidx103.epil, align 1
   %epil.iter.sub = add i32 %epil.iter, -1
   %epil.iter.cmp = icmp eq i32 %epil.iter.sub, 0
   br i1 %epil.iter.cmp, label %for.end, label %for.body9.epil

diff  --git a/llvm/test/CodeGen/Hexagon/swp-epilog-phi11.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-phi11.ll
index 1fd6757326cc4..f98677570d93d 100644
--- a/llvm/test/CodeGen/Hexagon/swp-epilog-phi11.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-epilog-phi11.ll
@@ -20,9 +20,9 @@ for.body.prol:
   %sr.prol = phi float [ %0, %for.body.prol ], [ undef, %entry ]
   %sr109.prol = phi float [ %sr.prol, %for.body.prol ], [ undef, %entry ]
   %prol.iter = phi i32 [ %prol.iter.sub, %for.body.prol ], [ undef, %entry ]
-  %0 = load float, float* undef, align 4
+  %0 = load float, ptr undef, align 4
   %sub7.prol = fsub contract float %sr109.prol, %0
-  store float %sub7.prol, float* null, align 4
+  store float %sub7.prol, ptr null, align 4
   %prol.iter.sub = add i32 %prol.iter, -1
   %prol.iter.cmp = icmp eq i32 %prol.iter.sub, 0
   br i1 %prol.iter.cmp, label %for.body.prol.loopexit, label %for.body.prol

diff  --git a/llvm/test/CodeGen/Hexagon/swp-epilog-phi12.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-phi12.ll
index 194dfa8d06a71..6395ac3ec6f97 100644
--- a/llvm/test/CodeGen/Hexagon/swp-epilog-phi12.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-epilog-phi12.ll
@@ -10,7 +10,7 @@
 ; CHECK: = add([[REG1]],#8)
 
 ; Function Attrs: nounwind
-define i32* @f0(i16* nocapture readonly %a0, i32 %a1) #0 {
+define ptr @f0(ptr nocapture readonly %a0, i32 %a1) #0 {
 b0:
   %v0 = alloca [129 x i32], align 8
   br i1 undef, label %b1, label %b3
@@ -19,28 +19,28 @@ b1:                                               ; preds = %b0
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
-  %v1 = phi i16* [ %a0, %b1 ], [ %v2, %b2 ]
-  %v2 = phi i16* [ undef, %b1 ], [ %v15, %b2 ]
-  %v3 = phi i32* [ null, %b1 ], [ %v4, %b2 ]
-  %v4 = phi i32* [ null, %b1 ], [ %v14, %b2 ]
+  %v1 = phi ptr [ %a0, %b1 ], [ %v2, %b2 ]
+  %v2 = phi ptr [ undef, %b1 ], [ %v15, %b2 ]
+  %v3 = phi ptr [ null, %b1 ], [ %v4, %b2 ]
+  %v4 = phi ptr [ null, %b1 ], [ %v14, %b2 ]
   %v5 = phi i32 [ 0, %b1 ], [ %v13, %b2 ]
-  %v6 = phi i16* [ undef, %b1 ], [ %v12, %b2 ]
-  %v7 = load i16, i16* %v2, align 2
+  %v6 = phi ptr [ undef, %b1 ], [ %v12, %b2 ]
+  %v7 = load i16, ptr %v2, align 2
   %v8 = sext i16 %v7 to i32
   %v9 = call i32 @llvm.hexagon.M2.mpy.ll.s0(i32 %v8, i32 %v8) #2
-  %v10 = load i16, i16* %v6, align 2
+  %v10 = load i16, ptr %v6, align 2
   %v11 = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s0(i32 %v9, i32 undef, i32 undef) #2
-  store i32 %v11, i32* %v4, align 4
-  %v12 = getelementptr inbounds i16, i16* %v6, i32 -1
+  store i32 %v11, ptr %v4, align 4
+  %v12 = getelementptr inbounds i16, ptr %v6, i32 -1
   %v13 = add i32 %v5, 1
-  %v14 = getelementptr inbounds i32, i32* %v3, i32 2
-  %v15 = getelementptr inbounds i16, i16* %v1, i32 2
+  %v14 = getelementptr inbounds i32, ptr %v3, i32 2
+  %v15 = getelementptr inbounds i16, ptr %v1, i32 2
   %v16 = icmp slt i32 %v13, %a1
   br i1 %v16, label %b2, label %b3
 
 b3:                                               ; preds = %b2, %b0
-  %out = phi i32* [ null, %b0 ], [ %v14, %b2 ]
-  ret i32* %out
+  %out = phi ptr [ null, %b0 ], [ %v14, %b2 ]
+  ret ptr %out
 }
 
 ; Function Attrs: nounwind readnone

diff  --git a/llvm/test/CodeGen/Hexagon/swp-epilog-phi13.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-phi13.ll
index 9448c1912b345..663293220209b 100644
--- a/llvm/test/CodeGen/Hexagon/swp-epilog-phi13.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-epilog-phi13.ll
@@ -10,7 +10,7 @@
 ; CHECK: endloop0
 
 ; Function Attrs: nounwind
-define i32* @f0(i16* nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3,  i16* %b) #0 {
+define ptr @f0(ptr nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3,  ptr %b) #0 {
 b0:
   br i1 undef, label %b1, label %b3
 
@@ -18,32 +18,32 @@ b1:                                               ; preds = %b0
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
-  %v1 = phi i16* [ %a0, %b1 ], [ %v2, %b2 ]
-  %v2 = phi i16* [ undef, %b1 ], [ %v15, %b2 ]
-  %v3 = phi i32* [ null, %b1 ], [ %v4, %b2 ]
-  %v4 = phi i32* [ null, %b1 ], [ %v14, %b2 ]
+  %v1 = phi ptr [ %a0, %b1 ], [ %v2, %b2 ]
+  %v2 = phi ptr [ undef, %b1 ], [ %v15, %b2 ]
+  %v3 = phi ptr [ null, %b1 ], [ %v4, %b2 ]
+  %v4 = phi ptr [ null, %b1 ], [ %v14, %b2 ]
   %v5 = phi i32 [ 0, %b1 ], [ %v13, %b2 ]
-  %v6 = phi i16* [ undef, %b1 ], [ %v12, %b2 ]
+  %v6 = phi ptr [ undef, %b1 ], [ %v12, %b2 ]
   %a = mul i32 %v5, %a2
-  %add = getelementptr inbounds i16, i16* %b, i32 %a
-  %v7 = load i16, i16* %add, align 2
+  %add = getelementptr inbounds i16, ptr %b, i32 %a
+  %v7 = load i16, ptr %add, align 2
   %v8 = sext i16 %v7 to i32
   %v9 = call i32 @llvm.hexagon.M2.mpy.ll.s0(i32 %v8, i32 %v8) #2
   %v92 = call i32 @llvm.hexagon.M2.mpy.ll.s0(i32 %v9, i32 %v9) #2
   %v93 = call i32 @llvm.hexagon.M2.mpy.ll.s0(i32 %v92, i32 %v92) #2
   %v11 = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s0(i32 %v8, i32 undef, i32 undef) #2
-  store i32 %v11, i32* %v4, align 4
-  %v12 = getelementptr inbounds i16, i16* %v6, i32 -1
+  store i32 %v11, ptr %v4, align 4
+  %v12 = getelementptr inbounds i16, ptr %v6, i32 -1
   %v13 = add i32 %v5, 1
-  %v14 = getelementptr inbounds i32, i32* %v3, i32 2
-  store i32 %v93, i32* %v14, align 4
-  %v15 = getelementptr inbounds i16, i16* %v1, i32 2
+  %v14 = getelementptr inbounds i32, ptr %v3, i32 2
+  store i32 %v93, ptr %v14, align 4
+  %v15 = getelementptr inbounds i16, ptr %v1, i32 2
   %v16 = icmp slt i32 %v13, %a1
   br i1 %v16, label %b2, label %b3
 
 b3:                                               ; preds = %b2, %b0
-  %out = phi i32* [ null, %b0 ], [ %v14, %b2 ]
-  ret i32* %out
+  %out = phi ptr [ null, %b0 ], [ %v14, %b2 ]
+  ret ptr %out
 }
 
 ; Function Attrs: nounwind readnone

diff  --git a/llvm/test/CodeGen/Hexagon/swp-epilog-phi2.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-phi2.ll
index b32fed97f26f1..e1777d0e0b260 100644
--- a/llvm/test/CodeGen/Hexagon/swp-epilog-phi2.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-epilog-phi2.ll
@@ -1,22 +1,22 @@
 ; RUN: llc -march=hexagon -enable-pipeliner -pipeliner-max-stages=3 < %s -pipeliner-experimental-cg=true | FileCheck %s
 
-%s.0 = type { i16, i8, i8, i16, i8, i8, i16, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i32, i16, i8, i8, %s.1, [2 x [16 x %s.2]], i32 (i8*, i8*, i8*, i8*, i8*)*, %s.3*, %s.3*, [120 x i8], i8, i8, %s.4*, [2 x [120 x [8 x i8]]], [56 x i8], [2 x [121 x %s.5]], [2 x %s.5], %s.5*, %s.5*, i32, i32, i16, i8, i8, %s.7, %s.9, %s.11, %s.8*, %s.8* }
+%s.0 = type { i16, i8, i8, i16, i8, i8, i16, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i32, i16, i8, i8, %s.1, [2 x [16 x %s.2]], ptr, ptr, ptr, [120 x i8], i8, i8, ptr, [2 x [120 x [8 x i8]]], [56 x i8], [2 x [121 x %s.5]], [2 x %s.5], ptr, ptr, i32, i32, i16, i8, i8, %s.7, %s.9, %s.11, ptr, ptr }
 %s.1 = type { i8, i8, i8, i8, i8, i8, i8, i8, i32, i8, [16 x i8], i8, [4 x i8], [32 x i16], [32 x i16], [2 x i8], [4 x i8], [2 x [4 x i8]], [2 x [4 x i8]], i32, i32, i16, i8 }
 %s.2 = type { [2 x i16] }
-%s.3 = type { i16*, i16*, i32, i32 }
-%s.4 = type { i8*, i8*, i8*, i32, i32, i32, i32 }
+%s.3 = type { ptr, ptr, i32, i32 }
+%s.4 = type { ptr, ptr, ptr, i32, i32, i32, i32 }
 %s.5 = type { %s.6, [2 x [4 x %s.2]], [2 x [2 x i8]], [2 x i8] }
 %s.6 = type { i8, i8, i8, i8, i8, i8, i8, i8, i32 }
 %s.7 = type { [12 x %s.8], [4 x %s.8], [2 x %s.8], [4 x %s.8], [6 x %s.8], [2 x [7 x %s.8]], [4 x %s.8], [3 x [4 x %s.8]], [3 x %s.8], [3 x %s.8] }
 %s.8 = type { i8, i8 }
 %s.9 = type { [371 x %s.8], [6 x %s.10] }
-%s.10 = type { %s.8*, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
-%s.11 = type { i32, i32, i8* }
+%s.10 = type { ptr, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
+%s.11 = type { i32, i32, ptr }
 
 ; Function Attrs: nounwind
-define void @f0(%s.0* %a0, i8* %a1, i16* %a2, i16** %a3, i16** %a4, i32 %a5) #0 {
+define void @f0(ptr %a0, ptr %a1, ptr %a2, ptr %a3, ptr %a4, i32 %a5) #0 {
 b0:
-  %v0 = load i8, i8* %a1, align 1, !tbaa !0
+  %v0 = load i8, ptr %a1, align 1, !tbaa !0
   %v1 = icmp eq i8 %v0, 1
   br i1 %v1, label %b1, label %b2
 
@@ -26,26 +26,26 @@ b0:
 ; CHECK: }{{[ \t]*}}:endloop0
 
 b1:                                               ; preds = %b1, %b0
-  %v2 = phi i16* [ %v17, %b1 ], [ %a2, %b0 ]
+  %v2 = phi ptr [ %v17, %b1 ], [ %a2, %b0 ]
   %v3 = phi i32 [ %v18, %b1 ], [ 0, %b0 ]
-  %v4 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 25, i32 10, i32 %v3
-  %v5 = load i8, i8* %v4, align 1, !tbaa !0
+  %v4 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 25, i32 10, i32 %v3
+  %v5 = load i8, ptr %v4, align 1, !tbaa !0
   %v6 = zext i8 %v5 to i16
   %v7 = add nsw i32 %v3, 1
-  %v8 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 25, i32 10, i32 %v7
-  %v9 = load i8, i8* %v8, align 1, !tbaa !0
+  %v8 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 25, i32 10, i32 %v7
+  %v9 = load i8, ptr %v8, align 1, !tbaa !0
   %v10 = or i16 0, %v6
-  %v11 = load i8, i8* %a1, align 1, !tbaa !0
+  %v11 = load i8, ptr %a1, align 1, !tbaa !0
   %v12 = zext i8 %v11 to i16
   %v13 = shl nuw i16 %v12, 8
   %v14 = or i16 %v10, %v13
   %v15 = or i16 %v14, 0
-  %v16 = getelementptr inbounds i16, i16* %v2, i32 1
-  store i16* %v16, i16** %a3, align 4, !tbaa !3
-  store i16 %v15, i16* %v2, align 2, !tbaa !5
-  %v17 = getelementptr inbounds i16, i16* %v2, i32 2
-  store i16* %v17, i16** %a4, align 4, !tbaa !3
-  store i16 0, i16* %v16, align 2, !tbaa !5
+  %v16 = getelementptr inbounds i16, ptr %v2, i32 1
+  store ptr %v16, ptr %a3, align 4, !tbaa !3
+  store i16 %v15, ptr %v2, align 2, !tbaa !5
+  %v17 = getelementptr inbounds i16, ptr %v2, i32 2
+  store ptr %v17, ptr %a4, align 4, !tbaa !3
+  store i16 0, ptr %v16, align 2, !tbaa !5
   %v18 = add nsw i32 %v3, 8
   %v19 = icmp slt i32 %v18, %a5
   br i1 %v19, label %b1, label %b2

diff  --git a/llvm/test/CodeGen/Hexagon/swp-epilog-phi4.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-phi4.ll
index 8b611cfe0b4f2..ee253fa0885d0 100644
--- a/llvm/test/CodeGen/Hexagon/swp-epilog-phi4.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-epilog-phi4.ll
@@ -8,20 +8,20 @@
 ; CHECK-NOT: r{{[0-9]+}} = r{{[0-9]+}}
 
 ; Function Attrs: nounwind
-define void @f0(i32 %a0, i32* %a1, [1000 x i32]* %a2, i32* %a3, i32* %a4) #0 {
+define void @f0(i32 %a0, ptr %a1, ptr %a2, ptr %a3, ptr %a4) #0 {
 b0:
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ %v8, %b1 ], [ 1, %b0 ]
-  %v1 = load i32, i32* %a3, align 4, !tbaa !0
-  %v2 = getelementptr inbounds i32, i32* %a1, i32 %v0
-  %v3 = load i32, i32* %v2, align 4, !tbaa !0
-  %v4 = load i32, i32* %a4, align 4, !tbaa !0
+  %v1 = load i32, ptr %a3, align 4, !tbaa !0
+  %v2 = getelementptr inbounds i32, ptr %a1, i32 %v0
+  %v3 = load i32, ptr %v2, align 4, !tbaa !0
+  %v4 = load i32, ptr %a4, align 4, !tbaa !0
   %v5 = mul nsw i32 %v4, %v3
   %v6 = add nsw i32 %v5, %v1
-  %v7 = getelementptr inbounds [1000 x i32], [1000 x i32]* %a2, i32 %v0, i32 0
-  store i32 %v6, i32* %v7, align 4, !tbaa !0
+  %v7 = getelementptr inbounds [1000 x i32], ptr %a2, i32 %v0, i32 0
+  store i32 %v6, ptr %v7, align 4, !tbaa !0
   %v8 = add nsw i32 %v0, 1
   %v9 = icmp eq i32 %v8, %a0
   br i1 %v9, label %b2, label %b1

diff  --git a/llvm/test/CodeGen/Hexagon/swp-epilog-phi5.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-phi5.ll
index 72c05284d6953..b193ce60a92e5 100644
--- a/llvm/test/CodeGen/Hexagon/swp-epilog-phi5.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-epilog-phi5.ll
@@ -12,114 +12,114 @@
 ; CHECK: [[REG2:r([0-9]+)]] = add([[REG1:r([0-9]+)]],add([[REG0]],#8
 ; CHECK: endloop1
 
-%s.0 = type { %s.1*, %s.4*, %s.7*, i8*, i8, i32, %s.8*, i32, i32, i32, i8, i8, i32, i32, double, i8, i8, i8, i8, i8, i8, i8, i8, i32, i8, i8, i8, i32, i32, i32, i32, i32, i32, i8**, i32, i32, i32, i32, i32, [64 x i32]*, [4 x %s.9*], [4 x %s.10*], [4 x %s.10*], i32, %s.23*, i8, i8, [16 x i8], [16 x i8], [16 x i8], i32, i8, i8, i8, i8, i16, i16, i8, i8, i8, %s.11*, i32, i32, i32, i32, i8*, i32, [4 x %s.23*], i32, i32, i32, [10 x i32], i32, i32, i32, i32, i32, %s.12*, %s.13*, %s.14*, %s.15*, %s.16*, %s.17*, %s.18*, %s.19*, %s.20*, %s.21*, %s.22* }
-%s.1 = type { void (%s.2*)*, void (%s.2*, i32)*, void (%s.2*)*, void (%s.2*, i8*)*, void (%s.2*)*, i32, %s.3, i32, i32, i8**, i32, i8**, i32, i32 }
-%s.2 = type { %s.1*, %s.4*, %s.7*, i8*, i8, i32 }
+%s.0 = type { ptr, ptr, ptr, ptr, i8, i32, ptr, i32, i32, i32, i8, i8, i32, i32, double, i8, i8, i8, i8, i8, i8, i8, i8, i32, i8, i8, i8, i32, i32, i32, i32, i32, i32, ptr, i32, i32, i32, i32, i32, ptr, [4 x ptr], [4 x ptr], [4 x ptr], i32, ptr, i8, i8, [16 x i8], [16 x i8], [16 x i8], i32, i8, i8, i8, i8, i16, i16, i8, i8, i8, ptr, i32, i32, i32, i32, ptr, i32, [4 x ptr], i32, i32, i32, [10 x i32], i32, i32, i32, i32, i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
+%s.1 = type { ptr, ptr, ptr, ptr, ptr, i32, %s.3, i32, i32, ptr, i32, ptr, i32, i32 }
+%s.2 = type { ptr, ptr, ptr, ptr, i8, i32 }
 %s.3 = type { [8 x i32], [48 x i8] }
-%s.4 = type { i8* (%s.2*, i32, i32)*, i8* (%s.2*, i32, i32)*, i8** (%s.2*, i32, i32, i32)*, [64 x i16]** (%s.2*, i32, i32, i32)*, %s.5* (%s.2*, i32, i8, i32, i32, i32)*, %s.6* (%s.2*, i32, i8, i32, i32, i32)*, {}*, i8** (%s.2*, %s.5*, i32, i32, i8)*, [64 x i16]** (%s.2*, %s.6*, i32, i32, i8)*, void (%s.2*, i32)*, {}*, i32, i32 }
+%s.4 = type { ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i32, i32 }
 %s.5 = type opaque
 %s.6 = type opaque
-%s.7 = type { {}*, i32, i32, i32, i32 }
-%s.8 = type { i8*, i32, {}*, i8 (%s.0*)*, void (%s.0*, i32)*, i8 (%s.0*, i32)*, {}* }
+%s.7 = type { ptr, i32, i32, i32, i32 }
+%s.8 = type { ptr, i32, ptr, ptr, ptr, ptr, ptr }
 %s.9 = type { [64 x i16], i8 }
 %s.10 = type { [17 x i8], [256 x i8], i8 }
-%s.11 = type { %s.11*, i8, i32, i32, i8* }
-%s.12 = type { {}*, {}*, i8 }
-%s.13 = type { void (%s.0*, i8)*, void (%s.0*, i8**, i32*, i32)* }
-%s.14 = type { {}*, i32 (%s.0*)*, {}*, i32 (%s.0*, i8***)*, %s.6** }
-%s.15 = type { void (%s.0*, i8)*, void (%s.0*, i8***, i32*, i32, i8**, i32*, i32)* }
-%s.16 = type { i32 (%s.0*)*, {}*, {}*, {}*, i8, i8 }
-%s.17 = type { {}*, i32 (%s.0*)*, i8 (%s.0*)*, i8, i8, i32, i32 }
-%s.18 = type { {}*, i8 (%s.0*, [64 x i16]**)*, i8 }
-%s.19 = type { {}*, [5 x void (%s.0*, %s.23*, i16*, i8**, i32)*] }
-%s.20 = type { {}*, void (%s.0*, i8***, i32*, i32, i8**, i32*, i32)*, i8 }
-%s.21 = type { {}*, void (%s.0*, i8***, i32, i8**, i32)* }
-%s.22 = type { void (%s.0*, i8)*, void (%s.0*, i8**, i8**, i32)*, {}*, {}* }
-%s.23 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i32, i32, i32, i32, i32, i32, %s.9*, i8* }
+%s.11 = type { ptr, i8, i32, i32, ptr }
+%s.12 = type { ptr, ptr, i8 }
+%s.13 = type { ptr, ptr }
+%s.14 = type { ptr, ptr, ptr, ptr, ptr }
+%s.15 = type { ptr, ptr }
+%s.16 = type { ptr, ptr, ptr, ptr, i8, i8 }
+%s.17 = type { ptr, ptr, ptr, i8, i8, i32, i32 }
+%s.18 = type { ptr, ptr, i8 }
+%s.19 = type { ptr, [5 x ptr] }
+%s.20 = type { ptr, ptr, i8 }
+%s.21 = type { ptr, ptr }
+%s.22 = type { ptr, ptr, ptr, ptr }
+%s.23 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i32, i32, i32, i32, i32, i32, ptr, ptr }
 
 ; Function Attrs: nounwind optsize
-define hidden void @f0(%s.0* nocapture readonly %a0, %s.23* nocapture readonly %a1, i8** nocapture readonly %a2, i8*** nocapture readonly %a3) #0 {
+define hidden void @f0(ptr nocapture readonly %a0, ptr nocapture readonly %a1, ptr nocapture readonly %a2, ptr nocapture readonly %a3) #0 {
 b0:
-  %v0 = load i8**, i8*** %a3, align 4
-  %v1 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 62
-  %v2 = load i32, i32* %v1, align 4
+  %v0 = load ptr, ptr %a3, align 4
+  %v1 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 62
+  %v2 = load i32, ptr %v1, align 4
   %v3 = icmp sgt i32 %v2, 0
   br i1 %v3, label %b1, label %b10
 
 b1:                                               ; preds = %b0
-  %v4 = getelementptr inbounds %s.23, %s.23* %a1, i32 0, i32 10
+  %v4 = getelementptr inbounds %s.23, ptr %a1, i32 0, i32 10
   br label %b2
 
 b2:                                               ; preds = %b8, %b1
   %v5 = phi i32 [ 0, %b1 ], [ %v98, %b8 ]
   %v6 = phi i32 [ 0, %b1 ], [ %v99, %b8 ]
-  %v7 = getelementptr inbounds i8*, i8** %a2, i32 %v6
+  %v7 = getelementptr inbounds ptr, ptr %a2, i32 %v6
   br label %b3
 
 b3:                                               ; preds = %b7, %b2
   %v8 = phi i32 [ 0, %b2 ], [ %v96, %b7 ]
   %v9 = phi i32 [ %v5, %b2 ], [ %v16, %b7 ]
-  %v10 = load i8*, i8** %v7, align 4
+  %v10 = load ptr, ptr %v7, align 4
   %v11 = icmp eq i32 %v8, 0
   %v12 = select i1 %v11, i32 -1, i32 1
   %v13 = add i32 %v12, %v6
-  %v14 = getelementptr inbounds i8*, i8** %a2, i32 %v13
-  %v15 = load i8*, i8** %v14, align 4
+  %v14 = getelementptr inbounds ptr, ptr %a2, i32 %v13
+  %v15 = load ptr, ptr %v14, align 4
   %v16 = add nsw i32 %v9, 1
-  %v17 = getelementptr inbounds i8*, i8** %v0, i32 %v9
-  %v18 = load i8*, i8** %v17, align 4
-  %v19 = getelementptr inbounds i8, i8* %v10, i32 1
-  %v20 = load i8, i8* %v10, align 1
+  %v17 = getelementptr inbounds ptr, ptr %v0, i32 %v9
+  %v18 = load ptr, ptr %v17, align 4
+  %v19 = getelementptr inbounds i8, ptr %v10, i32 1
+  %v20 = load i8, ptr %v10, align 1
   %v21 = zext i8 %v20 to i32
   %v22 = mul nsw i32 %v21, 3
-  %v23 = getelementptr inbounds i8, i8* %v15, i32 1
-  %v24 = load i8, i8* %v15, align 1
+  %v23 = getelementptr inbounds i8, ptr %v15, i32 1
+  %v24 = load i8, ptr %v15, align 1
   %v25 = zext i8 %v24 to i32
   %v26 = add nsw i32 %v22, %v25
-  %v27 = load i8, i8* %v19, align 1
+  %v27 = load i8, ptr %v19, align 1
   %v28 = zext i8 %v27 to i32
   %v29 = mul nsw i32 %v28, 3
-  %v30 = load i8, i8* %v23, align 1
+  %v30 = load i8, ptr %v23, align 1
   %v31 = zext i8 %v30 to i32
   %v32 = add nsw i32 %v29, %v31
   %v33 = mul nsw i32 %v26, 4
   %v34 = add nsw i32 %v33, 8
   %v35 = lshr i32 %v34, 4
   %v36 = trunc i32 %v35 to i8
-  %v37 = getelementptr inbounds i8, i8* %v18, i32 1
-  store i8 %v36, i8* %v18, align 1
+  %v37 = getelementptr inbounds i8, ptr %v18, i32 1
+  store i8 %v36, ptr %v18, align 1
   %v38 = mul nsw i32 %v26, 3
   %v39 = add i32 %v38, 7
   %v40 = add i32 %v39, %v32
   %v41 = lshr i32 %v40, 4
   %v42 = trunc i32 %v41 to i8
-  store i8 %v42, i8* %v37, align 1
-  %v43 = load i32, i32* %v4, align 4
+  store i8 %v42, ptr %v37, align 1
+  %v43 = load i32, ptr %v4, align 4
   %v44 = add i32 %v43, -2
-  %v45 = getelementptr inbounds i8, i8* %v18, i32 2
+  %v45 = getelementptr inbounds i8, ptr %v18, i32 2
   %v46 = icmp eq i32 %v44, 0
   br i1 %v46, label %b7, label %b4
 
 b4:                                               ; preds = %b3
-  %v47 = getelementptr inbounds i8, i8* %v15, i32 2
-  %v48 = getelementptr inbounds i8, i8* %v10, i32 2
+  %v47 = getelementptr inbounds i8, ptr %v15, i32 2
+  %v48 = getelementptr inbounds i8, ptr %v10, i32 2
   %v49 = mul i32 %v43, 2
   br label %b5
 
 b5:                                               ; preds = %b5, %b4
-  %v50 = phi i8* [ %v45, %b4 ], [ %v76, %b5 ]
+  %v50 = phi ptr [ %v45, %b4 ], [ %v76, %b5 ]
   %v51 = phi i32 [ %v44, %b4 ], [ %v75, %b5 ]
   %v52 = phi i32 [ %v26, %b4 ], [ %v53, %b5 ]
   %v53 = phi i32 [ %v32, %b4 ], [ %v64, %b5 ]
-  %v54 = phi i8* [ %v18, %b4 ], [ %v50, %b5 ]
-  %v55 = phi i8* [ %v47, %b4 ], [ %v61, %b5 ]
-  %v56 = phi i8* [ %v48, %b4 ], [ %v57, %b5 ]
-  %v57 = getelementptr inbounds i8, i8* %v56, i32 1
-  %v58 = load i8, i8* %v56, align 1
+  %v54 = phi ptr [ %v18, %b4 ], [ %v50, %b5 ]
+  %v55 = phi ptr [ %v47, %b4 ], [ %v61, %b5 ]
+  %v56 = phi ptr [ %v48, %b4 ], [ %v57, %b5 ]
+  %v57 = getelementptr inbounds i8, ptr %v56, i32 1
+  %v58 = load i8, ptr %v56, align 1
   %v59 = zext i8 %v58 to i32
   %v60 = mul nsw i32 %v59, 3
-  %v61 = getelementptr inbounds i8, i8* %v55, i32 1
-  %v62 = load i8, i8* %v55, align 1
+  %v61 = getelementptr inbounds i8, ptr %v55, i32 1
+  %v62 = load i8, ptr %v55, align 1
   %v63 = zext i8 %v62 to i32
   %v64 = add nsw i32 %v60, %v63
   %v65 = mul nsw i32 %v53, 3
@@ -127,42 +127,42 @@ b5:                                               ; preds = %b5, %b4
   %v67 = add i32 %v66, %v65
   %v68 = lshr i32 %v67, 4
   %v69 = trunc i32 %v68 to i8
-  %v70 = getelementptr inbounds i8, i8* %v54, i32 3
-  store i8 %v69, i8* %v50, align 1
+  %v70 = getelementptr inbounds i8, ptr %v54, i32 3
+  store i8 %v69, ptr %v50, align 1
   %v71 = add i32 %v65, 7
   %v72 = add i32 %v71, %v64
   %v73 = lshr i32 %v72, 4
   %v74 = trunc i32 %v73 to i8
-  store i8 %v74, i8* %v70, align 1
+  store i8 %v74, ptr %v70, align 1
   %v75 = add i32 %v51, -1
-  %v76 = getelementptr inbounds i8, i8* %v50, i32 2
+  %v76 = getelementptr inbounds i8, ptr %v50, i32 2
   %v77 = icmp eq i32 %v75, 0
   br i1 %v77, label %b6, label %b5
 
 b6:                                               ; preds = %b5
   %v78 = add i32 %v49, -2
-  %v79 = getelementptr i8, i8* %v18, i32 %v78
+  %v79 = getelementptr i8, ptr %v18, i32 %v78
   %v80 = add i32 %v49, -4
-  %v81 = getelementptr i8, i8* %v18, i32 %v80
+  %v81 = getelementptr i8, ptr %v18, i32 %v80
   br label %b7
 
 b7:                                               ; preds = %b6, %b3
-  %v82 = phi i8* [ %v79, %b6 ], [ %v45, %b3 ]
+  %v82 = phi ptr [ %v79, %b6 ], [ %v45, %b3 ]
   %v83 = phi i32 [ %v53, %b6 ], [ %v26, %b3 ]
   %v84 = phi i32 [ %v64, %b6 ], [ %v32, %b3 ]
-  %v85 = phi i8* [ %v81, %b6 ], [ %v18, %b3 ]
+  %v85 = phi ptr [ %v81, %b6 ], [ %v18, %b3 ]
   %v86 = mul nsw i32 %v84, 3
   %v87 = add i32 %v83, 8
   %v88 = add i32 %v87, %v86
   %v89 = lshr i32 %v88, 4
   %v90 = trunc i32 %v89 to i8
-  %v91 = getelementptr inbounds i8, i8* %v85, i32 3
-  store i8 %v90, i8* %v82, align 1
+  %v91 = getelementptr inbounds i8, ptr %v85, i32 3
+  store i8 %v90, ptr %v82, align 1
   %v92 = mul nsw i32 %v84, 4
   %v93 = add nsw i32 %v92, 7
   %v94 = lshr i32 %v93, 4
   %v95 = trunc i32 %v94 to i8
-  store i8 %v95, i8* %v91, align 1
+  store i8 %v95, ptr %v91, align 1
   %v96 = add nsw i32 %v8, 1
   %v97 = icmp eq i32 %v96, 2
   br i1 %v97, label %b8, label %b3
@@ -170,7 +170,7 @@ b7:                                               ; preds = %b6, %b3
 b8:                                               ; preds = %b7
   %v98 = add i32 %v5, 2
   %v99 = add nsw i32 %v6, 1
-  %v100 = load i32, i32* %v1, align 4
+  %v100 = load i32, ptr %v1, align 4
   %v101 = icmp slt i32 %v98, %v100
   br i1 %v101, label %b2, label %b9
 

diff  --git a/llvm/test/CodeGen/Hexagon/swp-epilog-phi6.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-phi6.ll
index 1751efbba09f3..f440cc8f99932 100644
--- a/llvm/test/CodeGen/Hexagon/swp-epilog-phi6.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-epilog-phi6.ll
@@ -23,10 +23,10 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ %a0, %b1 ], [ %v13, %b2 ]
-  %v2 = phi <16 x i32>* [ null, %b1 ], [ %v3, %b2 ]
-  %v3 = getelementptr inbounds <16 x i32>, <16 x i32>* %v2, i32 1
-  %v4 = load <16 x i32>, <16 x i32>* %v2, align 64
-  %v5 = load <16 x i32>, <16 x i32>* undef, align 64
+  %v2 = phi ptr [ null, %b1 ], [ %v3, %b2 ]
+  %v3 = getelementptr inbounds <16 x i32>, ptr %v2, i32 1
+  %v4 = load <16 x i32>, ptr %v2, align 64
+  %v5 = load <16 x i32>, ptr undef, align 64
   %v6 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v5, <16 x i32> undef, i32 1)
   %v7 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> undef, <16 x i32> %v6)
   %v8 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> undef, <32 x i32> %v7, i32 undef)
@@ -34,7 +34,7 @@ b2:                                               ; preds = %b2, %b1
   %v10 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v9, <16 x i32> zeroinitializer, i32 undef)
   %v11 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v10)
   %v12 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> undef, <16 x i32> %v11, i32 %a1)
-  store <16 x i32> %v12, <16 x i32>* null, align 64
+  store <16 x i32> %v12, ptr null, align 64
   %v13 = add nsw i32 %v1, -64
   %v14 = icmp sgt i32 %v13, 64
   br i1 %v14, label %b2, label %b3

diff  --git a/llvm/test/CodeGen/Hexagon/swp-epilog-phi8.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-phi8.ll
index 214307e25137d..74bbdb99e1ba2 100644
--- a/llvm/test/CodeGen/Hexagon/swp-epilog-phi8.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-epilog-phi8.ll
@@ -25,12 +25,12 @@ b3:                                               ; preds = %b3, %b2
   %v0 = phi i32 [ %v8, %b3 ], [ 7, %b2 ]
   %v1 = phi i32 [ %v6, %b3 ], [ undef, %b2 ]
   %v2 = phi i32 [ %v1, %b3 ], [ undef, %b2 ]
-  %v3 = getelementptr inbounds [9 x i32], [9 x i32]* undef, i32 0, i32 %v0
+  %v3 = getelementptr inbounds [9 x i32], ptr undef, i32 0, i32 %v0
   %v4 = add nsw i32 %v0, -2
-  %v5 = getelementptr inbounds [9 x i32], [9 x i32]* undef, i32 0, i32 %v4
-  %v6 = load i32, i32* %v5, align 4
+  %v5 = getelementptr inbounds [9 x i32], ptr undef, i32 0, i32 %v4
+  %v6 = load i32, ptr %v5, align 4
   %v7 = tail call i32 @llvm.hexagon.A2.subsat(i32 %v2, i32 %v6)
-  store i32 %v7, i32* %v3, align 4
+  store i32 %v7, ptr %v3, align 4
   %v8 = add i32 %v0, -1
   %v9 = icmp sgt i32 %v8, 1
   br i1 %v9, label %b3, label %b4

diff  --git a/llvm/test/CodeGen/Hexagon/swp-epilog-phi9.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-phi9.ll
index 15e63e19e45e8..6f35ecdd9c659 100644
--- a/llvm/test/CodeGen/Hexagon/swp-epilog-phi9.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-epilog-phi9.ll
@@ -12,7 +12,7 @@
 ; CHECK: [[REG0]] = add(r{{[0-9]+}},#8)
 
 ; Function Attrs: nounwind
-define void @f0(i16* nocapture readonly %a0, i32 %a1) #0 {
+define void @f0(ptr nocapture readonly %a0, i32 %a1) #0 {
 b0:
   %v0 = alloca [129 x i32], align 8
   br i1 undef, label %b1, label %b3
@@ -21,22 +21,22 @@ b1:                                               ; preds = %b0
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
-  %v1 = phi i16* [ %a0, %b1 ], [ %v2, %b2 ]
-  %v2 = phi i16* [ undef, %b1 ], [ %v15, %b2 ]
-  %v3 = phi i32* [ null, %b1 ], [ %v4, %b2 ]
-  %v4 = phi i32* [ null, %b1 ], [ %v14, %b2 ]
+  %v1 = phi ptr [ %a0, %b1 ], [ %v2, %b2 ]
+  %v2 = phi ptr [ undef, %b1 ], [ %v15, %b2 ]
+  %v3 = phi ptr [ null, %b1 ], [ %v4, %b2 ]
+  %v4 = phi ptr [ null, %b1 ], [ %v14, %b2 ]
   %v5 = phi i32 [ 0, %b1 ], [ %v13, %b2 ]
-  %v6 = phi i16* [ undef, %b1 ], [ %v12, %b2 ]
-  %v7 = load i16, i16* %v2, align 2
+  %v6 = phi ptr [ undef, %b1 ], [ %v12, %b2 ]
+  %v7 = load i16, ptr %v2, align 2
   %v8 = sext i16 %v7 to i32
   %v9 = call i32 @llvm.hexagon.M2.mpy.ll.s0(i32 %v8, i32 %v8) #2
-  %v10 = load i16, i16* %v6, align 2
+  %v10 = load i16, ptr %v6, align 2
   %v11 = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s0(i32 %v9, i32 undef, i32 undef) #2
-  store i32 %v11, i32* %v4, align 4
-  %v12 = getelementptr inbounds i16, i16* %v6, i32 -1
+  store i32 %v11, ptr %v4, align 4
+  %v12 = getelementptr inbounds i16, ptr %v6, i32 -1
   %v13 = add i32 %v5, 1
-  %v14 = getelementptr inbounds i32, i32* %v3, i32 2
-  %v15 = getelementptr inbounds i16, i16* %v1, i32 2
+  %v14 = getelementptr inbounds i32, ptr %v3, i32 2
+  %v15 = getelementptr inbounds i16, ptr %v1, i32 2
   %v16 = icmp slt i32 %v13, %a1
   br i1 %v16, label %b2, label %b3
 

diff  --git a/llvm/test/CodeGen/Hexagon/swp-epilog-reuse-1.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-reuse-1.ll
index 5d6823af4fb1c..d09ce2799a9b2 100644
--- a/llvm/test/CodeGen/Hexagon/swp-epilog-reuse-1.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-epilog-reuse-1.ll
@@ -8,7 +8,7 @@
 ; be reused. The bug causes an assert due to an invalid virtual register error
 ; in the live variable analysis.
 
-define void @test(i8* %a, i8* %b)  #0 {
+define void @test(ptr %a, ptr %b)  #0 {
 entry:
   br label %for.body6.us.prol
 
@@ -16,15 +16,15 @@ for.body6.us.prol:
   %i.065.us.prol = phi i32 [ 0, %entry ], [ %inc.us.prol, %for.body6.us.prol ]
   %im1.064.us.prol = phi i32 [ undef, %entry ], [ %i.065.us.prol, %for.body6.us.prol ]
   %prol.iter = phi i32 [ undef, %entry ], [ %prol.iter.sub, %for.body6.us.prol ]
-  %arrayidx8.us.prol = getelementptr inbounds i8, i8* %b, i32 %im1.064.us.prol
-  %0 = load i8, i8* %arrayidx8.us.prol, align 1
+  %arrayidx8.us.prol = getelementptr inbounds i8, ptr %b, i32 %im1.064.us.prol
+  %0 = load i8, ptr %arrayidx8.us.prol, align 1
   %conv9.us.prol = sext i8 %0 to i32
   %add.us.prol = add nsw i32 %conv9.us.prol, 0
   %add12.us.prol = add nsw i32 %add.us.prol, 0
   %mul.us.prol = mul nsw i32 %add12.us.prol, 3
   %conv13.us.prol = trunc i32 %mul.us.prol to i8
-  %arrayidx14.us.prol = getelementptr inbounds i8, i8* %a, i32 %i.065.us.prol
-  store i8 %conv13.us.prol, i8* %arrayidx14.us.prol, align 1
+  %arrayidx14.us.prol = getelementptr inbounds i8, ptr %a, i32 %i.065.us.prol
+  store i8 %conv13.us.prol, ptr %arrayidx14.us.prol, align 1
   %inc.us.prol = add nuw nsw i32 %i.065.us.prol, 1
   %prol.iter.sub = add i32 %prol.iter, -1
   %prol.iter.cmp = icmp eq i32 %prol.iter.sub, 0
@@ -32,13 +32,13 @@ for.body6.us.prol:
 
 for.body6.us:
   %im2.063.us = phi i32 [ undef, %for.body6.us ], [ %im1.064.us.prol, %for.body6.us.prol ]
-  %arrayidx10.us = getelementptr inbounds i8, i8* %b, i32 %im2.063.us
-  %1 = load i8, i8* %arrayidx10.us, align 1
+  %arrayidx10.us = getelementptr inbounds i8, ptr %b, i32 %im2.063.us
+  %1 = load i8, ptr %arrayidx10.us, align 1
   %conv11.us = sext i8 %1 to i32
   %add12.us = add nsw i32 0, %conv11.us
   %mul.us = mul nsw i32 %add12.us, 3
   %conv13.us = trunc i32 %mul.us to i8
-  store i8 %conv13.us, i8* undef, align 1
+  store i8 %conv13.us, ptr undef, align 1
   br label %for.body6.us
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/swp-epilog-reuse.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-reuse.ll
index 6a2ad73f20928..4f20153b55062 100644
--- a/llvm/test/CodeGen/Hexagon/swp-epilog-reuse.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-epilog-reuse.ll
@@ -4,29 +4,29 @@
 ; Test that the pipeliner doesn't ICE due because the PHI generation
 ; code in the epilog does not attempt to reuse an existing PHI.
 
-define void @test(float* noalias %srcImg, i32 %width, float* noalias %dstImg) {
+define void @test(ptr noalias %srcImg, i32 %width, ptr noalias %dstImg) {
 entry.split:
   %shr = lshr i32 %width, 1
-  %incdec.ptr253 = getelementptr inbounds float, float* %dstImg, i32 2
+  %incdec.ptr253 = getelementptr inbounds float, ptr %dstImg, i32 2
   br i1 undef, label %for.body, label %for.end
 
 for.body:
-  %dst.21518.reg2mem.0 = phi float* [ null, %while.end712 ], [ %incdec.ptr253, %entry.split ]
-  %dstEnd.01519 = phi float* [ %add.ptr725, %while.end712 ], [ undef, %entry.split ]
-  %add.ptr367 = getelementptr inbounds float, float* %srcImg, i32 undef
-  %dst.31487 = getelementptr inbounds float, float* %dst.21518.reg2mem.0, i32 1
+  %dst.21518.reg2mem.0 = phi ptr [ null, %while.end712 ], [ %incdec.ptr253, %entry.split ]
+  %dstEnd.01519 = phi ptr [ %add.ptr725, %while.end712 ], [ undef, %entry.split ]
+  %add.ptr367 = getelementptr inbounds float, ptr %srcImg, i32 undef
+  %dst.31487 = getelementptr inbounds float, ptr %dst.21518.reg2mem.0, i32 1
   br i1 undef, label %while.body661.preheader, label %while.end712
 
 while.body661.preheader:
-  %scevgep1941 = getelementptr float, float* %add.ptr367, i32 1
+  %scevgep1941 = getelementptr float, ptr %add.ptr367, i32 1
   br label %while.body661.ur
 
 while.body661.ur:
-  %lsr.iv1942 = phi float* [ %scevgep1941, %while.body661.preheader ], [ undef, %while.body661.ur ]
+  %lsr.iv1942 = phi ptr [ %scevgep1941, %while.body661.preheader ], [ undef, %while.body661.ur ]
   %col1.31508.reg2mem.0.ur = phi float [ %col3.31506.reg2mem.0.ur, %while.body661.ur ], [ undef, %while.body661.preheader ]
   %col4.31507.reg2mem.0.ur = phi float [ %add710.ur, %while.body661.ur ], [ 0.000000e+00, %while.body661.preheader ]
   %col3.31506.reg2mem.0.ur = phi float [ %add689.ur, %while.body661.ur ], [ undef, %while.body661.preheader ]
-  %dst.41511.ur = phi float* [ %incdec.ptr674.ur, %while.body661.ur ], [ %dst.31487, %while.body661.preheader ]
+  %dst.41511.ur = phi ptr [ %incdec.ptr674.ur, %while.body661.ur ], [ %dst.31487, %while.body661.preheader ]
   %mul662.ur = fmul float %col1.31508.reg2mem.0.ur, 4.000000e+00
   %add663.ur = fadd float undef, %mul662.ur
   %add665.ur = fadd float %add663.ur, undef
@@ -34,10 +34,10 @@ while.body661.ur:
   %add669.ur = fadd float undef, %add667.ur
   %add670.ur = fadd float %col4.31507.reg2mem.0.ur, %add669.ur
   %conv673.ur = fmul float %add670.ur, 3.906250e-03
-  %incdec.ptr674.ur = getelementptr inbounds float, float* %dst.41511.ur, i32 1
-  store float %conv673.ur, float* %dst.41511.ur, align 4
-  %scevgep1959 = getelementptr float, float* %lsr.iv1942, i32 -1
-  %0 = load float, float* %scevgep1959, align 4
+  %incdec.ptr674.ur = getelementptr inbounds float, ptr %dst.41511.ur, i32 1
+  store float %conv673.ur, ptr %dst.41511.ur, align 4
+  %scevgep1959 = getelementptr float, ptr %lsr.iv1942, i32 -1
+  %0 = load float, ptr %scevgep1959, align 4
   %mul680.ur = fmul float %0, 4.000000e+00
   %add681.ur = fadd float undef, %mul680.ur
   %add684.ur = fadd float undef, %add681.ur
@@ -47,16 +47,16 @@ while.body661.ur:
   %add703.ur = fadd float undef, %add699.ur
   %add707.ur = fadd float undef, %add703.ur
   %add710.ur = fadd float undef, %add707.ur
-  %cmp660.ur = icmp ult float* %incdec.ptr674.ur, %dstEnd.01519
+  %cmp660.ur = icmp ult ptr %incdec.ptr674.ur, %dstEnd.01519
   br i1 %cmp660.ur, label %while.body661.ur, label %while.end712
 
 while.end712:
-  %dst.4.lcssa.reg2mem.0 = phi float* [ %dst.31487, %for.body ], [ undef, %while.body661.ur ]
+  %dst.4.lcssa.reg2mem.0 = phi ptr [ %dst.31487, %for.body ], [ undef, %while.body661.ur ]
   %conv721 = fpext float undef to double
   %mul722 = fmul double %conv721, 0x3F7111112119E8FB
   %conv723 = fptrunc double %mul722 to float
-  store float %conv723, float* %dst.4.lcssa.reg2mem.0, align 4
-  %add.ptr725 = getelementptr inbounds float, float* %dstEnd.01519, i32 %shr
+  store float %conv723, ptr %dst.4.lcssa.reg2mem.0, align 4
+  %add.ptr725 = getelementptr inbounds float, ptr %dstEnd.01519, i32 %shr
   %cmp259 = icmp ult i32 undef, undef
   br i1 %cmp259, label %for.body, label %for.end
 

diff  --git a/llvm/test/CodeGen/Hexagon/swp-epilog-reuse2.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-reuse2.ll
index e20bf5a7bd16f..96663508b3c59 100644
--- a/llvm/test/CodeGen/Hexagon/swp-epilog-reuse2.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-epilog-reuse2.ll
@@ -7,48 +7,46 @@
 ; 
diff erences.
 
 ; Function Attrs: nounwind
-define void @f0(float* noalias %a0, float* noalias %a1) #0 {
+define void @f0(ptr noalias %a0, ptr noalias %a1) #0 {
 b0:
-  %v0 = getelementptr inbounds float, float* %a1, i32 2
+  %v0 = getelementptr inbounds float, ptr %a1, i32 2
   br i1 undef, label %b1, label %b6
 
 b1:                                               ; preds = %b5, %b0
-  %v1 = phi float* [ undef, %b5 ], [ %v0, %b0 ]
-  %v2 = phi float* [ %v32, %b5 ], [ undef, %b0 ]
-  %v3 = getelementptr inbounds float, float* %a0, i32 undef
-  %v4 = getelementptr inbounds float, float* %v1, i32 1
+  %v1 = phi ptr [ undef, %b5 ], [ %v0, %b0 ]
+  %v2 = phi ptr [ %v32, %b5 ], [ undef, %b0 ]
+  %v3 = getelementptr inbounds float, ptr %a0, i32 undef
+  %v4 = getelementptr inbounds float, ptr %v1, i32 1
   br i1 undef, label %b2, label %b5
 
 b2:                                               ; preds = %b1
-  %v5 = getelementptr float, float* %v3, i32 1
+  %v5 = getelementptr float, ptr %v3, i32 1
   br label %b3
 
 b3:                                               ; preds = %b3, %b2
-  %v6 = phi float* [ %v5, %b2 ], [ %v20, %b3 ]
+  %v6 = phi ptr [ %v5, %b2 ], [ %v20, %b3 ]
   %v7 = phi float [ %v19, %b3 ], [ undef, %b2 ]
   %v8 = phi float [ %v7, %b3 ], [ undef, %b2 ]
-  %v9 = phi float* [ %v15, %b3 ], [ %v4, %b2 ]
-  %v10 = bitcast float* %v6 to i8*
+  %v9 = phi ptr [ %v15, %b3 ], [ %v4, %b2 ]
   %v11 = fadd float undef, 0.000000e+00
   %v12 = fadd float undef, %v11
   %v13 = fadd float %v7, %v12
   %v14 = fmul float %v13, 3.906250e-03
-  %v15 = getelementptr inbounds float, float* %v9, i32 1
-  store float %v14, float* %v9, align 4, !tbaa !0
-  %v16 = getelementptr i8, i8* %v10, i32 undef
-  %v17 = bitcast i8* %v16 to float*
-  %v18 = load float, float* %v17, align 4, !tbaa !0
+  %v15 = getelementptr inbounds float, ptr %v9, i32 1
+  store float %v14, ptr %v9, align 4, !tbaa !0
+  %v16 = getelementptr i8, ptr %v6, i32 undef
+  %v18 = load float, ptr %v16, align 4, !tbaa !0
   %v19 = fadd float %v18, undef
-  %v20 = getelementptr float, float* %v6, i32 2
-  %v21 = icmp ult float* %v15, %v2
+  %v20 = getelementptr float, ptr %v6, i32 2
+  %v21 = icmp ult ptr %v15, %v2
   br i1 %v21, label %b3, label %b4
 
 b4:                                               ; preds = %b3
-  %v22 = getelementptr float, float* %v4, i32 undef
+  %v22 = getelementptr float, ptr %v4, i32 undef
   br label %b5
 
 b5:                                               ; preds = %b4, %b1
-  %v23 = phi float* [ %v4, %b1 ], [ %v22, %b4 ]
+  %v23 = phi ptr [ %v4, %b1 ], [ %v22, %b4 ]
   %v24 = phi float [ undef, %b1 ], [ %v8, %b4 ]
   %v25 = fadd float %v24, undef
   %v26 = fadd float %v25, undef
@@ -57,8 +55,8 @@ b5:                                               ; preds = %b4, %b1
   %v29 = fpext float %v28 to double
   %v30 = fmul double %v29, 0x3F7111112119E8FB
   %v31 = fptrunc double %v30 to float
-  store float %v31, float* %v23, align 4, !tbaa !0
-  %v32 = getelementptr inbounds float, float* %v2, i32 undef
+  store float %v31, ptr %v23, align 4, !tbaa !0
+  %v32 = getelementptr inbounds float, ptr %v2, i32 undef
   br i1 undef, label %b1, label %b6
 
 b6:                                               ; preds = %b5, %b0

diff  --git a/llvm/test/CodeGen/Hexagon/swp-epilog-reuse3.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-reuse3.ll
index 6c6a263e42bfa..461dce491b0eb 100644
--- a/llvm/test/CodeGen/Hexagon/swp-epilog-reuse3.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-epilog-reuse3.ll
@@ -24,30 +24,30 @@ b3:                                               ; preds = %b3, %b2
   %v0 = phi i16 [ %v10, %b3 ], [ undef, %b2 ]
   %v1 = phi i16 [ %v0, %b3 ], [ undef, %b2 ]
   %v2 = phi i32 [ %v26, %b3 ], [ undef, %b2 ]
-  %v3 = phi i32* [ undef, %b3 ], [ undef, %b2 ]
-  %v4 = phi i16* [ %v5, %b3 ], [ undef, %b2 ]
-  %v5 = getelementptr inbounds i16, i16* %v4, i32 1
+  %v3 = phi ptr [ undef, %b3 ], [ undef, %b2 ]
+  %v4 = phi ptr [ %v5, %b3 ], [ undef, %b2 ]
+  %v5 = getelementptr inbounds i16, ptr %v4, i32 1
   %v6 = sext i16 %v1 to i32
   %v7 = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 0, i32 %v6, i32 undef)
-  %v8 = getelementptr inbounds i16, i16* %v4, i32 2
+  %v8 = getelementptr inbounds i16, ptr %v4, i32 2
   %v9 = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v7, i32 undef, i32 undef)
-  %v10 = load i16, i16* %v8, align 2, !tbaa !0
+  %v10 = load i16, ptr %v8, align 2, !tbaa !0
   %v11 = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v9, i32 undef, i32 undef)
   %v12 = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v11, i32 undef)
-  %v13 = getelementptr [166 x i32], [166 x i32]* null, i32 0, i32 undef
-  %v14 = load i32, i32* %v13, align 4
+  %v13 = getelementptr [166 x i32], ptr null, i32 0, i32 undef
+  %v14 = load i32, ptr %v13, align 4
   %v15 = call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 %v14, i32 undef)
   %v16 = call i64 @llvm.hexagon.S2.asr.i.p(i64 %v15, i32 15)
   %v17 = call i32 @llvm.hexagon.A2.sat(i64 %v16)
   %v18 = call i32 @llvm.hexagon.A2.subsat(i32 %v12, i32 %v17)
-  %v19 = getelementptr [166 x i32], [166 x i32]* null, i32 0, i32 undef
-  %v20 = load i32, i32* %v19, align 4
+  %v19 = getelementptr [166 x i32], ptr null, i32 0, i32 undef
+  %v20 = load i32, ptr %v19, align 4
   %v21 = call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 %v20, i32 0)
   %v22 = call i64 @llvm.hexagon.S2.asr.i.p(i64 %v21, i32 15)
   %v23 = call i32 @llvm.hexagon.A2.sat(i64 %v22)
   %v24 = call i32 @llvm.hexagon.A2.subsat(i32 %v18, i32 %v23)
   %v25 = call i32 @llvm.hexagon.S2.asl.r.r.sat(i32 %v24, i32 undef)
-  store i32 %v25, i32* %v3, align 4, !tbaa !4
+  store i32 %v25, ptr %v3, align 4, !tbaa !4
   %v26 = add i32 %v2, 1
   %v27 = icmp eq i32 %v26, 164
   br i1 %v27, label %b4, label %b3

diff  --git a/llvm/test/CodeGen/Hexagon/swp-epilog-reuse4.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-reuse4.ll
index 5acc3626f60e8..404b7d87bd59d 100644
--- a/llvm/test/CodeGen/Hexagon/swp-epilog-reuse4.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-epilog-reuse4.ll
@@ -32,34 +32,34 @@ b3:                                               ; preds = %b3, %b2
   br i1 undef, label %b3, label %b4
 
 b4:                                               ; preds = %b3, %b2
-  %v3 = phi i32* [ undef, %b2 ], [ undef, %b3 ]
+  %v3 = phi ptr [ undef, %b2 ], [ undef, %b3 ]
   %v4 = phi i32 [ 0, %b2 ], [ %v2, %b3 ]
-  %v5 = getelementptr [18 x i16], [18 x i16]* @g0, i32 0, i32 undef
+  %v5 = getelementptr [18 x i16], ptr @g0, i32 0, i32 undef
   br label %b5
 
 b5:                                               ; preds = %b5, %b4
   %v6 = phi i16 [ 0, %b4 ], [ %v17, %b5 ]
   %v7 = phi i16 [ undef, %b4 ], [ %v6, %b5 ]
   %v8 = phi i32 [ %v4, %b4 ], [ %v35, %b5 ]
-  %v9 = phi i32* [ %v3, %b4 ], [ undef, %b5 ]
-  %v10 = phi i16* [ undef, %b4 ], [ %v12, %b5 ]
+  %v9 = phi ptr [ %v3, %b4 ], [ undef, %b5 ]
+  %v10 = phi ptr [ undef, %b4 ], [ %v12, %b5 ]
   %v11 = add i32 %v8, 0
-  %v12 = getelementptr inbounds i16, i16* %v10, i32 1
+  %v12 = getelementptr inbounds i16, ptr %v10, i32 1
   %v13 = sext i16 %v7 to i32
   %v14 = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 0, i32 %v13, i32 undef)
-  %v15 = getelementptr inbounds i16, i16* %v10, i32 2
+  %v15 = getelementptr inbounds i16, ptr %v10, i32 2
   %v16 = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v14, i32 undef, i32 undef)
-  %v17 = load i16, i16* %v15, align 2, !tbaa !0
+  %v17 = load i16, ptr %v15, align 2, !tbaa !0
   %v18 = sext i16 %v17 to i32
   %v19 = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v16, i32 %v18, i32 undef)
   %v20 = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v19, i32 undef)
-  %v21 = getelementptr [166 x i32], [166 x i32]* %v0, i32 0, i32 %v11
-  %v22 = load i32, i32* %v21, align 4
+  %v21 = getelementptr [166 x i32], ptr %v0, i32 0, i32 %v11
+  %v22 = load i32, ptr %v21, align 4
   %v23 = call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 %v22, i32 undef)
   %v24 = call i64 @llvm.hexagon.S2.asr.i.p(i64 %v23, i32 15)
   %v25 = call i32 @llvm.hexagon.A2.sat(i64 %v24)
   %v26 = call i32 @llvm.hexagon.A2.subsat(i32 %v20, i32 %v25)
-  %v27 = load i16, i16* %v5, align 4
+  %v27 = load i16, ptr %v5, align 4
   %v28 = sext i16 %v27 to i32
   %v29 = call i32 @llvm.hexagon.A2.sxth(i32 %v28)
   %v30 = call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 undef, i32 %v29)
@@ -67,7 +67,7 @@ b5:                                               ; preds = %b5, %b4
   %v32 = call i32 @llvm.hexagon.A2.sat(i64 %v31)
   %v33 = call i32 @llvm.hexagon.A2.subsat(i32 %v26, i32 %v32)
   %v34 = call i32 @llvm.hexagon.S2.asl.r.r.sat(i32 %v33, i32 undef)
-  store i32 %v34, i32* %v9, align 4, !tbaa !4
+  store i32 %v34, ptr %v9, align 4, !tbaa !4
   %v35 = add i32 %v8, 1
   %v36 = icmp eq i32 %v35, 164
   br i1 %v36, label %b6, label %b5

diff  --git a/llvm/test/CodeGen/Hexagon/swp-exit-fixup.ll b/llvm/test/CodeGen/Hexagon/swp-exit-fixup.ll
index 158a49ac6a033..27327a9ca439f 100644
--- a/llvm/test/CodeGen/Hexagon/swp-exit-fixup.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-exit-fixup.ll
@@ -4,7 +4,7 @@
 ; Make sure we fix up the Phis when we connect the last
 ; epilog block to the CFG.
 
-define void @f0(i16* nocapture %a0) #0 {
+define void @f0(ptr nocapture %a0) #0 {
 b0:
   br i1 undef, label %b1, label %b2
 
@@ -21,7 +21,7 @@ b4:                                               ; preds = %b3
   br i1 undef, label %b6, label %b5
 
 b5:                                               ; preds = %b4
-  store i16 4096, i16* %a0, align 2
+  store i16 4096, ptr %a0, align 2
   br label %b11
 
 b6:                                               ; preds = %b4
@@ -39,13 +39,13 @@ b9:                                               ; preds = %b8
 
 b10:                                              ; preds = %b10, %b9
   %v1 = phi i32 [ %v8, %b10 ], [ 1, %b9 ]
-  %v2 = getelementptr inbounds [11 x i32], [11 x i32]* undef, i32 0, i32 %v1
-  %v3 = load i32, i32* undef, align 4
+  %v2 = getelementptr inbounds [11 x i32], ptr undef, i32 0, i32 %v1
+  %v3 = load i32, ptr undef, align 4
   %v4 = add nsw i32 %v3, 0
   %v5 = add nsw i32 %v4, 2048
   %v6 = lshr i32 %v5, 12
   %v7 = trunc i32 %v6 to i16
-  store i16 %v7, i16* undef, align 2
+  store i16 %v7, ptr undef, align 2
   %v8 = add nsw i32 %v1, 1
   %v9 = icmp eq i32 %v8, undef
   br i1 %v9, label %b11, label %b10

diff  --git a/llvm/test/CodeGen/Hexagon/swp-fix-last-use.ll b/llvm/test/CodeGen/Hexagon/swp-fix-last-use.ll
index 877c2c9ba1c25..6ac3ac9ad950f 100644
--- a/llvm/test/CodeGen/Hexagon/swp-fix-last-use.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-fix-last-use.ll
@@ -12,8 +12,8 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b2, %b1
   %v0 = phi i32 [ %v1, %b2 ], [ 1, %b1 ]
-  store i16 0, i16* undef, align 2
-  store i16 0, i16* undef, align 2
+  store i16 0, ptr undef, align 2
+  store i16 0, ptr undef, align 2
   %v1 = add nsw i32 %v0, 4
   %v2 = icmp slt i32 %v1, undef
   br i1 %v2, label %b2, label %b3

diff  --git a/llvm/test/CodeGen/Hexagon/swp-fix-last-use1.ll b/llvm/test/CodeGen/Hexagon/swp-fix-last-use1.ll
index 818520171045e..721e581c1db02 100644
--- a/llvm/test/CodeGen/Hexagon/swp-fix-last-use1.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-fix-last-use1.ll
@@ -22,7 +22,7 @@ b2:                                               ; preds = %b2, %b1
   %v9 = lshr i64 %v8, 32
   %v10 = trunc i64 %v9 to i32
   %v11 = tail call i32 @llvm.hexagon.S2.lsr.r.r(i32 %v10, i32 undef)
-  %v12 = load i64, i64* undef, align 8, !tbaa !0
+  %v12 = load i64, ptr undef, align 8, !tbaa !0
   %v13 = trunc i64 %v12 to i32
   %v14 = lshr i64 %v12, 32
   %v15 = trunc i64 %v14 to i32
@@ -43,7 +43,7 @@ b4:                                               ; preds = %b3, %b0
   %v24 = zext i32 %v22 to i64
   %v25 = shl nuw i64 %v24, 32
   %v26 = or i64 %v25, 0
-  store i64 %v26, i64* undef, align 8, !tbaa !0
+  store i64 %v26, ptr undef, align 8, !tbaa !0
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/swp-kernel-last-use.ll b/llvm/test/CodeGen/Hexagon/swp-kernel-last-use.ll
index 5ecc1598dea53..b7923ce3faca5 100644
--- a/llvm/test/CodeGen/Hexagon/swp-kernel-last-use.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-kernel-last-use.ll
@@ -4,14 +4,14 @@
 ; This test caused an assert because there was a use of an instruction
 ; that was scheduled at stage 0, but no phi were added in the epilog.
 
-%s.0 = type <{ i8*, i8*, i16, i8, i8, i8 }>
+%s.0 = type <{ ptr, ptr, i16, i8, i8, i8 }>
 %s.1 = type { [4 x i16], [4 x i16], [4 x i16], [4 x i16], i32, i32, i32, i8, [10 x i32], [10 x [3 x i32]], [4 x i64], i8 }
 %s.2 = type { [3 x i16], [4 x i8], i32, [3 x %s.3], [3 x %s.3], [3 x %s.3], [3 x %s.3], [3 x %s.3], [3 x %s.3], [6 x %s.3], [6 x %s.3], [6 x %s.3], i8, [3 x [3 x i16]], [3 x [3 x i16]], [3 x i16], [3 x i16], [6 x i16], [2 x i32], [10 x i32], [2 x i32], [2 x i32], [2 x [3 x i32]], [2 x i32], [2 x [3 x i64]], [2 x [3 x [3 x i32]]], [2 x [3 x i32]] }
 %s.3 = type { i8, i8, i8, i8 }
 
 @g0 = external constant %s.0, align 1
 
-define void @f0(i8 zeroext %a0, i32 %a1, i32 %a2, i8 zeroext %a3, %s.1* nocapture %a4, %s.2* %a5, i8 zeroext %a6) #0 {
+define void @f0(i8 zeroext %a0, i32 %a1, i32 %a2, i8 zeroext %a3, ptr nocapture %a4, ptr %a5, i8 zeroext %a6) #0 {
 b0:
   br i1 undef, label %b1, label %b7
 
@@ -23,7 +23,7 @@ b2:                                               ; preds = %b1
 
 b3:                                               ; preds = %b1
   %v0 = select i1 undef, i32 2, i32 4
-  %v1 = load i8, i8* undef, align 1
+  %v1 = load i8, ptr undef, align 1
   %v2 = zext i8 %v1 to i32
   %v3 = icmp uge i32 %v2, %v0
   br label %b4
@@ -35,7 +35,7 @@ b5:                                               ; preds = %b10
   unreachable
 
 b6:                                               ; preds = %b10
-  call void @f1(%s.0* @g0, i32 undef, i32 %v21, i32 undef, i32 undef)
+  call void @f1(ptr @g0, i32 undef, i32 %v21, i32 undef, i32 undef)
   unreachable
 
 b7:                                               ; preds = %b0
@@ -69,6 +69,6 @@ b10:                                              ; preds = %b9
   br i1 undef, label %b6, label %b5
 }
 
-declare void @f1(%s.0*, i32, i32, i32, i32)
+declare void @f1(ptr, i32, i32, i32, i32)
 
 attributes #0 = { nounwind "target-cpu"="hexagonv55" }

diff  --git a/llvm/test/CodeGen/Hexagon/swp-kernel-phi1.ll b/llvm/test/CodeGen/Hexagon/swp-kernel-phi1.ll
index c87479f6e97db..e70fd61df1675 100644
--- a/llvm/test/CodeGen/Hexagon/swp-kernel-phi1.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-kernel-phi1.ll
@@ -16,8 +16,7 @@
 ; Function Attrs: nounwind optsize
 define void @f0() #0 {
 b0:
-  %v0 = getelementptr [8 x i16], [8 x i16]* undef, i32 0, i32 7
-  %v1 = bitcast i16* %v0 to [8 x i16]*
+  %v0 = getelementptr [8 x i16], ptr undef, i32 0, i32 7
   br label %b2
 
 b1:                                               ; preds = %b2
@@ -28,12 +27,12 @@ b2:                                               ; preds = %b2, %b0
   %v3 = phi i16 [ 17, %b0 ], [ %v7, %b2 ]
   %v4 = phi i16 [ 18, %b0 ], [ %v3, %b2 ]
   %v5 = sext i16 %v4 to i32
-  %v6 = getelementptr i16, i16* null, i32 -2
-  %v7 = load i16, i16* %v6, align 2
+  %v6 = getelementptr i16, ptr null, i32 -2
+  %v7 = load i16, ptr %v6, align 2
   %v8 = sext i16 %v7 to i32
   %v9 = tail call i32 @llvm.hexagon.A2.subsat(i32 %v5, i32 %v8)
   %v10 = trunc i32 %v9 to i16
-  store i16 %v10, i16* null, align 2
+  store i16 %v10, ptr null, align 2
   %v11 = add nsw i32 %v2, -1
   %v12 = icmp sgt i32 %v11, 1
   br i1 %v12, label %b2, label %b1

diff  --git a/llvm/test/CodeGen/Hexagon/swp-large-rec.ll b/llvm/test/CodeGen/Hexagon/swp-large-rec.ll
index 45d40df4ec0ae..5bdcad49aae5d 100644
--- a/llvm/test/CodeGen/Hexagon/swp-large-rec.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-large-rec.ll
@@ -10,7 +10,7 @@
 ; STATS-NOT: 1 pipeliner   - Number of loops software pipelined
 
 ; Function Attrs: nounwind
-define void @f0(i32 %a0, i32 %a1, double %a2, double %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, [1000 x i8]* %a9, [1000 x i8]* %a10, [1000 x i8]* %a11) #0 {
+define void @f0(i32 %a0, i32 %a1, double %a2, double %a3, ptr %a4, ptr %a5, ptr %a6, ptr %a7, ptr %a8, ptr %a9, ptr %a10, ptr %a11) #0 {
 b0:
   br i1 undef, label %b1, label %b4
 
@@ -18,45 +18,45 @@ b1:                                               ; preds = %b3, %b0
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
-  %v0 = phi i8* [ %v22, %b2 ], [ %a5, %b1 ]
-  %v1 = phi i8* [ %v23, %b2 ], [ %a6, %b1 ]
-  %v2 = phi i8* [ %v24, %b2 ], [ %a7, %b1 ]
+  %v0 = phi ptr [ %v22, %b2 ], [ %a5, %b1 ]
+  %v1 = phi ptr [ %v23, %b2 ], [ %a6, %b1 ]
+  %v2 = phi ptr [ %v24, %b2 ], [ %a7, %b1 ]
   %v3 = phi i32 [ %v20, %b2 ], [ 0, %b1 ]
-  %v4 = load i8, i8* %v0, align 1, !tbaa !0
+  %v4 = load i8, ptr %v0, align 1, !tbaa !0
   %v5 = zext i8 %v4 to i32
-  %v6 = load i8, i8* %v1, align 1, !tbaa !0
+  %v6 = load i8, ptr %v1, align 1, !tbaa !0
   %v7 = sext i8 %v6 to i32
-  %v8 = load i8, i8* %v2, align 1, !tbaa !0
+  %v8 = load i8, ptr %v2, align 1, !tbaa !0
   %v9 = sext i8 %v8 to i32
   %v10 = mul nsw i32 %v9, %v7
   %v11 = add nsw i32 %v10, %v5
   %v12 = trunc i32 %v11 to i8
-  store i8 %v12, i8* undef, align 1, !tbaa !0
-  %v13 = load i8, i8* %v2, align 1, !tbaa !0
+  store i8 %v12, ptr undef, align 1, !tbaa !0
+  %v13 = load i8, ptr %v2, align 1, !tbaa !0
   %v14 = sext i8 %v13 to i32
-  %v15 = load i8, i8* undef, align 1, !tbaa !0
+  %v15 = load i8, ptr undef, align 1, !tbaa !0
   %v16 = sext i8 %v15 to i32
   %v17 = mul nsw i32 %v16, %v14
   %v18 = add i32 %v17, %v11
   %v19 = trunc i32 %v18 to i8
-  store i8 %v19, i8* %v0, align 1, !tbaa !0
+  store i8 %v19, ptr %v0, align 1, !tbaa !0
   %v20 = add nsw i32 %v3, 1
-  store i8 0, i8* undef, align 1, !tbaa !0
+  store i8 0, ptr undef, align 1, !tbaa !0
   %v21 = icmp eq i32 %v20, undef
-  %v22 = getelementptr i8, i8* %v0, i32 1
-  %v23 = getelementptr i8, i8* %v1, i32 1
-  %v24 = getelementptr i8, i8* %v2, i32 1
+  %v22 = getelementptr i8, ptr %v0, i32 1
+  %v23 = getelementptr i8, ptr %v1, i32 1
+  %v24 = getelementptr i8, ptr %v2, i32 1
   br i1 %v21, label %b3, label %b2
 
 b3:                                               ; preds = %b2
-  tail call void @f1(i32 %a1, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, [1000 x i8]* %a9, [1000 x i8]* %a10, [1000 x i8]* %a11, i8 signext 1) #2
+  tail call void @f1(i32 %a1, ptr %a4, ptr %a5, ptr %a6, ptr %a7, ptr %a8, ptr %a9, ptr %a10, ptr %a11, i8 signext 1) #2
   br i1 undef, label %b4, label %b1
 
 b4:                                               ; preds = %b3, %b0
   ret void
 }
 
-declare void @f1(i32, i8*, i8*, i8*, i8*, i8*, [1000 x i8]*, [1000 x i8]*, [1000 x i8]*, i8 signext) #1
+declare void @f1(i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i8 signext) #1
 
 attributes #0 = { nounwind "target-cpu"="hexagonv55" }
 attributes #1 = { "target-cpu"="hexagonv55" }

diff  --git a/llvm/test/CodeGen/Hexagon/swp-listen-loop3.ll b/llvm/test/CodeGen/Hexagon/swp-listen-loop3.ll
index d8e4f003d5375..d4867e2053373 100644
--- a/llvm/test/CodeGen/Hexagon/swp-listen-loop3.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-listen-loop3.ll
@@ -12,7 +12,7 @@
 ; CHECK: }{{[ \t]*}}:endloop0
 
 ; Function Attrs: nounwind
-define void @f0(i32* nocapture %a0, i16 signext %a1) #0 {
+define void @f0(ptr nocapture %a0, i16 signext %a1) #0 {
 b0:
   %v0 = sext i16 %a1 to i32
   %v1 = add i32 %v0, -1
@@ -20,8 +20,8 @@ b0:
   br i1 %v2, label %b1, label %b4
 
 b1:                                               ; preds = %b0
-  %v3 = getelementptr i32, i32* %a0, i32 %v1
-  %v4 = load i32, i32* %v3, align 4
+  %v3 = getelementptr i32, ptr %a0, i32 %v1
+  %v4 = load i32, ptr %v3, align 4
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
@@ -29,14 +29,14 @@ b2:                                               ; preds = %b2, %b1
   %v6 = phi i32 [ %v5, %b2 ], [ %v0, %b1 ]
   %v7 = phi i32 [ %v10, %b2 ], [ %v4, %b1 ]
   %v8 = add nsw i32 %v6, -2
-  %v9 = getelementptr inbounds i32, i32* %a0, i32 %v8
-  %v10 = load i32, i32* %v9, align 4, !tbaa !0
+  %v9 = getelementptr inbounds i32, ptr %a0, i32 %v8
+  %v10 = load i32, ptr %v9, align 4, !tbaa !0
   %v11 = tail call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 %v10, i32 7946)
   %v12 = tail call i64 @llvm.hexagon.S2.asl.r.p(i64 %v11, i32 -13)
-  %v13 = getelementptr inbounds i32, i32* %a0, i32 %v5
+  %v13 = getelementptr inbounds i32, ptr %a0, i32 %v5
   %v14 = tail call i32 @llvm.hexagon.A2.sat(i64 %v12)
   %v15 = tail call i32 @llvm.hexagon.A2.subsat(i32 %v7, i32 %v14)
-  store i32 %v15, i32* %v13, align 4, !tbaa !0
+  store i32 %v15, ptr %v13, align 4, !tbaa !0
   %v16 = add nsw i32 %v5, -1
   %v17 = icmp sgt i32 %v16, 0
   br i1 %v17, label %b2, label %b3

diff  --git a/llvm/test/CodeGen/Hexagon/swp-loop-carried-crash.ll b/llvm/test/CodeGen/Hexagon/swp-loop-carried-crash.ll
index 82d4363be9b4b..3066c26667fae 100644
--- a/llvm/test/CodeGen/Hexagon/swp-loop-carried-crash.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-loop-carried-crash.ll
@@ -6,52 +6,52 @@
 ; the pass wasn't checking for a sequence of chain edges from the load to the
 ; store. The code assumed a single chain edge only.
 
-%0 = type { %1*, [2 x [2 x %39]], [2 x [2 x %39]], [2 x i8], [2 x i8], [2 x i8], [2 x i8], i32, i32 }
-%1 = type { %0, %2, %3, %15, %16*, %98*, %99, %105*, %295*, %299, %303, %304, %304, %307, i8, i8, i32 }
-%2 = type <{ %1*, i8, [3 x i8] }>
-%3 = type { %1*, i8, i32, i8, %4*, %8, %307, %12*, [10 x i8*], [10 x i8], %307 }
-%4 = type { %5*, %6 }
+%0 = type { ptr, [2 x [2 x %39]], [2 x [2 x %39]], [2 x i8], [2 x i8], [2 x i8], [2 x i8], i32, i32 }
+%1 = type { %0, %2, %3, %15, ptr, ptr, %99, ptr, ptr, %299, %303, %304, %304, %307, i8, i8, i32 }
+%2 = type <{ ptr, i8, [3 x i8] }>
+%3 = type { ptr, i8, i32, i8, ptr, %8, %307, ptr, [10 x ptr], [10 x i8], %307 }
+%4 = type { ptr, %6 }
 %5 = type opaque
 %6 = type { %7 }
 %7 = type { i64 }
-%8 = type { %9, %4*, [16 x i32], void (%8*, i8*, i32)*, i8*, %307, %307 }
-%9 = type { [16 x %11], i16, i8, %10*, %11 }
+%8 = type { %9, ptr, [16 x i32], ptr, ptr, %307, %307 }
+%9 = type { [16 x %11], i16, i8, ptr, %11 }
 %10 = type { i64, [8 x i8] }
 %11 = type { %307 }
-%12 = type { %10, %13, %13, i32, i32, i32, void (%10*)*, void (%10*)*, i32 (%10*)*, void (%10*)*, i32, i64* }
+%12 = type { %10, %13, %13, i32, i32, i32, ptr, ptr, ptr, ptr, i32, ptr }
 %13 = type { %14 }
 %14 = type { i16, i16, i32, i32, i32 }
-%15 = type <{ %1*, i8, [3 x i8] }>
-%16 = type { %1*, i32, i32, i8, i16, i16, i8, %17, i32, %22, %27, [4 x i8], [6 x [512 x %28]], %94, [6 x %29], [6 x i8*], %94, [7 x %95], [7 x i8*], [7 x i8*], %96*, %97, [8 x i8] }
-%17 = type { %18*, %21, %21, i32, i8 }
+%15 = type <{ ptr, i8, [3 x i8] }>
+%16 = type { ptr, i32, i32, i8, i16, i16, i8, %17, i32, %22, %27, [4 x i8], [6 x [512 x %28]], %94, [6 x %29], [6 x ptr], %94, [7 x %95], [7 x ptr], [7 x ptr], ptr, %97, [8 x i8] }
+%17 = type { ptr, %21, %21, i32, i8 }
 %18 = type { %19, %19, %20 }
 %19 = type { i32, i16, i16 }
 %20 = type { i32, i32, i32 }
 %21 = type { i32, i32, i32, i32 }
-%22 = type { %23*, %24 }
+%22 = type { ptr, %24 }
 %23 = type { i8, %10 }
 %24 = type { %25 }
 %25 = type { %26 }
 %26 = type { i32 }
-%27 = type { i32, i32, i32, i8* }
+%27 = type { i32, i32, i32, ptr }
 %28 = type { i16, i16, i16, i16 }
-%29 = type <{ i8*, i8*, i32, i16, [2 x i8], %24, %28*, i32, i8, [3 x i8], i32, %30, i8, i8, [2 x i8] }>
+%29 = type <{ ptr, ptr, i32, i16, [2 x i8], %24, ptr, i32, i8, [3 x i8], i32, %30, i8, i8, [2 x i8] }>
 %30 = type { %31, %44 }
-%31 = type { %32* }
-%32 = type { %33*, %24, i16, i16, i16, %37*, i16, i16, i8, i8, i32 }
+%31 = type { ptr }
+%32 = type { ptr, %24, i16, i16, i16, ptr, i16, i16, i8, i8, i32 }
 %33 = type { %34, [5 x %35], %36 }
 %34 = type { i32, i8 }
 %35 = type { [2 x i32] }
 %36 = type { i32, i8 }
-%37 = type <{ %38, i16, i16, i8, [3 x i8], %42*, %43*, i64*, [4 x i8], i64, i16, i8, i8, i16, i16, i32, i8, [3 x i8] }>
-%38 = type { %39*, i8, %40, i8, %41 }
+%37 = type <{ %38, i16, i16, i8, [3 x i8], ptr, ptr, ptr, [4 x i8], i64, i16, i8, i8, i16, i16, i32, i8, [3 x i8] }>
+%38 = type { ptr, i8, %40, i8, %41 }
 %39 = type { i64 }
 %40 = type { i32, i32, %24, %24, i32, i32, i16, i16, i16, i8, i8, i8, i8, i16 }
-%41 = type { i8, i16*, i32*, i32, i8, i8* }
+%41 = type { i8, ptr, ptr, i32, i8, ptr }
 %42 = type { i16, i16, i16 }
 %43 = type { i64, [280 x i8] }
-%44 = type { %45* }
-%45 = type { %38, %39*, i16, i16, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, %46, i8, i8, i8, %48, i8, i8, i16, i16, i8, i8, i8, %50*, %67*, i16*, i16*, i16, i16, i8, i16*, i8, i8, i8, i8, %69*, %87*, %91*, %92*, %92*, %93*, i8, i8, i8, i8, i8, %40*, i8, i32, i8, i8, i32, i32, i32, i32, %17, i32 }
+%44 = type { ptr }
+%45 = type { %38, ptr, i16, i16, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, %46, i8, i8, i8, %48, i8, i8, i16, i16, i8, i8, i8, ptr, ptr, ptr, ptr, i16, i16, i8, ptr, i8, i8, i8, i8, ptr, ptr, ptr, ptr, ptr, ptr, i8, i8, i8, i8, i8, ptr, i8, i32, i8, i8, i32, i32, i32, i32, %17, i32 }
 %46 = type { %47 }
 %47 = type { i8 }
 %48 = type { %49 }
@@ -100,26 +100,26 @@
 %91 = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
 %92 = type { i16, i16, [1024 x i16], i8, i8 }
 %93 = type { i16, i16, [8 x i16] }
-%94 = type { i32, i32, i32, i8** }
-%95 = type { %24, %24, %29*, i8, i32, i32 }
-%96 = type { i32, i32, i32, i8** }
-%97 = type { i8, %94, [10 x %96], [10 x i8*] }
+%94 = type { i32, i32, i32, ptr }
+%95 = type { %24, %24, ptr, i8, i32, i32 }
+%96 = type { i32, i32, i32, ptr }
+%97 = type { i8, %94, [10 x %96], [10 x ptr] }
 %98 = type opaque
 %99 = type { %100 }
-%100 = type { %1*, %101 }
+%100 = type { ptr, %101 }
 %101 = type { %102, %102 }
 %102 = type { i8, i8, %103 }
 %103 = type { %104 }
 %104 = type { i32, i32 }
-%105 = type <{ i8, [3 x i8], %106, [4 x i8], [512 x i64], %12*, %295*, %1*, %107*, %180*, %181*, %182*, %196, [4008 x i8], %197, %18, [228 x i8], %253, %258, %266, %267, i8, i8, i8, [5 x i8], %268, %278, %279, [4428 x i8] }>
-%106 = type { [16 x i8], i8, i16, %12*, %12*, i8, i32, i8 }
-%107 = type <{ [128 x %108], %109*, i16, [2 x i8], %145*, %150, %153, %105*, %1*, i8, [7 x i8] }>
+%105 = type <{ i8, [3 x i8], %106, [4 x i8], [512 x i64], ptr, ptr, ptr, ptr, ptr, ptr, ptr, %196, [4008 x i8], %197, %18, [228 x i8], %253, %258, %266, %267, i8, i8, i8, [5 x i8], %268, %278, %279, [4428 x i8] }>
+%106 = type { [16 x i8], i8, i16, ptr, ptr, i8, i32, i8 }
+%107 = type <{ [128 x %108], ptr, i16, [2 x i8], ptr, %150, %153, ptr, ptr, i8, [7 x i8] }>
 %108 = type { i16, i16 }
 %109 = type { [2 x [1024 x i8]], %110, [5 x %43], %125, [2 x %133], [28 x i8], %138, i8, [64 x i64], [2 x %92], %143, [10 x i8], i8, [31 x i8], [32 x i8], %150, [12 x i8], [18 x i8], [14 x i8] }
 %110 = type { %111, %113, [16 x %50], [6 x %115], [3 x %116], [6 x %117], [3 x %118], [3 x %119], [3 x %120], [3 x %121], %93, i8, [3 x %122], [3 x %91], %124 }
-%111 = type { %112*, i16, i16, [8 x %112] }
+%111 = type { ptr, i16, i16, [8 x %112] }
 %112 = type { i16, i32, i32, i32, i32, i16, i8, i32, i16, i16 }
-%113 = type { %114*, i16, i16, [3 x %114] }
+%113 = type { ptr, i16, i16, [3 x %114] }
 %114 = type { i16, i16, i16, i16, i16, i32, i32, i16, i16, i16, i16, i16, i16, i32, i8, i32 }
 %115 = type { [5 x %69] }
 %116 = type { i16, i16, i16, [12 x i8], [12 x i32], i8, [12 x i32], [12 x i16] }
@@ -159,27 +159,27 @@
 %150 = type { %151, %152 }
 %151 = type { i32, i32, i32, i32, i32, i16, i16, i16, i16, i8 }
 %152 = type { i8, i16, i8, i8, [4 x i32], i8, i8, i8, i16, [2 x i16], [2 x i16], [5 x i16], i8 }
-%153 = type <{ i8, [3 x i8], %154*, [2 x %160], i16, [2 x i8], [2 x %160], i16, [2 x i8], [2 x %160], i16, [2 x i8], [4 x %161], i16, i16, [2 x %162], i16, [2 x i8], %162*, [2 x %172], i16, [2 x i8], [24 x %173], i16, [2 x i8], [24 x %176], i16, [2 x i8], [24 x %176], i16, [2 x i8], [2 x %177], i16, [2 x i8], [2 x %174], i16, [2 x i8], [2 x %175], i16, [2 x i8], [24 x %176], i16, [2 x i8], %177, %177, [14 x %45], i16, [2 x i8], [14 x %160], i16, [2 x i8], %178*, [4 x i8], [8 x %37], i16, [4 x %42], [2 x i8], [8 x %32], i16, [4 x i16], [2 x i8], %179, i16, i16, i16, i16, i16, i16, i16, [2 x i8], [3 x i64], i16, i8, i8, i16, [2 x i8] }>
+%153 = type <{ i8, [3 x i8], ptr, [2 x %160], i16, [2 x i8], [2 x %160], i16, [2 x i8], [2 x %160], i16, [2 x i8], [4 x %161], i16, i16, [2 x %162], i16, [2 x i8], ptr, [2 x %172], i16, [2 x i8], [24 x %173], i16, [2 x i8], [24 x %176], i16, [2 x i8], [24 x %176], i16, [2 x i8], [2 x %177], i16, [2 x i8], [2 x %174], i16, [2 x i8], [2 x %175], i16, [2 x i8], [24 x %176], i16, [2 x i8], %177, %177, [14 x %45], i16, [2 x i8], [14 x %160], i16, [2 x i8], ptr, [4 x i8], [8 x %37], i16, [4 x %42], [2 x i8], [8 x %32], i16, [4 x i16], [2 x i8], %179, i16, i16, i16, i16, i16, i16, i16, [2 x i8], [3 x i64], i16, i8, i8, i16, [2 x i8] }>
 %154 = type { i16, i16, %155 }
 %155 = type { %156 }
 %156 = type { i16, %157, [2 x %158], i8, i16, i8, [12 x %159] }
 %157 = type { i8, i8, i8 }
 %158 = type { i16, i16 }
 %159 = type { i16, i16, i32, i8, i32, i32, i16, i8, i8, i8, i8, i32 }
-%160 = type { %67*, %24, i16, i16, i16, i8, i32, i8, i8, i8, i8, i32, i32, i32 }
+%160 = type { ptr, %24, i16, i16, i16, i8, i32, i8, i8, i8, i8, i32, i32, i32 }
 %161 = type <{ i16, [2 x i8], %24, i32, i32, i16, i16, i8, [3 x i8] }>
-%162 = type <{ %38, i32, i16, i16, i8, i8, i16, %163, i16, %111*, %124*, i8, i8, i8, i8, i16, i8, i8, i8, i8, i8, i8, i16*, i32*, i64*, %164*, i8*, %165*, %168*, %169*, %39*, %170*, %171*, %172*, %23**, i8*, i32, i16, i8, [5 x i8], [164 x %39], %105*, [4 x i8] }>
+%162 = type <{ %38, i32, i16, i16, i8, i8, i16, %163, i16, ptr, ptr, i8, i8, i8, i8, i16, i8, i8, i8, i8, i8, i8, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i32, i16, i8, [5 x i8], [164 x %39], ptr, [4 x i8] }>
 %163 = type { i16 }
-%164 = type { [131 x i64], [232 x i8], [131 x i32], %39*, i16, i32, i8, i8, i8, i8, %163, i8, i8, %39*, i8, [219 x i8] }
-%165 = type { %39*, i32, i16, i16, i8, i8, %39, i64, i32, i32, i8, %166, i8, %167 }
+%164 = type { [131 x i64], [232 x i8], [131 x i32], ptr, i16, i32, i8, i8, i8, i8, %163, i8, i8, ptr, i8, [219 x i8] }
+%165 = type { ptr, i32, i16, i16, i8, i8, %39, i64, i32, i32, i8, %166, i8, %167 }
 %166 = type { i8, i8 }
 %167 = type { i32, i32, i32, i8 }
 %168 = type { i32, i64, i64, i32, i32, i32, i32, i32, i32, i64, i32, i32, i16, i16, i32, i32, i32, i32, i16, i8, i64, i8, i8, i8 }
-%169 = type { %39*, %39*, %163, i8, i32, i32, i32, i16, i16, i32, i8, i8, i8, i8, i16, i8, %168*, %170*, %111*, i8*, i8*, i32*, i8, %16* }
-%170 = type { %39*, i32, i32, i16, i16, i8, i8, i8 }
+%169 = type { ptr, ptr, %163, i8, i32, i32, i32, i16, i16, i32, i8, i8, i8, i8, i16, i8, ptr, ptr, ptr, ptr, ptr, ptr, i8, ptr }
+%170 = type { ptr, i32, i32, i16, i16, i8, i8, i8 }
 %171 = type { i64, i64, i64, i64, i32, i32, i32, i32, i32, i32, i32, i32, i16, i32, i32, i16, i32, i16, [20 x i16], i16, i16, i8, i8, i8, i8, [78 x i8], [78 x i8], [39 x i8], i8, i32, i32 }
-%172 = type { %38, %39*, i16, i16, i16*, i16*, i16, i16, i8, i8*, %118*, %119*, i16, i32, i32, %114*, %67*, i8, i8, %46, i8, i8, i8, %16*, %105*, %145, i16, i8, i8, i8, i16, i32, i16, %307 }
-%173 = type <{ %38, i16, i16, i16, i8, i8, i16, i16, %116*, i32*, i16*, i8*, i8, i8, [2 x i8], i32*, i8, i8, i8, i8, %105*, i8, [3 x i8] }>
+%172 = type { %38, ptr, i16, i16, ptr, ptr, i16, i16, i8, ptr, ptr, ptr, i16, i32, i32, ptr, ptr, i8, i8, %46, i8, i8, i8, ptr, ptr, %145, i16, i8, i8, i8, i16, i32, i16, %307 }
+%173 = type <{ %38, i16, i16, i16, i8, i8, i16, i16, ptr, ptr, ptr, ptr, i8, i8, [2 x i8], ptr, i8, i8, i8, i8, ptr, i8, [3 x i8] }>
 %174 = type { %24 }
 %175 = type { %24 }
 %176 = type <{ %160, i8, [3 x i8] }>
@@ -187,11 +187,11 @@
 %178 = type { [8 x i64] }
 %179 = type { i32, i32, %24, %24, i32, i32 }
 %180 = type opaque
-%181 = type { i16, %12*, [14 x i32], [8 x i32], i32, i32, %105* }
-%182 = type { %183*, %184*, %145*, [2 x [4 x %178]], i8, i8, i8, [10 x %185], %307, %186, i32, i32, i8, i16, i16, i32, i32, %23*, [9 x i8], [16 x %190]*, %194, [2 x %195], %109*, %295* }
+%181 = type { i16, ptr, [14 x i32], [8 x i32], i32, i32, ptr }
+%182 = type { ptr, ptr, ptr, [2 x [4 x %178]], i8, i8, i8, [10 x %185], %307, %186, i32, i32, i8, i16, i16, i32, i32, ptr, [9 x i8], ptr, %194, [2 x %195], ptr, ptr }
 %183 = type { [5 x %39], [24 x i8], [1 x [256 x %39]] }
 %184 = type { [2 x [12 x %92]] }
-%185 = type { i32, i32, i8*, %307 }
+%185 = type { i32, i32, ptr, %307 }
 %186 = type { [114 x [22 x i8]], [2 x [22 x i8]], %187, %189 }
 %187 = type { [4 x [4 x [114 x i8]]], [4 x %188], [88 x i8], [4 x [114 x i8]] }
 %188 = type { [4 x [116 x [3 x i8]]] }
@@ -200,13 +200,13 @@
 %191 = type { %192 }
 %192 = type { %193 }
 %193 = type { [1024 x i16], [1024 x i16] }
-%194 = type { i16, i16, i16, %30, %32*, %32*, i16, i8, %24, %39 }
-%195 = type <{ i8, i8, i16, %24, i8, i8, i8, i8, i16, [2 x i8], %32*, i8, i8, [2 x i8] }>
+%194 = type { i16, i16, i16, %30, ptr, ptr, i16, i8, %24, %39 }
+%195 = type <{ i8, i8, i16, %24, i8, i8, i8, i8, i16, [2 x i8], ptr, i8, i8, [2 x i8] }>
 %196 = type { i32, i8, i8, i8, i8, i8, i8, i8, i8 }
-%197 = type <{ %198, %200, %203, %206, [20 x i8], %207, %236, i16, i16, i8, [3 x i8], [4 x i32], [4 x i32], [8 x [4 x i32]], [8 x i32], [4 x i32], [22 x i32], i8, i8, i8, [2 x i8], [2 x i8], i8, %23*, [44 x i32], [8 x i32], [8 x i32], [2508 x i8], [44 x i8], [456 x i8], [456 x i8], [8 x i8], [8 x i8], i16, i8, i8, i16, i16, i16, [2 x i8], %237, %237, %237, [5 x %238], [5 x %239], %240, i8, i8, [2 x i8], %251, %251, [4 x i8], [8 x [11 x i32]], %307, %150*, %105*, %182*, %196*, %241, [4 x i8], %242, %243, %307, %249, %250, [10 x %251], %251, [4416 x i8], [5 x [5 x [352 x i8]]], [5 x i8], [7579 x i8] }>
+%197 = type <{ %198, %200, %203, %206, [20 x i8], %207, %236, i16, i16, i8, [3 x i8], [4 x i32], [4 x i32], [8 x [4 x i32]], [8 x i32], [4 x i32], [22 x i32], i8, i8, i8, [2 x i8], [2 x i8], i8, ptr, [44 x i32], [8 x i32], [8 x i32], [2508 x i8], [44 x i8], [456 x i8], [456 x i8], [8 x i8], [8 x i8], i16, i8, i8, i16, i16, i16, [2 x i8], %237, %237, %237, [5 x %238], [5 x %239], %240, i8, i8, [2 x i8], %251, %251, [4 x i8], [8 x [11 x i32]], %307, ptr, ptr, ptr, ptr, %241, [4 x i8], %242, %243, %307, %249, %250, [10 x %251], %251, [4416 x i8], [5 x [5 x [352 x i8]]], [5 x i8], [7579 x i8] }>
 %198 = type { [8 x %199] }
 %199 = type { i16, i8, i8 }
-%200 = type <{ i64, i8, [3 x i8], i32, i32, i32, %201, %201, %201*, i32, i32, i32, i8, i8, i8, [5 x i8], [57 x %39], %202, [7 x i8] }>
+%200 = type <{ i64, i8, [3 x i8], i32, i32, i32, %201, %201, ptr, i32, i32, i32, i8, i8, i8, [5 x i8], [57 x %39], %202, [7 x i8] }>
 %201 = type { i64, [57 x %39] }
 %202 = type { i8 }
 %203 = type <{ [4 x %204], [4 x %204], %205, %205, i8, [5 x i8] }>
@@ -215,79 +215,79 @@
 %206 = type { i32 }
 %207 = type { [22 x [114 x i8]], [2 x [22 x i8]], [5 x %208], %43, %209, %211, %232, %233, [29 x %210], i32, i32, i32, %234, [4 x i8], i8, i8, i32, [24 x i8] }
 %208 = type { [5 x %39], [164 x %39], [144 x float], [2 x [2 x %39]], i32, [116 x i8], i8, i8 }
-%209 = type { [87 x %39], %210*, i32, i32, i32, [2 x i8], i16, i32, i16, i8, i8 }
+%209 = type { [87 x %39], ptr, i32, i32, i32, [2 x i8], i16, i32, i16, i8, i8 }
 %210 = type { i32 }
 %211 = type { %212, %231 }
 %212 = type { %213 }
 %213 = type { %214, %215, %215, %217, %230, [32 x %39] }
-%214 = type { %39, i16*, i16*, %39*, i8 }
+%214 = type { %39, ptr, ptr, ptr, i8 }
 %215 = type { %216 }
 %216 = type { [2 x [16 x i64]], [2 x [16 x i32]] }
 %217 = type { %218 }
 %218 = type { %219, %222, %223, %225, %228, %229 }
-%219 = type { %220, %221, [30 x %39], float* }
+%219 = type { %220, %221, [30 x %39], ptr }
 %220 = type { [150 x %39], [150 x i16] }
 %221 = type { [5 x %39], [5 x i16] }
 %222 = type { [48 x i32], [48 x i32], [32 x %39], [5 x i16], [5 x i32] }
-%223 = type { %39*, i16*, i16, i32, i8, %224* }
+%223 = type { ptr, ptr, i16, i32, i8, ptr }
 %224 = type { i32, i32, i32, i8 }
-%225 = type { %39*, i8, %226*, i8, i16*, %227*, %214*, i8, i32, i32, i32, i8, i8, i32, %206* }
+%225 = type { ptr, i8, ptr, i8, ptr, ptr, ptr, i8, i32, i32, i32, i8, i8, i32, ptr }
 %226 = type { i8, i8, i8, i32, i8, i32 }
-%227 = type { %39*, %39*, i32, i8, i8, i8, i8, i8 }
+%227 = type { ptr, ptr, i32, i8, i8, i8, i8, i8 }
 %228 = type { [87 x %39], [164 x %39], [167 x %39] }
-%229 = type { %39*, i8, i8 }
+%229 = type { ptr, i8, i8 }
 %230 = type { [29 x %39] }
 %231 = type { [13 x %39], [13 x %210] }
-%232 = type { %39*, i8, i8, i8, i8, %226*, i8, i8, %224*, i8, i8, %206*, i8, i8 }
-%233 = type { i32, i32, float* }
+%232 = type { ptr, i8, i8, i8, i8, ptr, i8, i8, ptr, i8, i8, ptr, i8, i8 }
+%233 = type { i32, i32, ptr }
 %234 = type { %235 }
 %235 = type { i16 }
-%236 = type <{ [57 x %39], [57 x %39], [2 x i8], [2 x i8], [2 x i32], i8, [3 x i8], %268*, [4 x i8] }>
+%236 = type <{ [57 x %39], [57 x %39], [2 x i8], [2 x i8], [2 x i32], i8, [3 x i8], ptr, [4 x i8] }>
 %237 = type { i32, i32, i32 }
-%238 = type { %39*, %39*, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, %46, i8, i8, i8, i8, i8, i16, i16, i16, i16, i8, i8, i16, i16, i8, i32, i8, i8, i32, i32, i8, i8, i16, i8*, %253*, %207*, i8*, i32*, i16*, i32*, i32*, i32, %69*, %91*, %69*, %92*, %92*, i8, [2 x i8], i32, i8, i8, i16, i8, i8, i32, i8, i16, %145, i8, %0*, %3*, %182*, %16*, %206*, %249*, i8, %241*, %17, [2 x i8], i8*, i8* }
-%239 = type { i8*, i8*, i32*, %50*, i16, i16, i16, i16, i16, i16, i8, [4 x i8], [2 x i16], %48, i16, i16, i16, i16, [2 x i16], i16, i32, i32, i8, i8, i8, i8, i8, i8, i16, i16, [4 x i8*], i64*, i8, i8, i8, %240*, i8*, i8*, %207*, i8, i8, i8*, i8, i8*, i32 }
+%238 = type { ptr, ptr, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, %46, i8, i8, i8, i8, i8, i16, i16, i16, i16, i8, i8, i16, i16, i8, i32, i8, i8, i32, i32, i8, i8, i16, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i32, ptr, ptr, ptr, ptr, ptr, i8, [2 x i8], i32, i8, i8, i16, i8, i8, i32, i8, i16, %145, i8, ptr, ptr, ptr, ptr, ptr, ptr, i8, ptr, %17, [2 x i8], ptr, ptr }
+%239 = type { ptr, ptr, ptr, ptr, i16, i16, i16, i16, i16, i16, i8, [4 x i8], [2 x i16], %48, i16, i16, i16, i16, [2 x i16], i16, i32, i32, i8, i8, i8, i8, i8, i8, i16, i16, [4 x ptr], ptr, i8, i8, i8, ptr, ptr, ptr, ptr, i8, i8, ptr, i8, ptr, i32 }
 %240 = type { [2 x [16 x %39]], [265 x %39], [1368 x %210] }
 %241 = type { i32, i32, i32, i32, i8, i32, i32, i32, i32, i32 }
 %242 = type <{ %43, [456 x i8], [228 x i8], [456 x i8], [4 x i8] }>
-%243 = type { %244*, i32, i32, i32, i32, i32, i32, i8, i8, %23*, %247, %248 }
+%243 = type { ptr, i32, i32, i32, i32, i32, i32, i8, i8, ptr, %247, %248 }
 %244 = type { [128 x %245], [2048 x i16] }
 %245 = type { %246, [115 x i64] }
 %246 = type { i16, i8, i32 }
 %247 = type { i32, i32, i32, i32, i8, i8, i8, i8 }
 %248 = type { i32, i32, i32, i32, i8, i8, i32 }
-%249 = type { %23*, i8, i8, [179 x %210], [20 x %210], i16 }
+%249 = type { ptr, i8, i8, [179 x %210], [20 x %210], i16 }
 %250 = type { [8 x i16], [72 x i8], [120 x i8], [3 x i32] }
-%251 = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i16, i16, i8, i8, i8, %48, i8, i32, i8, i32*, [4 x %252], [8 x i32], i8, %238*, %239*, %50*, [8 x i8], %93*, i32, i32, i32, i8, i8, i8, i8, %40*, i8*, i8, i8, i8, i8, i8, [2 x i8], i8, [36 x i8], [36 x i8], i32 }
-%252 = type { i8*, i8 }
-%253 = type { [5 x %254], i8, i8, i8, [114 x i8], %23*, %23*, [48 x i8], %255, %186*, i16, i16, [4 x i16], [4 x i16], i16, %182*, %1*, %105*, [216 x i8] }
-%254 = type { %32*, %37*, i32, i8 }
+%251 = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i16, i16, i8, i8, i8, %48, i8, i32, i8, ptr, [4 x %252], [8 x i32], i8, ptr, ptr, ptr, [8 x i8], ptr, i32, i32, i32, i8, i8, i8, i8, ptr, ptr, i8, i8, i8, i8, i8, [2 x i8], i8, [36 x i8], [36 x i8], i32 }
+%252 = type { ptr, i8 }
+%253 = type { [5 x %254], i8, i8, i8, [114 x i8], ptr, ptr, [48 x i8], %255, ptr, i16, i16, [4 x i16], [4 x i16], i16, ptr, ptr, ptr, [216 x i8] }
+%254 = type { ptr, ptr, i32, i8 }
 %255 = type { [16 x %256], %257, [64 x i8] }
 %256 = type { i64 }
 %257 = type { [8 x i64] }
-%258 = type { i32, %170, [8 x i8], %171, %172, [212 x i8], %164, i8, %165, %168, %169, [100 x %39], %23*, i32, i16, [786 x i64], i16, [2 x i8], [2 x %259], i8, %307, %182*, %105*, %260*, %261, [136 x i8] }
+%258 = type { i32, %170, [8 x i8], %171, %172, [212 x i8], %164, i8, %165, %168, %169, [100 x %39], ptr, i32, i16, [786 x i64], i16, [2 x i8], [2 x %259], i8, %307, ptr, ptr, ptr, %261, [136 x i8] }
 %259 = type <{ %40, %24, i32, i8, [3 x i8] }>
-%260 = type { %141*, i8, %23*, i32, i32, i32, i32, i32, i32, i32, i32, %295*, %3*, %105*, %2* }
+%260 = type { ptr, i8, ptr, i32, i32, i32, i32, i32, i32, i32, i32, ptr, ptr, ptr, ptr }
 %261 = type { [2 x %262], %39, %39, %263, [66 x %39], %264, %265, [5 x i16], i32, i8, i16, i16, i32, i16, i16 }
 %262 = type { [170 x %39] }
 %263 = type { [164 x %39] }
 %264 = type { [162 x %210] }
 %265 = type { [312 x %39] }
-%266 = type { %23*, %182*, %260* }
-%267 = type { %23*, %182*, %260* }
-%268 = type { %269, i32, i8, [24 x %277], %23*, i32, i32, %68*, %277*, [24 x i32], i32*, [8 x i16], %96, [6 x i8*], %307, %105*, %23*, %1*, [4 x i8] }
-%269 = type <{ [2 x %270], [2 x %30], [8 x %30], [24 x %30], [2 x %162*], i8, i8, [2 x i8], [2 x %172*], i8, [3 x i8], i32, i32, i32, i16, i16, i32, i8, i8, i16, i16, i8, i8, i8, i8, i8, [8 x i8], i8, [8 x %276], [8 x %69], [8 x %50], i8, i8, i8, i8, i8, i8, i8, i8, i32, i32, i32, [8 x i16], [8 x i16], %93*, %93*, i8, i8, i16, i8, [3 x i8], %24, i8, i8, [2 x i8], %24, %67*, i8, i8, i8, i8, i8, i8, i8, i8, %40*, %91* }>
-%270 = type { %120*, [16 x %30], %30, [8 x %30], [1 x %30], [2 x %158], i8, i8, %30, %30, %271*, i16, %121* }
+%266 = type { ptr, ptr, ptr }
+%267 = type { ptr, ptr, ptr }
+%268 = type { %269, i32, i8, [24 x %277], ptr, i32, i32, ptr, ptr, [24 x i32], ptr, [8 x i16], %96, [6 x ptr], %307, ptr, ptr, ptr, [4 x i8] }
+%269 = type <{ [2 x %270], [2 x %30], [8 x %30], [24 x %30], [2 x ptr], i8, i8, [2 x i8], [2 x ptr], i8, [3 x i8], i32, i32, i32, i16, i16, i32, i8, i8, i16, i16, i8, i8, i8, i8, i8, [8 x i8], i8, [8 x %276], [8 x %69], [8 x %50], i8, i8, i8, i8, i8, i8, i8, i8, i32, i32, i32, [8 x i16], [8 x i16], ptr, ptr, i8, i8, i16, i8, [3 x i8], %24, i8, i8, [2 x i8], %24, ptr, i8, i8, i8, i8, i8, i8, i8, i8, ptr, ptr }>
+%270 = type { ptr, [16 x %30], %30, [8 x %30], [1 x %30], [2 x %158], i8, i8, %30, %30, ptr, i16, ptr }
 %271 = type { i16, %272, i16, [4 x %273], i16, [4 x %275], i32, i32, i32, i32, i8, i8, i32, i32 }
 %272 = type { i16 }
-%273 = type { %274, %43* }
+%273 = type { %274, ptr }
 %274 = type { i16 }
 %275 = type { i16, i16, i32, i32, i8, i16, i16, i8 }
-%276 = type { i8, %40* }
+%276 = type { i8, ptr }
 %277 = type { i8, %30, i8, i8, i8, i32 }
-%278 = type { i16*, %1* }
-%279 = type <{ %280, %280, [16 x %282], %3*, %24, i32, %307, %283, i32, %289, i8, i8, i8, i8, i8, [3 x i8], i32, i32, %24, i8, i8, i8, i8 }>
+%278 = type { ptr, ptr }
+%279 = type <{ %280, %280, [16 x %282], ptr, %24, i32, %307, %283, i32, %289, i8, i8, i8, i8, i8, [3 x i8], i32, i32, %24, i8, i8, i8, i8 }>
 %280 = type { %281 }
-%281 = type { %281*, %281* }
+%281 = type { ptr, ptr }
 %282 = type <{ %281, %24, %24, %40, i8, [3 x i8] }>
 %283 = type { i32, %284, i32, %286, %287 }
 %284 = type { i8, [5 x %285] }
@@ -296,14 +296,14 @@
 %287 = type { i32, %288 }
 %288 = type { i32, i32 }
 %289 = type { %290 }
-%290 = type { %291, i32**, i32, i32, i32 }
+%290 = type { %291, ptr, i32, i32, i32 }
 %291 = type { %292, %294 }
 %292 = type { %293 }
 %293 = type { i8 }
 %294 = type { i8 }
-%295 = type { i8, i8, %23*, [16 x %296], i8, %105*, %182*, %1*, %260, i8, i8, i8, i8, i8, i8, %23* }
+%295 = type { i8, i8, ptr, [16 x %296], i8, ptr, ptr, ptr, %260, i8, i8, i8, i8, i8, i8, ptr }
 %296 = type { i8, i8, %297, i16 }
-%297 = type { i8, %298, i8* }
+%297 = type { i8, %298, ptr }
 %298 = type { i8 }
 %299 = type { %300, %302 }
 %300 = type { %301, i32 }
@@ -315,20 +315,18 @@
 %306 = type { i32 }
 %307 = type { i32 }
 
-define void @f0(%0* %a0) align 2 #0 {
+define void @f0(ptr %a0) align 2 #0 {
 b0:
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ %v7, %b1 ], [ 0, %b0 ]
-  %v1 = getelementptr inbounds %0, %0* %a0, i32 0, i32 2, i32 undef, i32 %v0
-  %v2 = getelementptr inbounds %39, %39* %v1, i32 0, i32 0
-  %v3 = load i64, i64* %v2, align 8
+  %v1 = getelementptr inbounds %0, ptr %a0, i32 0, i32 2, i32 undef, i32 %v0
+  %v3 = load i64, ptr %v1, align 8
   %v4 = call i64 @llvm.hexagon.S2.brevp(i64 %v3) #1
-  store i64 %v4, i64* %v2, align 8
-  %v5 = bitcast %39* %v1 to [2 x i32]*
-  %v6 = getelementptr inbounds [2 x i32], [2 x i32]* %v5, i32 0, i32 1
-  store i32 0, i32* %v6, align 4
+  store i64 %v4, ptr %v1, align 8
+  %v6 = getelementptr inbounds [2 x i32], ptr %v1, i32 0, i32 1
+  store i32 0, ptr %v6, align 4
   %v7 = add nuw nsw i32 %v0, 1
   %v8 = icmp eq i32 %v7, 2
   br i1 %v8, label %b2, label %b1

diff  --git a/llvm/test/CodeGen/Hexagon/swp-loop-carried-unknown.ll b/llvm/test/CodeGen/Hexagon/swp-loop-carried-unknown.ll
index 9f145189786ef..ecc81fca5501c 100644
--- a/llvm/test/CodeGen/Hexagon/swp-loop-carried-unknown.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-loop-carried-unknown.ll
@@ -20,9 +20,8 @@ b0:
 
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ 0, %b0 ], [ %v22, %b1 ]
-  %v1 = load %s.0*, %s.0** undef, align 4
-  %v2 = getelementptr inbounds %s.0, %s.0* %v1, i32 0, i32 0
-  %v3 = load i16, i16* %v2, align 2
+  %v1 = load ptr, ptr undef, align 4
+  %v3 = load i16, ptr %v1, align 2
   %v4 = add i16 0, %v3
   %v5 = add i16 %v4, 0
   %v6 = add i16 %v5, 0
@@ -39,10 +38,10 @@ b1:                                               ; preds = %b1, %b0
   %v17 = add i16 %v16, 0
   %v18 = add i16 %v17, 0
   %v19 = add i16 %v18, 0
-  %v20 = load %s.0*, %s.0** undef, align 4
-  store i16 %v19, i16* undef, align 2
-  %v21 = getelementptr inbounds %s.0, %s.0* %v20, i32 0, i32 1
-  store i16 0, i16* %v21, align 2
+  %v20 = load ptr, ptr undef, align 4
+  store i16 %v19, ptr undef, align 2
+  %v21 = getelementptr inbounds %s.0, ptr %v20, i32 0, i32 1
+  store i16 0, ptr %v21, align 2
   %v22 = add nuw nsw i32 %v0, 1
   %v23 = icmp eq i32 %v22, 6
   br i1 %v23, label %b2, label %b1

diff  --git a/llvm/test/CodeGen/Hexagon/swp-loopval.ll b/llvm/test/CodeGen/Hexagon/swp-loopval.ll
index 3b4ce8245a378..6ede28af02c43 100644
--- a/llvm/test/CodeGen/Hexagon/swp-loopval.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-loopval.ll
@@ -9,7 +9,7 @@
 %s.2 = type { [4 x i8] }
 
 ; Function Attrs: nounwind
-define void @f0(%s.0* nocapture %a0) #0 {
+define void @f0(ptr nocapture %a0) #0 {
 b0:
   br i1 undef, label %b1, label %b2
 
@@ -30,16 +30,15 @@ b5:                                               ; preds = %b4
 
 b6:                                               ; preds = %b6, %b5
   %v0 = phi i32 [ %v10, %b6 ], [ 0, %b5 ]
-  %v1 = load i32, i32* undef, align 4
-  %v2 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 29, i32 %v0
-  %v3 = bitcast %s.2* %v2 to i32*
-  %v4 = load i32, i32* %v3, align 4
+  %v1 = load i32, ptr undef, align 4
+  %v2 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 29, i32 %v0
+  %v4 = load i32, ptr %v2, align 4
   %v5 = and i32 %v1, 65535
   %v6 = and i32 %v4, -65536
   %v7 = or i32 %v6, %v5
   %v8 = and i32 %v7, -2031617
   %v9 = or i32 %v8, 0
-  store i32 %v9, i32* %v3, align 4
+  store i32 %v9, ptr %v2, align 4
   %v10 = add nsw i32 %v0, 1
   %v11 = icmp eq i32 %v10, undef
   br i1 %v11, label %b7, label %b6

diff  --git a/llvm/test/CodeGen/Hexagon/swp-lots-deps.ll b/llvm/test/CodeGen/Hexagon/swp-lots-deps.ll
index 631d02649d11e..919ebd670295f 100644
--- a/llvm/test/CodeGen/Hexagon/swp-lots-deps.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-lots-deps.ll
@@ -4,7 +4,7 @@
 ; STATS: 1 pipeliner        - Number of loops software pipelined
 
 ; Function Attrs: nounwind
-define i64 @f0(i32 %a0, i32* %a1) #0 {
+define i64 @f0(i32 %a0, ptr %a1) #0 {
 b0:
   %v0 = icmp slt i32 %a0, 123469
   br i1 %v0, label %b1, label %b4
@@ -21,7 +21,7 @@ b2:                                               ; preds = %b2, %b1
   %v6 = phi i64 [ undef, %b1 ], [ %v11, %b2 ]
   %v7 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 %v5, i64 %v6, i64 %v6)
   %v8 = tail call i64 @llvm.hexagon.S2.packhl(i32 undef, i32 %v4)
-  %v9 = load i32, i32* %a1, align 4, !tbaa !0
+  %v9 = load i32, ptr %a1, align 4, !tbaa !0
   %v10 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 %v2, i64 %v6, i64 %v8)
   %v11 = tail call i64 @llvm.hexagon.S2.packhl(i32 %v9, i32 undef)
   %v12 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 %v1, i64 %v6, i64 %v11)

diff  --git a/llvm/test/CodeGen/Hexagon/swp-matmul-bitext.ll b/llvm/test/CodeGen/Hexagon/swp-matmul-bitext.ll
index 3b26d141238ad..9f2e3533525a1 100644
--- a/llvm/test/CodeGen/Hexagon/swp-matmul-bitext.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-matmul-bitext.ll
@@ -17,7 +17,7 @@
 
 @sem_i = common global [0 x %union_h2_sem_t] zeroinitializer, align 4
 
-define void @matrix_mul_matrix_bitextract(i32 %N, i32* %C, i16* %A, i16* %B) {
+define void @matrix_mul_matrix_bitextract(i32 %N, ptr %C, ptr %A, ptr %B) {
 entry:
   %cmp53 = icmp eq i32 %N, 0
   br i1 %cmp53, label %for_end27, label %for_body3_lr_ph_us
@@ -25,7 +25,7 @@ entry:
 for_body3_lr_ph_us:
   %i_054_us = phi i32 [ %inc26_us, %for_cond1_for_inc25_crit_edge_us ], [ 0, %entry ]
   %0 = mul i32 %i_054_us, %N
-  %arrayidx9_us_us_gep = getelementptr i16, i16* %A, i32 %0
+  %arrayidx9_us_us_gep = getelementptr i16, ptr %A, i32 %0
   br label %for_body3_us_us
 
 for_cond1_for_inc25_crit_edge_us:
@@ -36,26 +36,26 @@ for_cond1_for_inc25_crit_edge_us:
 for_body3_us_us:
   %j_052_us_us = phi i32 [ %inc23_us_us, %for_cond4_for_inc22_crit_edge_us_us ], [ 0, %for_body3_lr_ph_us ]
   %add_us_us = add i32 %j_052_us_us, %0
-  %arrayidx_us_us = getelementptr inbounds i32, i32* %C, i32 %add_us_us
-  store i32 0, i32* %arrayidx_us_us, align 4
+  %arrayidx_us_us = getelementptr inbounds i32, ptr %C, i32 %add_us_us
+  store i32 0, ptr %arrayidx_us_us, align 4
   br label %for_body6_us_us
 
 for_cond4_for_inc22_crit_edge_us_us:
-  store i32 %add21_us_us, i32* %arrayidx_us_us, align 4
+  store i32 %add21_us_us, ptr %arrayidx_us_us, align 4
   %inc23_us_us = add i32 %j_052_us_us, 1
   %exitcond88 = icmp eq i32 %inc23_us_us, %N
   br i1 %exitcond88, label %for_cond1_for_inc25_crit_edge_us, label %for_body3_us_us
 
 for_body6_us_us:
   %1 = phi i32 [ 0, %for_body3_us_us ], [ %add21_us_us, %for_body6_us_us ]
-  %arrayidx9_us_us_phi = phi i16* [ %arrayidx9_us_us_gep, %for_body3_us_us ], [ %arrayidx9_us_us_inc, %for_body6_us_us ]
+  %arrayidx9_us_us_phi = phi ptr [ %arrayidx9_us_us_gep, %for_body3_us_us ], [ %arrayidx9_us_us_inc, %for_body6_us_us ]
   %k_050_us_us = phi i32 [ 0, %for_body3_us_us ], [ %inc_us_us, %for_body6_us_us ]
-  %2 = load i16, i16* %arrayidx9_us_us_phi, align 2
+  %2 = load i16, ptr %arrayidx9_us_us_phi, align 2
   %conv_us_us = sext i16 %2 to i32
   %mul10_us_us = mul i32 %k_050_us_us, %N
   %add11_us_us = add i32 %mul10_us_us, %j_052_us_us
-  %arrayidx12_us_us = getelementptr inbounds i16, i16* %B, i32 %add11_us_us
-  %3 = load i16, i16* %arrayidx12_us_us, align 2
+  %arrayidx12_us_us = getelementptr inbounds i16, ptr %B, i32 %add11_us_us
+  %3 = load i16, ptr %arrayidx12_us_us, align 2
   %conv13_us_us = sext i16 %3 to i32
   %mul14_us_us = mul nsw i32 %conv13_us_us, %conv_us_us
   %shr47_us_us = lshr i32 %mul14_us_us, 2
@@ -66,7 +66,7 @@ for_body6_us_us:
   %add21_us_us = add i32 %mul17_us_us, %1
   %inc_us_us = add i32 %k_050_us_us, 1
   %exitcond87 = icmp eq i32 %inc_us_us, %N
-  %arrayidx9_us_us_inc = getelementptr i16, i16* %arrayidx9_us_us_phi, i32 1
+  %arrayidx9_us_us_inc = getelementptr i16, ptr %arrayidx9_us_us_phi, i32 1
   br i1 %exitcond87, label %for_cond4_for_inc22_crit_edge_us_us, label %for_body6_us_us
 
 for_end27:

diff  --git a/llvm/test/CodeGen/Hexagon/swp-max-stage3.ll b/llvm/test/CodeGen/Hexagon/swp-max-stage3.ll
index 01e1b7dca7b5b..3b3a7495d489e 100644
--- a/llvm/test/CodeGen/Hexagon/swp-max-stage3.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-max-stage3.ll
@@ -6,9 +6,9 @@
 @g0 = private unnamed_addr constant [6 x i8] c"s4116\00", align 1
 
 ; Function Attrs: noinline nounwind
-define void @f0(i32 %a0, i32 %a1, float* nocapture readonly %a2, [1000 x float]* nocapture readonly %a3, i32* nocapture readonly %a4, i32 %a5) #0 {
+define void @f0(i32 %a0, i32 %a1, ptr nocapture readonly %a2, ptr nocapture readonly %a3, ptr nocapture readonly %a4, i32 %a5) #0 {
 b0:
-  %v0 = tail call i32 bitcast (i32 (...)* @f1 to i32 ()*)() #2
+  %v0 = tail call i32 @f1() #2
   %v1 = sitofp i32 %v0 to double
   %v2 = add nsw i32 %a1, -1
   %v3 = icmp sgt i32 %a1, 1
@@ -16,19 +16,19 @@ b0:
 
 b1:                                               ; preds = %b1, %b0
   %v4 = phi float [ %v13, %b1 ], [ 0.000000e+00, %b0 ]
-  %v5 = phi float* [ %v16, %b1 ], [ %a2, %b0 ]
-  %v6 = phi i32* [ %v17, %b1 ], [ %a4, %b0 ]
+  %v5 = phi ptr [ %v16, %b1 ], [ %a2, %b0 ]
+  %v6 = phi ptr [ %v17, %b1 ], [ %a4, %b0 ]
   %v7 = phi i32 [ %v14, %b1 ], [ 0, %b0 ]
-  %v8 = load float, float* %v5, align 4
-  %v9 = load i32, i32* %v6, align 4
-  %v10 = getelementptr inbounds [1000 x float], [1000 x float]* %a3, i32 %v9, i32 %a5
-  %v11 = load float, float* %v10, align 4
+  %v8 = load float, ptr %v5, align 4
+  %v9 = load i32, ptr %v6, align 4
+  %v10 = getelementptr inbounds [1000 x float], ptr %a3, i32 %v9, i32 %a5
+  %v11 = load float, ptr %v10, align 4
   %v12 = fmul float %v8, %v11
   %v13 = fadd float %v4, %v12
   %v14 = add nuw nsw i32 %v7, 1
   %v15 = icmp slt i32 %v14, %v2
-  %v16 = getelementptr float, float* %v5, i32 1
-  %v17 = getelementptr i32, i32* %v6, i32 1
+  %v16 = getelementptr float, ptr %v5, i32 1
+  %v17 = getelementptr i32, ptr %v6, i32 1
   br i1 %v15, label %b1, label %b2
 
 b2:                                               ; preds = %b1
@@ -37,13 +37,13 @@ b2:                                               ; preds = %b1
 
 b3:                                               ; preds = %b2, %b0
   %v19 = phi double [ %v18, %b2 ], [ 0.000000e+00, %b0 ]
-  tail call void @f2(double %v19, i32 %a1, i32 %a1, double %v1, i8* getelementptr inbounds ([6 x i8], [6 x i8]* @g0, i32 0, i32 0)) #2
+  tail call void @f2(double %v19, i32 %a1, i32 %a1, double %v1, ptr @g0) #2
   ret void
 }
 
 declare i32 @f1(...) #1
 
-declare void @f2(double, i32, i32, double, i8*) #1
+declare void @f2(double, i32, i32, double, ptr) #1
 
 attributes #0 = { noinline nounwind "target-cpu"="hexagonv60" }
 attributes #1 = { "target-cpu"="hexagonv60" }

diff  --git a/llvm/test/CodeGen/Hexagon/swp-max.ll b/llvm/test/CodeGen/Hexagon/swp-max.ll
index 32282204ec52a..2376cc50e325b 100644
--- a/llvm/test/CodeGen/Hexagon/swp-max.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-max.ll
@@ -24,8 +24,8 @@ for.body:
   %MaxLeftBorderSum.012 = phi i32 [ %MaxLeftBorderSum.1, %for.body ], [ 0, %for.body.preheader ]
   %i.011 = phi i32 [ %dec, %for.body ], [ %div, %for.body.preheader ]
   %LeftBorderSum.010 = phi i32 [ %add1, %for.body ], [ 0, %for.body.preheader ]
-  %arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* @A, i32 0, i32 %i.011
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds [8 x i32], ptr @A, i32 0, i32 %i.011
+  %0 = load i32, ptr %arrayidx, align 4
   %add1 = add nsw i32 %0, %LeftBorderSum.010
   %cmp2 = icmp sgt i32 %add1, %MaxLeftBorderSum.012
   %MaxLeftBorderSum.1 = select i1 %cmp2, i32 %add1, i32 %MaxLeftBorderSum.012

diff  --git a/llvm/test/CodeGen/Hexagon/swp-maxstart.ll b/llvm/test/CodeGen/Hexagon/swp-maxstart.ll
index 8d65e76913f36..9364fc45f0ef6 100644
--- a/llvm/test/CodeGen/Hexagon/swp-maxstart.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-maxstart.ll
@@ -10,23 +10,23 @@
 ; CHECK-NOT: r{{[0-9]+}} = memw([[REG0]]+#12)
 ; CHECK: memw([[REG0]]+#12) = r{{[0-9]+}}
 
-%s.0 = type { i64, i32, i32, i32, i8* }
+%s.0 = type { i64, i32, i32, i32, ptr }
 
 @g0 = external global %s.0, align 8
 
 ; Function Attrs: nounwind
 define void @f0() #0 {
 b0:
-  %v0 = load i32, i32* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 1), align 8
+  %v0 = load i32, ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 1), align 8
   %v1 = ashr i32 %v0, 3
   br i1 undef, label %b1, label %b2
 
 b1:                                               ; preds = %b1, %b0
   %v2 = phi i32 [ %v5, %b1 ], [ 0, %b0 ]
-  %v3 = load i8*, i8** getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 4), align 4
-  %v4 = getelementptr inbounds i8, i8* %v3, i32 -1
-  store i8* %v4, i8** getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 4), align 4
-  store i8 0, i8* %v4, align 1
+  %v3 = load ptr, ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 4), align 4
+  %v4 = getelementptr inbounds i8, ptr %v3, i32 -1
+  store ptr %v4, ptr getelementptr inbounds (%s.0, ptr @g0, i32 0, i32 4), align 4
+  store i8 0, ptr %v4, align 1
   %v5 = add nsw i32 %v2, 1
   %v6 = icmp eq i32 %v5, %v1
   br i1 %v6, label %b2, label %b1

diff  --git a/llvm/test/CodeGen/Hexagon/swp-more-phi.ll b/llvm/test/CodeGen/Hexagon/swp-more-phi.ll
index 28cdd1cae347a..5612e9f1b11d0 100644
--- a/llvm/test/CodeGen/Hexagon/swp-more-phi.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-more-phi.ll
@@ -19,7 +19,7 @@ b3:                                               ; preds = %b3, %b2
 b4:                                               ; preds = %b3, %b2
   %v0 = ashr i32 undef, 25
   %v1 = mul nsw i32 %v0, 2
-  %v2 = load i8, i8* undef, align 1
+  %v2 = load i8, ptr undef, align 1
   br i1 undef, label %b5, label %b10
 
 b5:                                               ; preds = %b4
@@ -38,7 +38,7 @@ b9:                                               ; preds = %b9, %b8, %b5
   %v3 = phi i8 [ %v7, %b9 ], [ undef, %b8 ], [ %v2, %b5 ]
   %v4 = phi i32 [ %v8, %b9 ], [ undef, %b8 ], [ 1, %b5 ]
   %v5 = add i32 %v4, undef
-  %v6 = load i8, i8* undef, align 1
+  %v6 = load i8, ptr undef, align 1
   %v7 = select i1 undef, i8 %v6, i8 %v3
   %v8 = add nsw i32 %v4, 1
   %v9 = icmp eq i32 %v8, %v1

diff  --git a/llvm/test/CodeGen/Hexagon/swp-multi-loops.ll b/llvm/test/CodeGen/Hexagon/swp-multi-loops.ll
index 5a2e7d4e14d5b..c356353a0264d 100644
--- a/llvm/test/CodeGen/Hexagon/swp-multi-loops.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-multi-loops.ll
@@ -16,7 +16,7 @@
 ; CHECK-NEXT: memw(r{{[0-9]+}}++#4)
 ; CHECK-NEXT: endloop0
 
-define i32 @test(i32* %a, i32 %n, i32 %l) {
+define i32 @test(ptr %a, i32 %n, i32 %l) {
 entry:
   %cmp23 = icmp sgt i32 %n, 0
   br i1 %cmp23, label %for.body3.lr.ph.preheader, label %for.end14
@@ -32,28 +32,28 @@ for.body3.lr.ph:
 
 for.body3:
   %sum.118 = phi i32 [ %sum.025, %for.body3.lr.ph ], [ %add, %for.body3 ]
-  %arrayidx.phi = phi i32* [ %a, %for.body3.lr.ph ], [ %arrayidx.inc, %for.body3 ]
+  %arrayidx.phi = phi ptr [ %a, %for.body3.lr.ph ], [ %arrayidx.inc, %for.body3 ]
   %i.017 = phi i32 [ 0, %for.body3.lr.ph ], [ %inc, %for.body3 ]
-  %0 = load i32, i32* %arrayidx.phi, align 4
+  %0 = load i32, ptr %arrayidx.phi, align 4
   %add = add nsw i32 %0, %sum.118
   %inc = add nsw i32 %i.017, 1
   %exitcond = icmp eq i32 %inc, %n
-  %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1
+  %arrayidx.inc = getelementptr i32, ptr %arrayidx.phi, i32 1
   br i1 %exitcond, label %for.end, label %for.body3
 
 for.end:
-  tail call void @bar(i32* %a) #2
+  tail call void @bar(ptr %a) #2
   br label %for.body6
 
 for.body6:
   %sum1.121 = phi i32 [ %sum1.026, %for.end ], [ %add8, %for.body6 ]
-  %arrayidx7.phi = phi i32* [ %a, %for.end ], [ %arrayidx7.inc, %for.body6 ]
+  %arrayidx7.phi = phi ptr [ %a, %for.end ], [ %arrayidx7.inc, %for.body6 ]
   %i.120 = phi i32 [ 0, %for.end ], [ %inc10, %for.body6 ]
-  %1 = load i32, i32* %arrayidx7.phi, align 4
+  %1 = load i32, ptr %arrayidx7.phi, align 4
   %add8 = add nsw i32 %1, %sum1.121
   %inc10 = add nsw i32 %i.120, 1
   %exitcond29 = icmp eq i32 %inc10, %n
-  %arrayidx7.inc = getelementptr i32, i32* %arrayidx7.phi, i32 1
+  %arrayidx7.inc = getelementptr i32, ptr %arrayidx7.phi, i32 1
   br i1 %exitcond29, label %for.inc12, label %for.body6
 
 for.inc12:
@@ -71,5 +71,5 @@ for.end14:
   ret i32 %add15
 }
 
-declare void @bar(i32*)
+declare void @bar(ptr)
 

diff  --git a/llvm/test/CodeGen/Hexagon/swp-multi-phi-refs.ll b/llvm/test/CodeGen/Hexagon/swp-multi-phi-refs.ll
index e6604651e2a81..b4f08c2dd7d76 100644
--- a/llvm/test/CodeGen/Hexagon/swp-multi-phi-refs.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-multi-phi-refs.ll
@@ -8,35 +8,33 @@
 ;  v8 = phi(v2, v7)
 
 ; Function Attrs: nounwind
-define void @f0(i8* noalias nocapture readonly %a0, i32 %a1, i32 %a2, i8* noalias nocapture %a3, i32 %a4) #0 {
+define void @f0(ptr noalias nocapture readonly %a0, i32 %a1, i32 %a2, ptr noalias nocapture %a3, i32 %a4) #0 {
 b0:
   %v0 = add i32 %a1, -1
-  %v1 = getelementptr inbounds i8, i8* %a0, i32 0
-  %v2 = getelementptr inbounds i8, i8* %a0, i32 undef
-  %v3 = getelementptr inbounds i8, i8* %a3, i32 0
+  %v2 = getelementptr inbounds i8, ptr %a0, i32 undef
   br i1 undef, label %b1, label %b4
 
 b1:                                               ; preds = %b1, %b0
   br i1 undef, label %b1, label %b2
 
 b2:                                               ; preds = %b1
-  %v4 = getelementptr inbounds i8, i8* %a0, i32 undef
+  %v4 = getelementptr inbounds i8, ptr %a0, i32 undef
   br label %b3
 
 b3:                                               ; preds = %b3, %b2
-  %v5 = phi i8* [ %v10, %b3 ], [ %v3, %b2 ]
-  %v6 = phi i8* [ %v25, %b3 ], [ %v4, %b2 ]
-  %v7 = phi i8* [ %v6, %b3 ], [ %v2, %b2 ]
-  %v8 = phi i8* [ %v7, %b3 ], [ %v1, %b2 ]
+  %v5 = phi ptr [ %v10, %b3 ], [ %a3, %b2 ]
+  %v6 = phi ptr [ %v25, %b3 ], [ %v4, %b2 ]
+  %v7 = phi ptr [ %v6, %b3 ], [ %v2, %b2 ]
+  %v8 = phi ptr [ %v7, %b3 ], [ %a0, %b2 ]
   %v9 = phi i32 [ %v26, %b3 ], [ 1, %b2 ]
-  %v10 = getelementptr inbounds i8, i8* %v5, i32 %a4
-  %v11 = getelementptr inbounds i8, i8* %v8, i32 -1
-  %v12 = load i8, i8* %v11, align 1, !tbaa !0
+  %v10 = getelementptr inbounds i8, ptr %v5, i32 %a4
+  %v11 = getelementptr inbounds i8, ptr %v8, i32 -1
+  %v12 = load i8, ptr %v11, align 1, !tbaa !0
   %v13 = zext i8 %v12 to i32
   %v14 = add nuw nsw i32 %v13, 0
   %v15 = add nuw nsw i32 %v14, 0
   %v16 = add nuw nsw i32 %v15, 0
-  %v17 = load i8, i8* %v6, align 1, !tbaa !0
+  %v17 = load i8, ptr %v6, align 1, !tbaa !0
   %v18 = zext i8 %v17 to i32
   %v19 = add nuw nsw i32 %v16, %v18
   %v20 = add nuw nsw i32 %v19, 0
@@ -44,8 +42,8 @@ b3:                                               ; preds = %b3, %b2
   %v22 = add nsw i32 %v21, 32768
   %v23 = lshr i32 %v22, 16
   %v24 = trunc i32 %v23 to i8
-  store i8 %v24, i8* %v10, align 1, !tbaa !0
-  %v25 = getelementptr inbounds i8, i8* %v6, i32 %a2
+  store i8 %v24, ptr %v10, align 1, !tbaa !0
+  %v25 = getelementptr inbounds i8, ptr %v6, i32 %a2
   %v26 = add i32 %v9, 1
   %v27 = icmp eq i32 %v26, %v0
   br i1 %v27, label %b4, label %b3

diff  --git a/llvm/test/CodeGen/Hexagon/swp-node-order.ll b/llvm/test/CodeGen/Hexagon/swp-node-order.ll
index d93f826cb89f7..a7719eddf12e8 100644
--- a/llvm/test/CodeGen/Hexagon/swp-node-order.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-node-order.ll
@@ -27,8 +27,8 @@ b1:                                               ; preds = %b1, %b0
 
 b2:                                               ; preds = %b1
   %v12 = trunc i64 %v5 to i32
-  %v13 = inttoptr i32 %v0 to i32*
-  store i32 %v12, i32* %v13, align 4, !tbaa !0
+  %v13 = inttoptr i32 %v0 to ptr
+  store i32 %v12, ptr %v13, align 4, !tbaa !0
   call void @llvm.trap()
   unreachable
 }

diff  --git a/llvm/test/CodeGen/Hexagon/swp-order-carried.ll b/llvm/test/CodeGen/Hexagon/swp-order-carried.ll
index a360038ddb6c9..f496b9fc78733 100644
--- a/llvm/test/CodeGen/Hexagon/swp-order-carried.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-order-carried.ll
@@ -25,14 +25,14 @@ b1:                                               ; preds = %b1, %b0
   %v12 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v5, i32 %v5)
   %v13 = tail call i64 @llvm.hexagon.S2.valignib(i64 %v10, i64 undef, i32 2)
   %v14 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 %v2, i64 %v12, i64 %v13)
-  %v15 = inttoptr i32 %v7 to i16*
-  %v16 = load i16, i16* %v15, align 2
+  %v15 = inttoptr i32 %v7 to ptr
+  %v16 = load i16, ptr %v15, align 2
   %v17 = sext i16 %v16 to i32
   %v18 = add nsw i32 %v7, -8
   %v19 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 undef, i64 %v12, i64 0)
   %v20 = tail call i32 @llvm.hexagon.A2.combine.ll(i32 %v17, i32 %v1)
-  %v21 = inttoptr i32 %v18 to i16*
-  %v22 = load i16, i16* %v21, align 2
+  %v21 = inttoptr i32 %v18 to ptr
+  %v22 = load i16, ptr %v21, align 2
   %v23 = sext i16 %v22 to i32
   %v24 = add nsw i32 %v7, -16
   %v25 = add nsw i32 %v4, 1
@@ -53,13 +53,10 @@ b3:                                               ; preds = %b2, %b0
   %v34 = phi i32 [ %v30, %b2 ], [ undef, %b0 ]
   %v35 = phi i32 [ %v31, %b2 ], [ undef, %b0 ]
   %v36 = phi i32 [ %v33, %b2 ], [ undef, %b0 ]
-  %v37 = bitcast i8* undef to i32*
-  store i32 %v35, i32* %v37, align 4
-  %v38 = getelementptr inbounds i8, i8* null, i32 8
-  %v39 = bitcast i8* %v38 to i32*
-  store i32 %v34, i32* %v39, align 4
-  %v40 = bitcast i8* undef to i32*
-  store i32 %v36, i32* %v40, align 4
+  store i32 %v35, ptr undef, align 4
+  %v38 = getelementptr inbounds i8, ptr null, i32 8
+  store i32 %v34, ptr %v38, align 4
+  store i32 %v36, ptr undef, align 4
   call void @llvm.trap()
   unreachable
 }

diff  --git a/llvm/test/CodeGen/Hexagon/swp-order-copies.ll b/llvm/test/CodeGen/Hexagon/swp-order-copies.ll
index 0a017c4ab7f6f..c667bf50a19ff 100644
--- a/llvm/test/CodeGen/Hexagon/swp-order-copies.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-order-copies.ll
@@ -11,20 +11,20 @@
 ; CHECK-NOT: {
 ; CHECK: :endloop0
 
-define void @test(i64* nocapture %v1, i64 %v2, i32 %len) local_unnamed_addr #0 {
+define void @test(ptr nocapture %v1, i64 %v2, i32 %len) local_unnamed_addr #0 {
 entry:
   %cmp7 = icmp sgt i32 %len, 0
   br i1 %cmp7, label %for.body, label %for.end
 
 for.body:
-  %arrayidx.phi = phi i64* [ %arrayidx.inc, %for.body ], [ %v1, %entry ]
+  %arrayidx.phi = phi ptr [ %arrayidx.inc, %for.body ], [ %v1, %entry ]
   %i.08 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
-  %0 = load i64, i64* %arrayidx.phi, align 8
+  %0 = load i64, ptr %arrayidx.phi, align 8
   %1 = tail call i64 @llvm.hexagon.M2.mmpyul.rs1(i64 %0, i64 %v2)
-  store i64 %1, i64* %arrayidx.phi, align 8
+  store i64 %1, ptr %arrayidx.phi, align 8
   %inc = add nuw nsw i32 %i.08, 1
   %exitcond = icmp eq i32 %inc, %len
-  %arrayidx.inc = getelementptr i64, i64* %arrayidx.phi, i32 1
+  %arrayidx.inc = getelementptr i64, ptr %arrayidx.phi, i32 1
   br i1 %exitcond, label %for.end, label %for.body
 
 for.end:

diff  --git a/llvm/test/CodeGen/Hexagon/swp-order-deps1.ll b/llvm/test/CodeGen/Hexagon/swp-order-deps1.ll
index 8e9af05cf8f4c..7316deaa15848 100644
--- a/llvm/test/CodeGen/Hexagon/swp-order-deps1.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-order-deps1.ll
@@ -13,7 +13,7 @@ b1:                                               ; preds = %b0
   br i1 undef, label %b2, label %b4
 
 b2:                                               ; preds = %b1
-  %v0 = load i16, i16* undef, align 2
+  %v0 = load i16, ptr undef, align 2
   br label %b5
 
 b3:                                               ; preds = %b5

diff  --git a/llvm/test/CodeGen/Hexagon/swp-order-deps3.ll b/llvm/test/CodeGen/Hexagon/swp-order-deps3.ll
index e85d35549385a..5236606aeffba 100644
--- a/llvm/test/CodeGen/Hexagon/swp-order-deps3.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-order-deps3.ll
@@ -10,15 +10,12 @@ b0:
 
 b1:                                               ; preds = %b1, %b0
   %v2 = phi i32 [ %v10, %b1 ], [ undef, %b0 ]
-  %v3 = phi i8* [ %v7, %b1 ], [ undef, %b0 ]
-  %v4 = ptrtoint i8* %v3 to i32
+  %v3 = phi ptr [ %v7, %b1 ], [ undef, %b0 ]
+  %v4 = ptrtoint ptr %v3 to i32
   %v5 = add i32 %v4, %v1
-  %v6 = bitcast i8* %v3 to i32*
-  store i32 %v5, i32* %v6, align 4
-  %v7 = getelementptr inbounds i8, i8* %v3, i32 %v1
-  %v8 = getelementptr inbounds i8, i8* %v3, i32 0
-  %v9 = bitcast i8* %v8 to i32*
-  store i32 1111638594, i32* %v9, align 4
+  store i32 %v5, ptr %v3, align 4
+  %v7 = getelementptr inbounds i8, ptr %v3, i32 %v1
+  store i32 1111638594, ptr %v3, align 4
   %v10 = add nsw i32 %v2, -1
   %v11 = icmp sgt i32 %v10, 0
   br i1 %v11, label %b1, label %b2

diff  --git a/llvm/test/CodeGen/Hexagon/swp-order-deps4.ll b/llvm/test/CodeGen/Hexagon/swp-order-deps4.ll
index 408d842e111bc..ce231f3a60a6d 100644
--- a/llvm/test/CodeGen/Hexagon/swp-order-deps4.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-order-deps4.ll
@@ -10,31 +10,31 @@
 %1 = type { i8, [32 x %2] }
 %2 = type { i8, %3, i8, i8, i16, i8, [20 x i16], [20 x i16] }
 %3 = type { i16, i8 }
-%4 = type { i8, [64 x %5], [64 x %5*] }
-%5 = type { i8, i8, i8*, %6 }
+%4 = type { i8, [64 x %5], [64 x ptr] }
+%5 = type { i8, i8, ptr, %6 }
 %6 = type { %7 }
-%7 = type { i8*, %3, i8, i8, i8, i8, i16, i8, i8, i8, i16, i32, i8, [3 x i8], [3 x i16], i16, i8, i16, i8, %8, i16, i8, i16 }
+%7 = type { ptr, %3, i8, i8, i8, i8, i16, i8, i8, i8, i16, i32, i8, [3 x i8], [3 x i16], i16, i8, i16, i8, %8, i16, i8, i16 }
 %8 = type { i8, i8 }
-%9 = type { i8, i8, %10*, i8, [8 x %7*], i8, i8, i8, i8, i8, %7*, i8, %7*, i8, i8, i8, i8, i8, i8, i8, i8, i32, i8, i32, i32, i32, i32, i32, i32, i32, i8, i8, i16, i8, void (i8)*, i8, i8, i8, i8, i8, i8 }
+%9 = type { i8, i8, ptr, i8, [8 x ptr], i8, i8, i8, i8, i8, ptr, i8, ptr, i8, i8, i8, i8, i8, i8, i8, i8, i32, i8, i32, i32, i32, i32, i32, i32, i32, i8, i8, i16, i8, ptr, i8, i8, i8, i8, i8, i8 }
 %10 = type { i8, i8, i8, i8, i8, %11, %12, %13, %14 }
 %11 = type { i8, i16, i16 }
-%12 = type { i8, i16, i8* }
+%12 = type { i8, i16, ptr }
 %13 = type { i8, i16 }
 %14 = type { %15, %20, %25 }
 %15 = type { i8, i8, %16, i8, [18 x %17] }
 %16 = type { i8, i16, i16 }
-%17 = type { i8, i8, [10 x %3], [10 x i16], [10 x i16], [10 x i8], %18* }
+%17 = type { i8, i8, [10 x %3], [10 x i16], [10 x i16], [10 x i8], ptr }
 %18 = type { %19, i16, i16, %19, i16 }
 %19 = type { i16, i16, i16, i8 }
 %20 = type { i8, i8, %21 }
-%21 = type { i8*, %22, %23 }
+%21 = type { ptr, %22, %23 }
 %22 = type { %3, i8, i8, i16, i16, i16, i8, i16 }
 %23 = type { [2 x %24], [4 x i8] }
-%24 = type { i8, %3, i16, i16, i16, i16, %18* }
+%24 = type { i8, %3, i16, i16, i16, i16, ptr }
 %25 = type { i8, i8, [8 x %26] }
-%26 = type { i8*, %27, %24 }
+%26 = type { ptr, %27, %24 }
 %27 = type { %3, i8, i16, i16, i16 }
-%28 = type { [2 x %29], [2 x i16], i8, i8*, i16, i8, i8, %31*, %32*, %33*, %33*, [3 x %34*], i8, [2 x i8], i8, i8, [2 x i8], [2 x i8], [3 x i8] }
+%28 = type { [2 x %29], [2 x i16], i8, ptr, i16, i8, i8, ptr, ptr, ptr, ptr, [3 x ptr], i8, [2 x i8], i8, i8, [2 x i8], [2 x i8], [3 x i8] }
 %29 = type <{ %30, i8, [1000 x i8] }>
 %30 = type { i16, i16, [2 x i32] }
 %31 = type <{ i8, i8, i16, i8 }>
@@ -51,12 +51,11 @@ b0:
 
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ 0, %b0 ], [ %v5, %b1 ]
-  %v1 = getelementptr inbounds [2 x %0], [2 x %0]* @g0, i32 0, i32 undef, i32 1, i32 1, i32 %v0
-  %v2 = getelementptr inbounds [2 x %0], [2 x %0]* @g0, i32 0, i32 undef, i32 1, i32 2, i32 %v0
-  store %5* %v1, %5** %v2, align 4
-  %v3 = getelementptr inbounds [2 x %0], [2 x %0]* @g0, i32 0, i32 undef, i32 1, i32 1, i32 %v0, i32 3
-  %v4 = bitcast %6* %v3 to %5**
-  store %5* %v1, %5** %v4, align 4
+  %v1 = getelementptr inbounds [2 x %0], ptr @g0, i32 0, i32 undef, i32 1, i32 1, i32 %v0
+  %v2 = getelementptr inbounds [2 x %0], ptr @g0, i32 0, i32 undef, i32 1, i32 2, i32 %v0
+  store ptr %v1, ptr %v2, align 4
+  %v3 = getelementptr inbounds [2 x %0], ptr @g0, i32 0, i32 undef, i32 1, i32 1, i32 %v0, i32 3
+  store ptr %v1, ptr %v3, align 4
   %v5 = add nuw nsw i32 %v0, 1
   %v6 = icmp eq i32 %v5, 64
   br i1 %v6, label %b2, label %b1

diff  --git a/llvm/test/CodeGen/Hexagon/swp-order-deps5.ll b/llvm/test/CodeGen/Hexagon/swp-order-deps5.ll
index f7958a1cb8684..c90cf2f9418da 100644
--- a/llvm/test/CodeGen/Hexagon/swp-order-deps5.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-order-deps5.ll
@@ -9,7 +9,7 @@ b0:
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ 1, %b0 ], [ %v9, %b1 ]
   %v1 = phi i64 [ 0, %b0 ], [ %v10, %b1 ]
-  %v2 = load i32, i32* undef, align 4
+  %v2 = load i32, ptr undef, align 4
   %v3 = sub nsw i32 0, %v2
   %v4 = select i1 undef, i32 undef, i32 %v3
   %v5 = sext i32 %v4 to i64

diff  --git a/llvm/test/CodeGen/Hexagon/swp-order-deps6.ll b/llvm/test/CodeGen/Hexagon/swp-order-deps6.ll
index b3db41f45c53e..e23de951f0448 100644
--- a/llvm/test/CodeGen/Hexagon/swp-order-deps6.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-order-deps6.ll
@@ -6,7 +6,7 @@
 ; Function Attrs: nounwind readnone
 declare i64 @llvm.hexagon.A2.combinew(i32, i32) #0
 
-define void @f0(%s.0* noalias nocapture %a0, i32 %a1) local_unnamed_addr {
+define void @f0(ptr noalias nocapture %a0, i32 %a1) local_unnamed_addr {
 b0:
   %v0 = call i64 @llvm.hexagon.A2.combinew(i32 %a1, i32 %a1)
   br label %b1
@@ -14,10 +14,10 @@ b0:
 b1:                                               ; preds = %b1, %b0
   %v1 = phi i32 [ 0, %b0 ], [ %v6, %b1 ]
   %v2 = mul nuw nsw i32 %v1, 13
-  %v3 = getelementptr inbounds %s.0, %s.0* %a0, i32 %v2, i32 0
-  %v4 = load i64, i64* %v3, align 8
+  %v3 = getelementptr inbounds %s.0, ptr %a0, i32 %v2, i32 0
+  %v4 = load i64, ptr %v3, align 8
   %v5 = add nsw i64 %v4, %v0
-  store i64 %v5, i64* %v3, align 8
+  store i64 %v5, ptr %v3, align 8
   %v6 = add nuw nsw i32 %v1, 1
   %v7 = icmp eq i32 %v6, 12
   br i1 %v7, label %b2, label %b1

diff  --git a/llvm/test/CodeGen/Hexagon/swp-order-deps7.ll b/llvm/test/CodeGen/Hexagon/swp-order-deps7.ll
index 4cd29a4a0baf8..efac1e71c94c6 100644
--- a/llvm/test/CodeGen/Hexagon/swp-order-deps7.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-order-deps7.ll
@@ -10,23 +10,23 @@
 ; CHECK: [[REG1]] =
 ; CHECK: endloop0
 
-define void @f0(i16* nocapture %a0, float* nocapture readonly %a1, float %a2, i32 %a3) {
+define void @f0(ptr nocapture %a0, ptr nocapture readonly %a1, float %a2, i32 %a3) {
 b0:
   %v0 = icmp sgt i32 %a3, 0
   br i1 %v0, label %b1, label %b2
 
 b1:                                               ; preds = %b1, %b0
   %v1 = phi i32 [ %v11, %b1 ], [ 0, %b0 ]
-  %v2 = phi i16* [ %v10, %b1 ], [ %a0, %b0 ]
-  %v3 = phi float* [ %v4, %b1 ], [ %a1, %b0 ]
-  %v4 = getelementptr inbounds float, float* %v3, i32 1
-  %v5 = load float, float* %v3, align 4, !tbaa !0
+  %v2 = phi ptr [ %v10, %b1 ], [ %a0, %b0 ]
+  %v3 = phi ptr [ %v4, %b1 ], [ %a1, %b0 ]
+  %v4 = getelementptr inbounds float, ptr %v3, i32 1
+  %v5 = load float, ptr %v3, align 4, !tbaa !0
   %v6 = fmul float %v5, %a2
   %v7 = tail call i32 @llvm.hexagon.F2.conv.sf2w(float %v6)
   %v8 = tail call i32 @llvm.hexagon.A2.sath(i32 %v7)
   %v9 = trunc i32 %v8 to i16
-  %v10 = getelementptr inbounds i16, i16* %v2, i32 1
-  store i16 %v9, i16* %v2, align 2, !tbaa !4
+  %v10 = getelementptr inbounds i16, ptr %v2, i32 1
+  store i16 %v9, ptr %v2, align 2, !tbaa !4
   %v11 = add nuw nsw i32 %v1, 1
   %v12 = icmp eq i32 %v11, %a3
   br i1 %v12, label %b2, label %b1

diff  --git a/llvm/test/CodeGen/Hexagon/swp-order-prec.ll b/llvm/test/CodeGen/Hexagon/swp-order-prec.ll
index c50b1d89bd9ea..6662a868567b4 100644
--- a/llvm/test/CodeGen/Hexagon/swp-order-prec.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-order-prec.ll
@@ -14,10 +14,10 @@ b0:
 
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ %v3, %b1 ], [ 0, %b0 ]
-  %v1 = getelementptr inbounds %s.0, %s.0* undef, i32 0, i32 1, i32 %v0, i32 0
-  store i16 0, i16* %v1, align 1
-  %v2 = getelementptr inbounds %s.0, %s.0* undef, i32 0, i32 1, i32 %v0, i32 1
-  store i16 -1, i16* %v2, align 1
+  %v1 = getelementptr inbounds %s.0, ptr undef, i32 0, i32 1, i32 %v0, i32 0
+  store i16 0, ptr %v1, align 1
+  %v2 = getelementptr inbounds %s.0, ptr undef, i32 0, i32 1, i32 %v0, i32 1
+  store i16 -1, ptr %v2, align 1
   %v3 = add nsw i32 %v0, 1
   %v4 = icmp eq i32 %v3, 20
   br i1 %v4, label %b2, label %b1

diff  --git a/llvm/test/CodeGen/Hexagon/swp-order.ll b/llvm/test/CodeGen/Hexagon/swp-order.ll
index 14cc682eb7e57..ce7888cb338c3 100644
--- a/llvm/test/CodeGen/Hexagon/swp-order.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-order.ll
@@ -16,42 +16,40 @@
 @g0 = external hidden unnamed_addr constant [19 x i8], align 1
 
 ; Function Attrs: nounwind optsize
-declare i32 @f0(i8* nocapture readonly, ...) #0
+declare i32 @f0(ptr nocapture readonly, ...) #0
 
 ; Function Attrs: nounwind optsize
-declare void @f1(i32*, i32*, i32* nocapture readnone) #0
+declare void @f1(ptr, ptr, ptr nocapture readnone) #0
 
 ; Function Attrs: argmemonly nounwind
-declare i8* @llvm.hexagon.circ.stw(i8*, i32, i32, i32) #1
+declare ptr @llvm.hexagon.circ.stw(ptr, i32, i32, i32) #1
 
 ; Function Attrs: nounwind optsize
-define void @f2(i32* %a0, i32* %a1, i32* %a2) #0 {
+define void @f2(ptr %a0, ptr %a1, ptr %a2) #0 {
 b0:
   %v0 = alloca i32, align 4
-  call void @f1(i32* %a2, i32* %a0, i32* %v0) #2
-  %v1 = bitcast i32* %a1 to i8*
+  call void @f1(ptr %a2, ptr %a0, ptr %v0) #2
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
   %v2 = phi i32 [ 0, %b0 ], [ %v13, %b1 ]
-  %v3 = phi i32* [ %a2, %b0 ], [ %v16, %b1 ]
+  %v3 = phi ptr [ %a2, %b0 ], [ %v16, %b1 ]
   %v4 = phi i32 [ 0, %b0 ], [ %v14, %b1 ]
-  %v5 = load i32, i32* %a1, align 4, !tbaa !0
+  %v5 = load i32, ptr %a1, align 4, !tbaa !0
   %v6 = add nsw i32 %v2, %v5
-  %v7 = load i32, i32* %v3, align 4, !tbaa !0
-  %v8 = tail call i8* @llvm.hexagon.circ.stw(i8* %v1, i32 %v7, i32 150995968, i32 4) #3
-  %v9 = bitcast i8* %v8 to i32*
-  %v10 = load i32, i32* %v3, align 4, !tbaa !0
+  %v7 = load i32, ptr %v3, align 4, !tbaa !0
+  %v8 = tail call ptr @llvm.hexagon.circ.stw(ptr %a1, i32 %v7, i32 150995968, i32 4) #3
+  %v10 = load i32, ptr %v3, align 4, !tbaa !0
   %v11 = add nsw i32 %v6, %v10
-  %v12 = load i32, i32* %v9, align 4, !tbaa !0
+  %v12 = load i32, ptr %v8, align 4, !tbaa !0
   %v13 = add nsw i32 %v11, %v12
   %v14 = add nsw i32 %v4, 1
   %v15 = icmp eq i32 %v14, 2
-  %v16 = getelementptr i32, i32* %v3, i32 1
+  %v16 = getelementptr i32, ptr %v3, i32 1
   br i1 %v15, label %b2, label %b1
 
 b2:                                               ; preds = %b1
-  %v17 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([19 x i8], [19 x i8]* @g0, i32 0, i32 0), i32 %v13) #4
+  %v17 = tail call i32 (ptr, ...) @f0(ptr @g0, i32 %v13) #4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/swp-order1.ll b/llvm/test/CodeGen/Hexagon/swp-order1.ll
index d7a5b229c80d2..96341c63ec247 100644
--- a/llvm/test/CodeGen/Hexagon/swp-order1.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-order1.ll
@@ -1,29 +1,27 @@
 ; RUN: llc -O2 -march=hexagon < %s
 ; REQUIRES: asserts
 
-%0 = type { [2 x [8 x [16 x i8]]], [4 x [16 x %1*]] }
-%1 = type { i32, i32, i8, i8, %2, %6* }
-%2 = type { i32, i32, %3*, i8, i16, i16, i8 }
+%0 = type { [2 x [8 x [16 x i8]]], [4 x [16 x ptr]] }
+%1 = type { i32, i32, i8, i8, %2, ptr }
+%2 = type { i32, i32, ptr, i8, i16, i16, i8 }
 %3 = type { i16, i16, %4, i16, i8, i16, %5, i32 }
 %4 = type { i32 }
 %5 = type { i16, i16 }
-%6 = type { %7* }
+%6 = type { ptr }
 %7 = type { [16 x i16], [16 x i16] }
 
 ; Function Attrs: norecurse nounwind
-define void @f0(%0* nocapture %a0) #0 {
+define void @f0(ptr nocapture %a0) #0 {
 b0:
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ 0, %b0 ], [ %v6, %b1 ]
-  %v1 = getelementptr inbounds %0, %0* %a0, i32 0, i32 1, i32 3, i32 %v0
-  %v2 = bitcast %1** %v1 to i32*
-  %v3 = load i32, i32* %v2, align 4
-  store i32 %v3, i32* undef, align 4
-  %v4 = getelementptr inbounds %0, %0* %a0, i32 0, i32 1, i32 0, i32 %v0
-  %v5 = bitcast %1** %v4 to i32*
-  store i32 %v3, i32* %v5, align 4
+  %v1 = getelementptr inbounds %0, ptr %a0, i32 0, i32 1, i32 3, i32 %v0
+  %v3 = load i32, ptr %v1, align 4
+  store i32 %v3, ptr undef, align 4
+  %v4 = getelementptr inbounds %0, ptr %a0, i32 0, i32 1, i32 0, i32 %v0
+  store i32 %v3, ptr %v4, align 4
   %v6 = add nuw nsw i32 %v0, 1
   %v7 = icmp eq i32 %v6, 16
   br i1 %v7, label %b2, label %b1

diff  --git a/llvm/test/CodeGen/Hexagon/swp-phi-ch-offset.ll b/llvm/test/CodeGen/Hexagon/swp-phi-ch-offset.ll
index 31b98328a2fac..3c909f31bda9d 100644
--- a/llvm/test/CodeGen/Hexagon/swp-phi-ch-offset.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-phi-ch-offset.ll
@@ -21,13 +21,11 @@ b1:                                               ; preds = %b2
   ret void
 
 b2:                                               ; preds = %b2, %b0
-  %v0 = phi i8* [ getelementptr inbounds ([400 x i8], [400 x i8]* @g0, i32 0, i32 0), %b0 ], [ %v23, %b2 ]
-  %v1 = phi i8* [ getelementptr inbounds ([400 x i8], [400 x i8]* @g1, i32 0, i32 0), %b0 ], [ %v24, %b2 ]
+  %v0 = phi ptr [ @g0, %b0 ], [ %v23, %b2 ]
+  %v1 = phi ptr [ @g1, %b0 ], [ %v24, %b2 ]
   %v2 = phi i32 [ 0, %b0 ], [ %v21, %b2 ]
-  %v3 = bitcast i8* %v0 to <8 x i8>*
-  %v4 = load <8 x i8>, <8 x i8>* %v3, align 8
-  %v5 = bitcast i8* %v1 to <8 x i8>*
-  %v6 = load <8 x i8>, <8 x i8>* %v5, align 8
+  %v4 = load <8 x i8>, ptr %v0, align 8
+  %v6 = load <8 x i8>, ptr %v1, align 8
   %v7 = bitcast <8 x i8> %v4 to <2 x i32>
   %v8 = extractelement <2 x i32> %v7, i32 0
   %v9 = extractelement <2 x i32> %v7, i32 1
@@ -42,11 +40,11 @@ b2:                                               ; preds = %b2, %b0
   %v18 = tail call i32 @llvm.hexagon.S2.vtrunehb(i64 %v16)
   %v19 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v18, i32 %v17)
   %v20 = bitcast i64 %v19 to <8 x i8>
-  store <8 x i8> %v20, <8 x i8>* %v5, align 8
+  store <8 x i8> %v20, ptr %v1, align 8
   %v21 = add nsw i32 %v2, 8
   %v22 = icmp slt i32 %v2, 392
-  %v23 = getelementptr i8, i8* %v0, i32 8
-  %v24 = getelementptr i8, i8* %v1, i32 8
+  %v23 = getelementptr i8, ptr %v0, i32 8
+  %v24 = getelementptr i8, ptr %v1, i32 8
   br i1 %v22, label %b2, label %b1
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/swp-phi-chains.ll b/llvm/test/CodeGen/Hexagon/swp-phi-chains.ll
index 3037dcc2d5e17..34f79dcc89106 100644
--- a/llvm/test/CodeGen/Hexagon/swp-phi-chains.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-phi-chains.ll
@@ -15,7 +15,7 @@
 ; CHECK: Predecessors:
 ; CHECK: SU([[SU1]]): Data Latency=0
 
-%s.0 = type { i16, i8, i32, i8*, i8*, i8*, i8*, i8*, i8*, i32*, [2 x i32], i8*, i8*, i8*, %s.1, i8*, [8 x i8], i8 }
+%s.0 = type { i16, i8, i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, [2 x i32], ptr, ptr, ptr, %s.1, ptr, [8 x i8], i8 }
 %s.1 = type { i32, i16, i16 }
 %s.2 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32 }
 
@@ -29,20 +29,20 @@
 @g7 = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1
 
 ; Function Attrs: nounwind
-declare i32 @f0(%s.0* nocapture, i8* nocapture readonly, ...) #0
+declare i32 @f0(ptr nocapture, ptr nocapture readonly, ...) #0
 
 ; Function Attrs: nounwind
-define void @f1(%s.2* nocapture %a0, i32* nocapture readonly %a1, i32* nocapture readonly %a2, i16 signext %a3) #0 {
+define void @f1(ptr nocapture %a0, ptr nocapture readonly %a1, ptr nocapture readonly %a2, i16 signext %a3) #0 {
 b0:
-  %v0 = load i32, i32* %a2, align 4
+  %v0 = load i32, ptr %a2, align 4
   %v1 = tail call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v0, i32 2)
   %v2 = tail call i32 @llvm.hexagon.A2.sath(i32 %v1)
-  store i32 0, i32* @g5, align 4
-  %v3 = load i32, i32* @g0, align 4
-  %v4 = load i32, i32* @g1, align 4
-  %v5 = load i32, i32* @g2, align 4
-  %v6 = load i32, i32* @g3, align 4
-  %v7 = load i32, i32* @g4, align 4
+  store i32 0, ptr @g5, align 4
+  %v3 = load i32, ptr @g0, align 4
+  %v4 = load i32, ptr @g1, align 4
+  %v5 = load i32, ptr @g2, align 4
+  %v6 = load i32, ptr @g3, align 4
+  %v7 = load i32, ptr @g4, align 4
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
@@ -52,20 +52,20 @@ b1:                                               ; preds = %b1, %b0
   %v11 = phi i32 [ %v4, %b0 ], [ %v44, %b1 ]
   %v12 = phi i32 [ %v3, %b0 ], [ %v38, %b1 ]
   %v13 = phi i32 [ 0, %b0 ], [ %v53, %b1 ]
-  %v14 = phi i32* [ %a2, %b0 ], [ %v26, %b1 ]
-  %v15 = phi i32* [ %a1, %b0 ], [ %v19, %b1 ]
+  %v14 = phi ptr [ %a2, %b0 ], [ %v26, %b1 ]
+  %v15 = phi ptr [ %a1, %b0 ], [ %v19, %b1 ]
   %v16 = phi i32 [ %v2, %b0 ], [ %v32, %b1 ]
   %v17 = phi i32 [ 0, %b0 ], [ %v25, %b1 ]
   %v18 = phi i32 [ 0, %b0 ], [ %v16, %b1 ]
-  %v19 = getelementptr inbounds i32, i32* %v15, i32 1
-  %v20 = load i32, i32* %v15, align 4
+  %v19 = getelementptr inbounds i32, ptr %v15, i32 1
+  %v20 = load i32, ptr %v15, align 4
   %v21 = tail call i32 @llvm.hexagon.A2.asrh(i32 %v20)
   %v22 = shl i32 %v21, 16
   %v23 = ashr exact i32 %v22, 16
   %v24 = tail call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v23, i32 2)
   %v25 = tail call i32 @llvm.hexagon.A2.sath(i32 %v24)
-  %v26 = getelementptr inbounds i32, i32* %v14, i32 1
-  %v27 = load i32, i32* %v14, align 4
+  %v26 = getelementptr inbounds i32, ptr %v14, i32 1
+  %v27 = load i32, ptr %v14, align 4
   %v28 = tail call i32 @llvm.hexagon.A2.asrh(i32 %v27)
   %v29 = shl i32 %v28, 16
   %v30 = ashr exact i32 %v29, 16
@@ -77,35 +77,35 @@ b1:                                               ; preds = %b1, %b0
   %v36 = shl i32 %v16, 16
   %v37 = ashr exact i32 %v36, 16
   %v38 = tail call i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32 %v35, i32 %v37, i32 %v37)
-  store i32 %v38, i32* @g0, align 4
+  store i32 %v38, ptr @g0, align 4
   %v39 = shl i32 %v25, 16
   %v40 = ashr exact i32 %v39, 16
   %v41 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v11, i32 %v40, i32 %v34)
   %v42 = shl i32 %v32, 16
   %v43 = ashr exact i32 %v42, 16
   %v44 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v41, i32 %v43, i32 %v37)
-  store i32 %v44, i32* @g1, align 4
+  store i32 %v44, ptr @g1, align 4
   %v45 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v10, i32 %v43, i32 %v34)
   %v46 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v45, i32 %v40, i32 %v37)
-  store i32 %v46, i32* @g2, align 4
+  store i32 %v46, ptr @g2, align 4
   %v47 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v9, i32 %v40, i32 0)
   %v48 = shl i32 %v18, 16
   %v49 = ashr exact i32 %v48, 16
   %v50 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v47, i32 %v43, i32 %v49)
-  store i32 %v50, i32* @g3, align 4
+  store i32 %v50, ptr @g3, align 4
   %v51 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v8, i32 %v43, i32 0)
   %v52 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v51, i32 %v40, i32 %v49)
-  store i32 %v52, i32* @g4, align 4
+  store i32 %v52, ptr @g4, align 4
   %v53 = add nsw i32 %v13, 1
   %v54 = icmp slt i32 %v53, 4
-  store i32 %v53, i32* @g5, align 4
+  store i32 %v53, ptr @g5, align 4
   br i1 %v54, label %b1, label %b2
 
 b2:                                               ; preds = %b1
-  %v55 = tail call i32 (%s.0*, i8*, ...) @f0(%s.0* @g6, i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g7, i32 0, i32 0), i32 %v46) #2
-  %v56 = load i32, i32* @g2, align 4
-  %v57 = getelementptr inbounds %s.2, %s.2* %a0, i32 0, i32 5
-  store i32 %v56, i32* %v57, align 4, !tbaa !0
+  %v55 = tail call i32 (ptr, ptr, ...) @f0(ptr @g6, ptr @g7, i32 %v46) #2
+  %v56 = load i32, ptr @g2, align 4
+  %v57 = getelementptr inbounds %s.2, ptr %a0, i32 0, i32 5
+  store i32 %v56, ptr %v57, align 4, !tbaa !0
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/swp-phi-def-use.ll b/llvm/test/CodeGen/Hexagon/swp-phi-def-use.ll
index 1ce62b4204241..62069eab07936 100644
--- a/llvm/test/CodeGen/Hexagon/swp-phi-def-use.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-phi-def-use.ll
@@ -4,48 +4,47 @@
 ; Test that the pipeliner doesn't assert when renaming a phi
 ; that looks like: a = PHI b, a
 
-%s.0 = type { i32, i32*, [0 x i32], [0 x i32], [1 x i32] }
+%s.0 = type { i32, ptr, [0 x i32], [0 x i32], [1 x i32] }
 %s.1 = type { %s.2, %s.4, %s.5 }
 %s.2 = type { %s.3 }
 %s.3 = type { i32 }
 %s.4 = type { i32 }
-%s.5 = type { [0 x i32], [0 x i32 (i32*, i32*, i32*, i32*, i32*, i32, i32*)*] }
+%s.5 = type { [0 x i32], [0 x ptr] }
 
 @g0 = external global i32, align 4
 @g1 = external global %s.0, align 4
 @g2 = external global i32, align 4
 @g3 = external global i32, align 4
-@g4 = external global i32*, align 4
+@g4 = external global ptr, align 4
 
-define void @f0(%s.1* nocapture readonly %a0) #0 {
+define void @f0(ptr nocapture readonly %a0) #0 {
 b0:
   %v0 = alloca [0 x i32], align 4
-  %v1 = load i32, i32* @g0, align 4
-  %v2 = load i32, i32* undef, align 4
-  %v3 = load i32*, i32** getelementptr inbounds (%s.0, %s.0* @g1, i32 0, i32 1), align 4
-  %v4 = load i32, i32* @g2, align 4
+  %v1 = load i32, ptr @g0, align 4
+  %v2 = load i32, ptr undef, align 4
+  %v3 = load ptr, ptr getelementptr inbounds (%s.0, ptr @g1, i32 0, i32 1), align 4
+  %v4 = load i32, ptr @g2, align 4
   %v5 = sub i32 0, %v4
-  %v6 = getelementptr inbounds i32, i32* %v3, i32 %v5
-  %v7 = load i32, i32* undef, align 4
+  %v6 = getelementptr inbounds i32, ptr %v3, i32 %v5
+  %v7 = load i32, ptr undef, align 4
   switch i32 %v7, label %b15 [
     i32 0, label %b1
     i32 1, label %b2
   ]
 
 b1:                                               ; preds = %b0
-  store i32 0, i32* @g3, align 4
+  store i32 0, ptr @g3, align 4
   br label %b2
 
 b2:                                               ; preds = %b1, %b0
   %v8 = icmp eq i32 %v1, 0
   %v9 = icmp sgt i32 %v2, 0
-  %v10 = getelementptr inbounds [0 x i32], [0 x i32]* %v0, i32 0, i32 0
   %v11 = sdiv i32 %v2, 2
   %v12 = add i32 %v11, -1
-  %v13 = getelementptr inbounds [0 x i32], [0 x i32]* %v0, i32 0, i32 1
-  %v14 = getelementptr inbounds %s.1, %s.1* %a0, i32 0, i32 2, i32 1, i32 %v1
+  %v13 = getelementptr inbounds [0 x i32], ptr %v0, i32 0, i32 1
+  %v14 = getelementptr inbounds %s.1, ptr %a0, i32 0, i32 2, i32 1, i32 %v1
   %v15 = sub i32 1, %v4
-  %v16 = getelementptr inbounds i32, i32* %v3, i32 %v15
+  %v16 = getelementptr inbounds i32, ptr %v3, i32 %v15
   %v17 = sdiv i32 %v2, 4
   %v18 = icmp slt i32 %v2, -3
   %v19 = add i32 %v2, -1
@@ -58,7 +57,7 @@ b2:                                               ; preds = %b1, %b0
   br label %b4
 
 b3:                                               ; preds = %b14
-  store i32 %v25, i32* @g3, align 4
+  store i32 %v25, ptr @g3, align 4
   br label %b4
 
 b4:                                               ; preds = %b13, %b3, %b2
@@ -76,14 +75,14 @@ b6:                                               ; preds = %b4
   br i1 %v9, label %b8, label %b7
 
 b7:                                               ; preds = %b6
-  store i32 0, i32* @g3, align 4
+  store i32 0, ptr @g3, align 4
   br label %b11
 
 b8:                                               ; preds = %b6
   br i1 undef, label %b9, label %b11
 
 b9:                                               ; preds = %b8
-  %v31 = load i32*, i32** @g4, align 4
+  %v31 = load ptr, ptr @g4, align 4
   br label %b10
 
 b10:                                              ; preds = %b10, %b9
@@ -91,11 +90,11 @@ b10:                                              ; preds = %b10, %b9
   %v33 = phi i32 [ %v29, %b9 ], [ %v38, %b10 ]
   %v34 = add nsw i32 %v32, %v28
   %v35 = shl i32 %v34, 1
-  %v36 = getelementptr inbounds i32, i32* %v31, i32 %v35
-  %v37 = load i32, i32* %v36, align 4
+  %v36 = getelementptr inbounds i32, ptr %v31, i32 %v35
+  %v37 = load i32, ptr %v36, align 4
   %v38 = select i1 false, i32 0, i32 %v33
   %v39 = add nsw i32 %v32, 1
-  store i32 %v39, i32* @g3, align 4
+  store i32 %v39, ptr @g3, align 4
   %v40 = icmp slt i32 %v39, 0
   br i1 %v40, label %b10, label %b11
 
@@ -107,7 +106,7 @@ b12:                                              ; preds = %b11
   br label %b13
 
 b13:                                              ; preds = %b12, %b11
-  %v42 = load i32, i32* %v10, align 4
+  %v42 = load i32, ptr %v0, align 4
   %v43 = select i1 false, i32 %v41, i32 1
   br i1 %v18, label %b4, label %b14
 

diff  --git a/llvm/test/CodeGen/Hexagon/swp-phi-dep.ll b/llvm/test/CodeGen/Hexagon/swp-phi-dep.ll
index ec7af41a31fa7..0c47078193926 100644
--- a/llvm/test/CodeGen/Hexagon/swp-phi-dep.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-phi-dep.ll
@@ -8,7 +8,7 @@
 ; CHECK-NOT: = addasl(r{{[0-9]+}},[[REG0]],#1)
 
 ; Function Attrs: nounwind
-define void @f0(i32 %a0, i16* nocapture %a1) #0 {
+define void @f0(i32 %a0, ptr nocapture %a1) #0 {
 b0:
   br i1 undef, label %b2, label %b1
 
@@ -46,10 +46,10 @@ b11:                                              ; preds = %b11, %b10, %b8
   %v0 = phi i32 [ %v6, %b11 ], [ undef, %b8 ], [ undef, %b10 ]
   %v1 = phi i32 [ %v0, %b11 ], [ %a0, %b8 ], [ undef, %b10 ]
   %v2 = add nsw i32 %v1, -2
-  %v3 = getelementptr inbounds i16, i16* %a1, i32 %v2
-  %v4 = load i16, i16* %v3, align 2, !tbaa !0
-  %v5 = getelementptr inbounds i16, i16* %a1, i32 %v0
-  store i16 %v4, i16* %v5, align 2, !tbaa !0
+  %v3 = getelementptr inbounds i16, ptr %a1, i32 %v2
+  %v4 = load i16, ptr %v3, align 2, !tbaa !0
+  %v5 = getelementptr inbounds i16, ptr %a1, i32 %v0
+  store i16 %v4, ptr %v5, align 2, !tbaa !0
   %v6 = add nsw i32 %v0, -1
   %v7 = icmp sgt i32 %v6, 0
   br i1 %v7, label %b11, label %b12

diff  --git a/llvm/test/CodeGen/Hexagon/swp-phi-dep1.ll b/llvm/test/CodeGen/Hexagon/swp-phi-dep1.ll
index 89e7bb25d646f..3d1e33d49534a 100644
--- a/llvm/test/CodeGen/Hexagon/swp-phi-dep1.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-phi-dep1.ll
@@ -5,7 +5,7 @@
 ; the two Phis are scheduled in different iterations.
 
 ; Function Attrs: nounwind
-define void @f0(i8* noalias nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, i8* noalias nocapture %a4, i32 %a5) #0 {
+define void @f0(ptr noalias nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, ptr noalias nocapture %a4, i32 %a5) #0 {
 b0:
   %v0 = add i32 %a2, -1
   %v1 = icmp ugt i32 %v0, 1
@@ -17,14 +17,14 @@ b1:                                               ; preds = %b0
   %v4 = add i32 %v3, 1
   %v5 = add i32 %a3, 1
   %v6 = add i32 %a1, -2
-  %v7 = getelementptr i8, i8* %a0, i32 2
+  %v7 = getelementptr i8, ptr %a0, i32 2
   %v8 = add i32 %a5, 1
-  %v9 = getelementptr i8, i8* %a4, i32 %v8
+  %v9 = getelementptr i8, ptr %a4, i32 %v8
   br label %b2
 
 b2:                                               ; preds = %b5, %b1
-  %v10 = phi i8* [ %v85, %b5 ], [ %v9, %b1 ]
-  %v11 = phi i8* [ %v84, %b5 ], [ %v7, %b1 ]
+  %v10 = phi ptr [ %v85, %b5 ], [ %v9, %b1 ]
+  %v11 = phi ptr [ %v84, %b5 ], [ %v7, %b1 ]
   %v12 = phi i32 [ 0, %b1 ], [ %v83, %b5 ]
   %v13 = phi i32 [ 1, %b1 ], [ %v82, %b5 ]
   %v14 = icmp ugt i32 %v2, 1
@@ -38,23 +38,23 @@ b3:                                               ; preds = %b2
   %v19 = add i32 %v4, %v15
   %v20 = add i32 %v15, %a3
   %v21 = add i32 %v5, %v15
-  %v22 = getelementptr i8, i8* %a0, i32 %v15
-  %v23 = getelementptr i8, i8* %a0, i32 %v17
-  %v24 = getelementptr i8, i8* %a0, i32 %v18
-  %v25 = getelementptr i8, i8* %a0, i32 %v19
-  %v26 = getelementptr i8, i8* %a0, i32 %v20
-  %v27 = getelementptr i8, i8* %a0, i32 %v21
-  %v28 = load i8, i8* %v23, align 1
-  %v29 = load i8, i8* %v22, align 1
-  %v30 = load i8, i8* %v25, align 1
-  %v31 = load i8, i8* %v24, align 1
-  %v32 = load i8, i8* %v27, align 1
-  %v33 = load i8, i8* %v26, align 1
+  %v22 = getelementptr i8, ptr %a0, i32 %v15
+  %v23 = getelementptr i8, ptr %a0, i32 %v17
+  %v24 = getelementptr i8, ptr %a0, i32 %v18
+  %v25 = getelementptr i8, ptr %a0, i32 %v19
+  %v26 = getelementptr i8, ptr %a0, i32 %v20
+  %v27 = getelementptr i8, ptr %a0, i32 %v21
+  %v28 = load i8, ptr %v23, align 1
+  %v29 = load i8, ptr %v22, align 1
+  %v30 = load i8, ptr %v25, align 1
+  %v31 = load i8, ptr %v24, align 1
+  %v32 = load i8, ptr %v27, align 1
+  %v33 = load i8, ptr %v26, align 1
   br label %b4
 
 b4:                                               ; preds = %b4, %b3
-  %v34 = phi i8* [ %v80, %b4 ], [ %v10, %b3 ]
-  %v35 = phi i8* [ %v79, %b4 ], [ %v11, %b3 ]
+  %v34 = phi ptr [ %v80, %b4 ], [ %v10, %b3 ]
+  %v35 = phi ptr [ %v79, %b4 ], [ %v11, %b3 ]
   %v36 = phi i32 [ %v78, %b4 ], [ %v6, %b3 ]
   %v37 = phi i8 [ %v28, %b3 ], [ %v43, %b4 ]
   %v38 = phi i8 [ %v29, %b3 ], [ %v37, %b4 ]
@@ -62,11 +62,11 @@ b4:                                               ; preds = %b4, %b3
   %v40 = phi i8 [ %v31, %b3 ], [ %v39, %b4 ]
   %v41 = phi i8 [ %v32, %b3 ], [ %v45, %b4 ]
   %v42 = phi i8 [ %v33, %b3 ], [ %v41, %b4 ]
-  %v43 = load i8, i8* %v35, align 1, !tbaa !0
-  %v44 = getelementptr i8, i8* %v35, i32 %a3
-  %v45 = load i8, i8* %v44, align 1, !tbaa !0
-  %v46 = getelementptr i8, i8* %v35, i32 %v3
-  %v47 = load i8, i8* %v46, align 1, !tbaa !0
+  %v43 = load i8, ptr %v35, align 1, !tbaa !0
+  %v44 = getelementptr i8, ptr %v35, i32 %a3
+  %v45 = load i8, ptr %v44, align 1, !tbaa !0
+  %v46 = getelementptr i8, ptr %v35, i32 %v3
+  %v47 = load i8, ptr %v46, align 1, !tbaa !0
   %v48 = zext i8 %v38 to i32
   %v49 = zext i8 %v37 to i32
   %v50 = zext i8 %v43 to i32
@@ -97,18 +97,18 @@ b4:                                               ; preds = %b4, %b3
   %v75 = icmp ugt i32 %v74, 255
   %v76 = trunc i32 %v74 to i8
   %v77 = select i1 %v75, i8 -1, i8 %v76
-  store i8 %v77, i8* %v34, align 1, !tbaa !0
+  store i8 %v77, ptr %v34, align 1, !tbaa !0
   %v78 = add i32 %v36, -1
-  %v79 = getelementptr i8, i8* %v35, i32 1
-  %v80 = getelementptr i8, i8* %v34, i32 1
+  %v79 = getelementptr i8, ptr %v35, i32 1
+  %v80 = getelementptr i8, ptr %v34, i32 1
   %v81 = icmp eq i32 %v78, 0
   br i1 %v81, label %b5, label %b4
 
 b5:                                               ; preds = %b4, %b2
   %v82 = add i32 %v13, 1
   %v83 = add i32 %v12, 1
-  %v84 = getelementptr i8, i8* %v11, i32 %a3
-  %v85 = getelementptr i8, i8* %v10, i32 %a5
+  %v84 = getelementptr i8, ptr %v11, i32 %a3
+  %v85 = getelementptr i8, ptr %v10, i32 %a5
   %v86 = icmp eq i32 %v82, %v0
   br i1 %v86, label %b6, label %b2
 

diff  --git a/llvm/test/CodeGen/Hexagon/swp-phi-order.ll b/llvm/test/CodeGen/Hexagon/swp-phi-order.ll
index 72709afde9b8c..5dc01ab5eb8e5 100644
--- a/llvm/test/CodeGen/Hexagon/swp-phi-order.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-phi-order.ll
@@ -3,7 +3,7 @@
 
 %s.0 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, [49 x i8], [49 x i8], [25 x i8], [6 x i8], [29 x i8], i8, [6 x i8], [6 x i8] }
 
-define void @f0(%s.0* nocapture %a0) {
+define void @f0(ptr nocapture %a0) {
 b0:
   br i1 undef, label %b2, label %b1
 
@@ -11,12 +11,12 @@ b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ %v6, %b1 ], [ undef, %b0 ]
   %v1 = phi i32 [ %v8, %b1 ], [ 1, %b0 ]
   %v2 = and i32 %v0, 255
-  %v3 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 9, i32 %v1
-  %v4 = load i8, i8* %v3, align 1
+  %v3 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 9, i32 %v1
+  %v4 = load i8, ptr %v3, align 1
   %v5 = zext i8 %v4 to i32
   %v6 = add nsw i32 %v5, %v2
   %v7 = trunc i32 %v6 to i8
-  store i8 %v7, i8* %v3, align 1
+  store i8 %v7, ptr %v3, align 1
   %v8 = add nsw i32 %v1, 1
   %v9 = icmp sgt i32 %v8, undef
   br i1 %v9, label %b2, label %b1

diff  --git a/llvm/test/CodeGen/Hexagon/swp-phi-ref.ll b/llvm/test/CodeGen/Hexagon/swp-phi-ref.ll
index be838e767aa07..1b942ed7aa5c8 100644
--- a/llvm/test/CodeGen/Hexagon/swp-phi-ref.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-phi-ref.ll
@@ -27,7 +27,7 @@ b1:                                               ; preds = %b1, %b0
   %v4 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> undef, <16 x i32> undef)
   %v5 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v4, <16 x i32> %v1, i32 2)
   %v6 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffh(<16 x i32> %v3, <16 x i32> %v5)
-  store <16 x i32> %v6, <16 x i32>* null, align 64
+  store <16 x i32> %v6, ptr null, align 64
   %v7 = add nsw i32 %v0, 1
   %v8 = icmp slt i32 %v7, undef
   br i1 %v8, label %b1, label %b2

diff  --git a/llvm/test/CodeGen/Hexagon/swp-phi-ref1.ll b/llvm/test/CodeGen/Hexagon/swp-phi-ref1.ll
index 357e8725cc0d6..bdd7aa77547f4 100644
--- a/llvm/test/CodeGen/Hexagon/swp-phi-ref1.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-phi-ref1.ll
@@ -32,14 +32,14 @@ b4:                                               ; preds = %b4, %b3
   %v6 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v3, i32 %v3)
   %v7 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v0, i32 undef)
   %v8 = tail call i64 @llvm.hexagon.S2.valignib(i64 %v7, i64 undef, i32 2)
-  %v9 = inttoptr i32 %v5 to i16*
-  %v10 = load i16, i16* %v9, align 2, !tbaa !0
+  %v9 = inttoptr i32 %v5 to ptr
+  %v10 = load i16, ptr %v9, align 2, !tbaa !0
   %v11 = sext i16 %v10 to i32
   %v12 = add nsw i32 %v5, -8
   %v13 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 %v1, i64 %v6, i64 %v8)
   %v14 = tail call i32 @llvm.hexagon.A2.combine.ll(i32 %v11, i32 %v0)
-  %v15 = inttoptr i32 %v12 to i16*
-  %v16 = load i16, i16* %v15, align 2, !tbaa !0
+  %v15 = inttoptr i32 %v12 to ptr
+  %v16 = load i16, ptr %v15, align 2, !tbaa !0
   %v17 = sext i16 %v16 to i32
   %v18 = add nsw i32 %v5, -16
   %v19 = add nsw i32 %v2, 1

diff  --git a/llvm/test/CodeGen/Hexagon/swp-phi-start.ll b/llvm/test/CodeGen/Hexagon/swp-phi-start.ll
index 0e451f924a923..8ab1424cea19e 100644
--- a/llvm/test/CodeGen/Hexagon/swp-phi-start.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-phi-start.ll
@@ -12,7 +12,7 @@
 ; CHECK: }{{[ \t]*}}:endloop
 
 ; Function Attrs: nounwind
-define void @f0(i32 %a0, i16* nocapture %a1) #0 {
+define void @f0(i32 %a0, ptr nocapture %a1) #0 {
 b0:
   br i1 undef, label %b1, label %b2
 
@@ -24,10 +24,10 @@ b2:                                               ; preds = %b2, %b1, %b0
   %v1 = phi i32 [ %v7, %b2 ], [ undef, %b0 ], [ %v0, %b1 ]
   %v2 = phi i32 [ %v1, %b2 ], [ %a0, %b0 ], [ undef, %b1 ]
   %v3 = add nsw i32 %v2, -2
-  %v4 = getelementptr inbounds i16, i16* %a1, i32 %v3
-  %v5 = load i16, i16* %v4, align 2, !tbaa !0
-  %v6 = getelementptr inbounds i16, i16* %a1, i32 %v1
-  store i16 %v5, i16* %v6, align 2, !tbaa !0
+  %v4 = getelementptr inbounds i16, ptr %a1, i32 %v3
+  %v5 = load i16, ptr %v4, align 2, !tbaa !0
+  %v6 = getelementptr inbounds i16, ptr %a1, i32 %v1
+  store i16 %v5, ptr %v6, align 2, !tbaa !0
   %v7 = add nsw i32 %v1, -1
   %v8 = icmp sgt i32 %v7, 0
   br i1 %v8, label %b2, label %b3

diff  --git a/llvm/test/CodeGen/Hexagon/swp-phi.ll b/llvm/test/CodeGen/Hexagon/swp-phi.ll
index cb025f9324d0c..9606a2407e477 100644
--- a/llvm/test/CodeGen/Hexagon/swp-phi.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-phi.ll
@@ -6,42 +6,42 @@
 ; the Phi operand refer to Phis from the same loop.
 
 ; Function Attrs: nounwind
-define void @f0(float* nocapture %a0, float* nocapture %a1) #0 {
+define void @f0(ptr nocapture %a0, ptr nocapture %a1) #0 {
 b0:
   %v0 = alloca [400 x float], align 4
-  %v1 = getelementptr inbounds float, float* %a1, i32 1
-  %v2 = getelementptr inbounds float, float* %a1, i32 2
-  %v3 = getelementptr inbounds float, float* %a1, i32 3
-  %v4 = getelementptr inbounds float, float* %a1, i32 4
-  %v5 = getelementptr inbounds float, float* %a1, i32 5
-  %v6 = getelementptr inbounds float, float* %a1, i32 6
-  %v7 = getelementptr inbounds float, float* %a1, i32 7
-  %v8 = getelementptr inbounds float, float* %a1, i32 8
-  %v9 = getelementptr inbounds float, float* %a1, i32 9
-  %v10 = getelementptr inbounds float, float* %a1, i32 10
-  %v11 = getelementptr inbounds float, float* %a1, i32 11
-  %v12 = getelementptr inbounds float, float* %a1, i32 12
-  %v13 = getelementptr inbounds float, float* %a1, i32 13
-  %v14 = getelementptr inbounds float, float* %a1, i32 14
-  %v15 = getelementptr inbounds float, float* %a1, i32 15
-  %v16 = getelementptr inbounds float, float* %a1, i32 16
-  %v17 = load float, float* %a1, align 4
-  %v18 = load float, float* %v1, align 4
-  %v19 = load float, float* %v2, align 4
-  %v20 = load float, float* %v3, align 4
-  %v21 = load float, float* %v4, align 4
-  %v22 = load float, float* %v5, align 4
-  %v23 = load float, float* %v6, align 4
-  %v24 = load float, float* %v7, align 4
-  %v25 = load float, float* %v8, align 4
-  %v26 = load float, float* %v9, align 4
-  %v27 = load float, float* %v10, align 4
-  %v28 = load float, float* %v11, align 4
-  %v29 = load float, float* %v12, align 4
-  %v30 = load float, float* %v13, align 4
-  %v31 = load float, float* %v14, align 4
-  %v32 = load float, float* %v15, align 4
-  %v33 = load float, float* %v16, align 4
+  %v1 = getelementptr inbounds float, ptr %a1, i32 1
+  %v2 = getelementptr inbounds float, ptr %a1, i32 2
+  %v3 = getelementptr inbounds float, ptr %a1, i32 3
+  %v4 = getelementptr inbounds float, ptr %a1, i32 4
+  %v5 = getelementptr inbounds float, ptr %a1, i32 5
+  %v6 = getelementptr inbounds float, ptr %a1, i32 6
+  %v7 = getelementptr inbounds float, ptr %a1, i32 7
+  %v8 = getelementptr inbounds float, ptr %a1, i32 8
+  %v9 = getelementptr inbounds float, ptr %a1, i32 9
+  %v10 = getelementptr inbounds float, ptr %a1, i32 10
+  %v11 = getelementptr inbounds float, ptr %a1, i32 11
+  %v12 = getelementptr inbounds float, ptr %a1, i32 12
+  %v13 = getelementptr inbounds float, ptr %a1, i32 13
+  %v14 = getelementptr inbounds float, ptr %a1, i32 14
+  %v15 = getelementptr inbounds float, ptr %a1, i32 15
+  %v16 = getelementptr inbounds float, ptr %a1, i32 16
+  %v17 = load float, ptr %a1, align 4
+  %v18 = load float, ptr %v1, align 4
+  %v19 = load float, ptr %v2, align 4
+  %v20 = load float, ptr %v3, align 4
+  %v21 = load float, ptr %v4, align 4
+  %v22 = load float, ptr %v5, align 4
+  %v23 = load float, ptr %v6, align 4
+  %v24 = load float, ptr %v7, align 4
+  %v25 = load float, ptr %v8, align 4
+  %v26 = load float, ptr %v9, align 4
+  %v27 = load float, ptr %v10, align 4
+  %v28 = load float, ptr %v11, align 4
+  %v29 = load float, ptr %v12, align 4
+  %v30 = load float, ptr %v13, align 4
+  %v31 = load float, ptr %v14, align 4
+  %v32 = load float, ptr %v15, align 4
+  %v33 = load float, ptr %v16, align 4
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
@@ -113,36 +113,36 @@ b1:                                               ; preds = %b1, %b0
   %v99 = fmul float %v49, %v34
   %v100 = fadd float %v51, %v99
   %v101 = add nsw i32 %v67, 16
-  %v102 = getelementptr inbounds [400 x float], [400 x float]* %v0, i32 0, i32 %v101
-  %v103 = load float, float* %v102, align 4, !tbaa !0
+  %v102 = getelementptr inbounds [400 x float], ptr %v0, i32 0, i32 %v101
+  %v103 = load float, ptr %v102, align 4, !tbaa !0
   %v104 = fmul float %v49, %v103
   %v105 = fadd float %v50, %v104
   %v106 = icmp eq i32 %v70, 384
   br i1 %v106, label %b2, label %b1
 
 b2:                                               ; preds = %b1
-  store float %v69, float* %a1, align 4
-  store float %v72, float* %v1, align 4
-  store float %v74, float* %v2, align 4
-  store float %v76, float* %v3, align 4
-  store float %v78, float* %v4, align 4
-  store float %v80, float* %v5, align 4
-  store float %v82, float* %v6, align 4
-  store float %v84, float* %v7, align 4
-  store float %v86, float* %v8, align 4
-  store float %v88, float* %v9, align 4
-  store float %v90, float* %v10, align 4
-  store float %v92, float* %v11, align 4
-  store float %v94, float* %v12, align 4
-  store float %v96, float* %v13, align 4
-  store float %v98, float* %v14, align 4
-  store float %v100, float* %v15, align 4
-  store float %v105, float* %v16, align 4
+  store float %v69, ptr %a1, align 4
+  store float %v72, ptr %v1, align 4
+  store float %v74, ptr %v2, align 4
+  store float %v76, ptr %v3, align 4
+  store float %v78, ptr %v4, align 4
+  store float %v80, ptr %v5, align 4
+  store float %v82, ptr %v6, align 4
+  store float %v84, ptr %v7, align 4
+  store float %v86, ptr %v8, align 4
+  store float %v88, ptr %v9, align 4
+  store float %v90, ptr %v10, align 4
+  store float %v92, ptr %v11, align 4
+  store float %v94, ptr %v12, align 4
+  store float %v96, ptr %v13, align 4
+  store float %v98, ptr %v14, align 4
+  store float %v100, ptr %v15, align 4
+  store float %v105, ptr %v16, align 4
   %v107 = fcmp olt float %v69, 1.000000e+00
   br i1 %v107, label %b3, label %b4
 
 b3:                                               ; preds = %b2
-  store float 1.000000e+00, float* %a1, align 4, !tbaa !0
+  store float 1.000000e+00, ptr %a1, align 4, !tbaa !0
   br label %b4
 
 b4:                                               ; preds = %b3, %b2

diff  --git a/llvm/test/CodeGen/Hexagon/swp-physreg.ll b/llvm/test/CodeGen/Hexagon/swp-physreg.ll
index 0811b3d3125e3..407acff508ae6 100644
--- a/llvm/test/CodeGen/Hexagon/swp-physreg.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-physreg.ll
@@ -4,10 +4,10 @@
 ; Make sure pipeliner handle physical registers (e.g., used in
 ; inline asm
 
-@g0 = external global i32*, align 4
+@g0 = external global ptr, align 4
 
 ; Function Attrs: nounwind
-define i32 @f0(i32 %a0, i8** nocapture %a1) #0 {
+define i32 @f0(i32 %a0, ptr nocapture %a1) #0 {
 b0:
   br i1 undef, label %b1, label %b2
 
@@ -24,13 +24,13 @@ b4:                                               ; preds = %b3
   br label %b5
 
 b5:                                               ; preds = %b5, %b4
-  %v0 = phi i32* [ inttoptr (i32 33554432 to i32*), %b4 ], [ %v4, %b5 ]
+  %v0 = phi ptr [ inttoptr (i32 33554432 to ptr), %b4 ], [ %v4, %b5 ]
   %v1 = phi i32 [ 0, %b4 ], [ %v5, %b5 ]
-  %v2 = ptrtoint i32* %v0 to i32
+  %v2 = ptrtoint ptr %v0 to i32
   tail call void asm sideeffect "    r1 = $1\0A    r0 = $0\0A    memw(r0) = r1\0A    dcfetch(r0)\0A", "r,r,~{r0},~{r1}"(i32 %v2, i32 %v1) #0
-  %v3 = load i32*, i32** @g0, align 4
-  %v4 = getelementptr inbounds i32, i32* %v3, i32 1
-  store i32* %v4, i32** @g0, align 4
+  %v3 = load ptr, ptr @g0, align 4
+  %v4 = getelementptr inbounds i32, ptr %v3, i32 1
+  store ptr %v4, ptr @g0, align 4
   %v5 = add nsw i32 %v1, 1
   %v6 = icmp eq i32 %v5, 200
   br i1 %v6, label %b6, label %b5

diff  --git a/llvm/test/CodeGen/Hexagon/swp-pragma-disable-bug.ll b/llvm/test/CodeGen/Hexagon/swp-pragma-disable-bug.ll
index 63c0f9502ee29..7fd94a9b26c92 100644
--- a/llvm/test/CodeGen/Hexagon/swp-pragma-disable-bug.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-pragma-disable-bug.ll
@@ -8,18 +8,18 @@
 ; CHECK-NOT: Can not pipeline loop
 
 ; Function Attrs: nofree norecurse nounwind
-define dso_local i32 @foo(i32* nocapture %a, i32* nocapture readonly %b, i32* nocapture %c) local_unnamed_addr #0 {
+define dso_local i32 @foo(ptr nocapture %a, ptr nocapture readonly %b, ptr nocapture %c) local_unnamed_addr #0 {
 entry:
   br label %for.body
 
 for.body:                                         ; preds = %for.body, %entry
   %i.023 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
-  %arrayidx = getelementptr inbounds i32, i32* %b, i32 %i.023
-  %0 = load i32, i32* %arrayidx, align 4, !tbaa !2
-  %arrayidx1 = getelementptr inbounds i32, i32* %a, i32 %i.023
-  %1 = load i32, i32* %arrayidx1, align 4, !tbaa !2
+  %arrayidx = getelementptr inbounds i32, ptr %b, i32 %i.023
+  %0 = load i32, ptr %arrayidx, align 4, !tbaa !2
+  %arrayidx1 = getelementptr inbounds i32, ptr %a, i32 %i.023
+  %1 = load i32, ptr %arrayidx1, align 4, !tbaa !2
   %add = add nsw i32 %1, %0
-  store i32 %add, i32* %arrayidx1, align 4, !tbaa !2
+  store i32 %add, ptr %arrayidx1, align 4, !tbaa !2
   %inc = add nuw nsw i32 %i.023, 1
   %exitcond24 = icmp eq i32 %inc, 10
   br i1 %exitcond24, label %for.body6, label %for.body, !llvm.loop !6
@@ -29,12 +29,12 @@ for.cond.cleanup5:                                ; preds = %for.body6
 
 for.body6:                                        ; preds = %for.body, %for.body6
   %i2.022 = phi i32 [ %inc11, %for.body6 ], [ 0, %for.body ]
-  %arrayidx7 = getelementptr inbounds i32, i32* %a, i32 %i2.022
-  %2 = load i32, i32* %arrayidx7, align 4, !tbaa !2
-  %arrayidx8 = getelementptr inbounds i32, i32* %c, i32 %i2.022
-  %3 = load i32, i32* %arrayidx8, align 4, !tbaa !2
+  %arrayidx7 = getelementptr inbounds i32, ptr %a, i32 %i2.022
+  %2 = load i32, ptr %arrayidx7, align 4, !tbaa !2
+  %arrayidx8 = getelementptr inbounds i32, ptr %c, i32 %i2.022
+  %3 = load i32, ptr %arrayidx8, align 4, !tbaa !2
   %add9 = add nsw i32 %3, %2
-  store i32 %add9, i32* %arrayidx8, align 4, !tbaa !2
+  store i32 %add9, ptr %arrayidx8, align 4, !tbaa !2
   %inc11 = add nuw nsw i32 %i2.022, 1
   %exitcond = icmp eq i32 %inc11, 10
   br i1 %exitcond, label %for.cond.cleanup5, label %for.body6, !llvm.loop !8

diff  --git a/llvm/test/CodeGen/Hexagon/swp-prolog-phi4.ll b/llvm/test/CodeGen/Hexagon/swp-prolog-phi4.ll
index 8535ccb67476b..50fc9a9c89302 100644
--- a/llvm/test/CodeGen/Hexagon/swp-prolog-phi4.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-prolog-phi4.ll
@@ -8,10 +8,10 @@ entry:
   br label %for.body
 
 for.body:
-  %add.ptr3.pn = phi i8* [ undef, %entry ], [ %src4.0394, %for.end ]
-  %src2.0390 = phi i8* [ undef, %entry ], [ %add.ptr3.pn, %for.end ]
-  %src4.0394 = getelementptr inbounds i8, i8* %add.ptr3.pn, i32 %srcStride
-  %sri414 = load i8, i8* undef, align 1
+  %add.ptr3.pn = phi ptr [ undef, %entry ], [ %src4.0394, %for.end ]
+  %src2.0390 = phi ptr [ undef, %entry ], [ %add.ptr3.pn, %for.end ]
+  %src4.0394 = getelementptr inbounds i8, ptr %add.ptr3.pn, i32 %srcStride
+  %sri414 = load i8, ptr undef, align 1
   br i1 undef, label %for.body9.epil, label %for.body9.preheader.new
 
 for.body9.preheader.new:
@@ -25,12 +25,12 @@ for.body9.epil:
   %epil.iter = phi i32 [ %epil.iter.sub, %for.body9.epil ], [ undef, %for.body9.preheader.new ], [ undef, %for.body ]
   %add17.epil = add nuw i32 %inc.sink385.epil, 1
   %add21.epil = add i32 %inc.sink385.epil, 2
-  %arrayidx22.epil = getelementptr inbounds i8, i8* undef, i32 %add21.epil
+  %arrayidx22.epil = getelementptr inbounds i8, ptr undef, i32 %add21.epil
   %conv27.epil = zext i8 %sr422.epil to i32
-  %0 = load i8, i8* null, align 1
+  %0 = load i8, ptr null, align 1
   %conv61.epil = zext i8 %0 to i32
-  %arrayidx94.epil = getelementptr inbounds i8, i8* %src4.0394, i32 %add17.epil
-  %1 = load i8, i8* %arrayidx94.epil, align 1
+  %arrayidx94.epil = getelementptr inbounds i8, ptr %src4.0394, i32 %add17.epil
+  %1 = load i8, ptr %arrayidx94.epil, align 1
   %add35.epil = add i32 0, %conv27.epil
   %add39.epil = add i32 %add35.epil, 0
   %add43.epil = add i32 %add39.epil, 0
@@ -53,7 +53,7 @@ for.body9.epil:
   %add101.epil = add nsw i32 %mul.epil, 32768
   %shr369.epil = lshr i32 %add101.epil, 16
   %conv102.epil = trunc i32 %shr369.epil to i8
-  store i8 %conv102.epil, i8* undef, align 1
+  store i8 %conv102.epil, ptr undef, align 1
   %epil.iter.sub = add i32 %epil.iter, -1
   %epil.iter.cmp = icmp eq i32 %epil.iter.sub, 0
   br i1 %epil.iter.cmp, label %for.end, label %for.body9.epil

diff  --git a/llvm/test/CodeGen/Hexagon/swp-regseq.ll b/llvm/test/CodeGen/Hexagon/swp-regseq.ll
index ff6535ab8eef3..a36796f150de9 100644
--- a/llvm/test/CodeGen/Hexagon/swp-regseq.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-regseq.ll
@@ -3,14 +3,14 @@
 
 %s.0 = type { i64 }
 
-define i64 @f0(%s.0* nocapture %a0, i32 %a1) {
+define i64 @f0(ptr nocapture %a0, i32 %a1) {
 b0:
   br i1 undef, label %b1, label %b2
 
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ %v6, %b1 ], [ 0, %b0 ]
   %v1 = phi i64 [ %v5, %b1 ], [ undef, %b0 ]
-  %v2 = load i16, i16* undef, align 2
+  %v2 = load i16, ptr undef, align 2
   %v3 = zext i16 %v2 to i64
   %v4 = and i64 %v1, -4294967296
   %v5 = or i64 %v3, %v4

diff  --git a/llvm/test/CodeGen/Hexagon/swp-remove-dep-ice.ll b/llvm/test/CodeGen/Hexagon/swp-remove-dep-ice.ll
index de0d30bf6371f..bcf9d2801310e 100644
--- a/llvm/test/CodeGen/Hexagon/swp-remove-dep-ice.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-remove-dep-ice.ll
@@ -14,11 +14,11 @@ b0:
 b1:                                               ; preds = %b1, %b0
   %v1 = phi i32 [ %v7, %b1 ], [ undef, %b0 ]
   %v2 = add i32 %v1, -1
-  %v3 = getelementptr inbounds [10 x i16], [10 x i16]* %v0, i32 0, i32 %v2
+  %v3 = getelementptr inbounds [10 x i16], ptr %v0, i32 0, i32 %v2
   %v4 = add i32 %v1, -2
-  %v5 = getelementptr inbounds [10 x i16], [10 x i16]* %v0, i32 0, i32 %v4
-  %v6 = load i16, i16* %v5, align 2, !tbaa !0
-  store i16 %v6, i16* %v3, align 2, !tbaa !0
+  %v5 = getelementptr inbounds [10 x i16], ptr %v0, i32 0, i32 %v4
+  %v6 = load i16, ptr %v5, align 2, !tbaa !0
+  store i16 %v6, ptr %v3, align 2, !tbaa !0
   %v7 = add i32 %v1, -4
   %v8 = icmp sgt i32 %v7, 3
   br i1 %v8, label %b1, label %b2

diff  --git a/llvm/test/CodeGen/Hexagon/swp-rename-dead-phi.ll b/llvm/test/CodeGen/Hexagon/swp-rename-dead-phi.ll
index 32bf27d0abdcd..30ac657a48d20 100644
--- a/llvm/test/CodeGen/Hexagon/swp-rename-dead-phi.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-rename-dead-phi.ll
@@ -10,7 +10,7 @@ b0:
   br i1 undef, label %b1, label %b12
 
 b1:                                               ; preds = %b0
-  %v0 = load float, float* undef, align 4
+  %v0 = load float, ptr undef, align 4
   br i1 undef, label %b2, label %b5
 
 b2:                                               ; preds = %b1

diff  --git a/llvm/test/CodeGen/Hexagon/swp-rename.ll b/llvm/test/CodeGen/Hexagon/swp-rename.ll
index eb60a0e38d0e7..8c3947a047f0d 100644
--- a/llvm/test/CodeGen/Hexagon/swp-rename.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-rename.ll
@@ -13,12 +13,12 @@ b0:
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
-  %v1 = phi i16* [ %v4, %b1 ], [ null, %b0 ]
+  %v1 = phi ptr [ %v4, %b1 ], [ null, %b0 ]
   %v2 = phi i32 [ %v5, %b1 ], [ 0, %b0 ]
-  %v3 = getelementptr inbounds i16, i16* %v1, i32 1
-  store i16 0, i16* %v1, align 2
-  %v4 = getelementptr inbounds i16, i16* %v1, i32 2
-  store i16 0, i16* %v3, align 2
+  %v3 = getelementptr inbounds i16, ptr %v1, i32 1
+  store i16 0, ptr %v1, align 2
+  %v4 = getelementptr inbounds i16, ptr %v1, i32 2
+  store i16 0, ptr %v3, align 2
   %v5 = add nsw i32 %v2, 8
   %v6 = icmp slt i32 %v5, %v0
   br i1 %v6, label %b1, label %b2

diff  --git a/llvm/test/CodeGen/Hexagon/swp-replace-uses1.ll b/llvm/test/CodeGen/Hexagon/swp-replace-uses1.ll
index 14b242a6b6318..45258234a7663 100644
--- a/llvm/test/CodeGen/Hexagon/swp-replace-uses1.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-replace-uses1.ll
@@ -17,13 +17,13 @@ b1:                                               ; preds = %b1, %b0
   %v7 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v3, i32 %v3)
   %v8 = tail call i64 @llvm.hexagon.S2.valignib(i64 %v6, i64 undef, i32 2)
   %v9 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 undef, i64 %v7, i64 %v8)
-  %v10 = inttoptr i32 %v5 to i16*
-  %v11 = load i16, i16* %v10, align 2
+  %v10 = inttoptr i32 %v5 to ptr
+  %v11 = load i16, ptr %v10, align 2
   %v12 = sext i16 %v11 to i32
   %v13 = add nsw i32 %v5, -8
   %v14 = tail call i32 @llvm.hexagon.A2.combine.ll(i32 %v12, i32 %v1)
-  %v15 = inttoptr i32 %v13 to i16*
-  %v16 = load i16, i16* %v15, align 2
+  %v15 = inttoptr i32 %v13 to ptr
+  %v16 = load i16, ptr %v15, align 2
   %v17 = sext i16 %v16 to i32
   %v18 = add nsw i32 %v5, -16
   %v19 = add nsw i32 %v2, 1
@@ -33,8 +33,7 @@ b1:                                               ; preds = %b1, %b0
 b2:                                               ; preds = %b1
   %v21 = phi i64 [ %v9, %b1 ]
   %v22 = trunc i64 %v21 to i32
-  %v23 = bitcast i8* undef to i32*
-  store i32 %v22, i32* %v23, align 4
+  store i32 %v22, ptr undef, align 4
   call void @llvm.trap()
   unreachable
 }

diff  --git a/llvm/test/CodeGen/Hexagon/swp-resmii-1.ll b/llvm/test/CodeGen/Hexagon/swp-resmii-1.ll
index 1e2eefae6ac84..6f8f19878df76 100644
--- a/llvm/test/CodeGen/Hexagon/swp-resmii-1.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-resmii-1.ll
@@ -6,7 +6,7 @@
 ; CHECK: MII = 4 MAX_II = 14 (rec=1, res=4)
 
 ; Function Attrs: nounwind
-define void @f0(i16* noalias nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, i8* noalias nocapture %a4, i32 %a5) #0 {
+define void @f0(ptr noalias nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, ptr noalias nocapture %a4, i32 %a5) #0 {
 b0:
   %v0 = ashr i32 %a3, 2
   %v1 = ashr i32 %a3, 1
@@ -32,36 +32,30 @@ b3:                                               ; preds = %b2
   %v12 = add i32 %v2, %v8
   %v13 = add i32 %v8, %v0
   %v14 = add i32 %v8, %v1
-  %v15 = getelementptr inbounds i8, i8* %a4, i32 %v10
-  %v16 = getelementptr inbounds i8, i8* %a4, i32 %v11
-  %v17 = getelementptr inbounds i16, i16* %a0, i32 %v12
-  %v18 = getelementptr inbounds i16, i16* %a0, i32 %v13
-  %v19 = getelementptr inbounds i16, i16* %a0, i32 %v14
-  %v20 = getelementptr inbounds i16, i16* %a0, i32 %v8
-  %v21 = bitcast i8* %v15 to <16 x i32>*
-  %v22 = bitcast i8* %v16 to <16 x i32>*
-  %v23 = bitcast i16* %v17 to <16 x i32>*
-  %v24 = bitcast i16* %v18 to <16 x i32>*
-  %v25 = bitcast i16* %v19 to <16 x i32>*
-  %v26 = bitcast i16* %v20 to <16 x i32>*
+  %v15 = getelementptr inbounds i8, ptr %a4, i32 %v10
+  %v16 = getelementptr inbounds i8, ptr %a4, i32 %v11
+  %v17 = getelementptr inbounds i16, ptr %a0, i32 %v12
+  %v18 = getelementptr inbounds i16, ptr %a0, i32 %v13
+  %v19 = getelementptr inbounds i16, ptr %a0, i32 %v14
+  %v20 = getelementptr inbounds i16, ptr %a0, i32 %v8
   br label %b4
 
 b4:                                               ; preds = %b4, %b3
   %v27 = phi i32 [ 0, %b3 ], [ %v54, %b4 ]
-  %v28 = phi <16 x i32>* [ %v26, %b3 ], [ %v34, %b4 ]
-  %v29 = phi <16 x i32>* [ %v25, %b3 ], [ %v36, %b4 ]
-  %v30 = phi <16 x i32>* [ %v24, %b3 ], [ %v38, %b4 ]
-  %v31 = phi <16 x i32>* [ %v23, %b3 ], [ %v40, %b4 ]
-  %v32 = phi <16 x i32>* [ %v21, %b3 ], [ %v53, %b4 ]
-  %v33 = phi <16 x i32>* [ %v22, %b3 ], [ %v52, %b4 ]
-  %v34 = getelementptr inbounds <16 x i32>, <16 x i32>* %v28, i32 1
-  %v35 = load <16 x i32>, <16 x i32>* %v28, align 64
-  %v36 = getelementptr inbounds <16 x i32>, <16 x i32>* %v29, i32 1
-  %v37 = load <16 x i32>, <16 x i32>* %v29, align 64
-  %v38 = getelementptr inbounds <16 x i32>, <16 x i32>* %v30, i32 1
-  %v39 = load <16 x i32>, <16 x i32>* %v30, align 64
-  %v40 = getelementptr inbounds <16 x i32>, <16 x i32>* %v31, i32 1
-  %v41 = load <16 x i32>, <16 x i32>* %v31, align 64
+  %v28 = phi ptr [ %v20, %b3 ], [ %v34, %b4 ]
+  %v29 = phi ptr [ %v19, %b3 ], [ %v36, %b4 ]
+  %v30 = phi ptr [ %v18, %b3 ], [ %v38, %b4 ]
+  %v31 = phi ptr [ %v17, %b3 ], [ %v40, %b4 ]
+  %v32 = phi ptr [ %v15, %b3 ], [ %v53, %b4 ]
+  %v33 = phi ptr [ %v16, %b3 ], [ %v52, %b4 ]
+  %v34 = getelementptr inbounds <16 x i32>, ptr %v28, i32 1
+  %v35 = load <16 x i32>, ptr %v28, align 64
+  %v36 = getelementptr inbounds <16 x i32>, ptr %v29, i32 1
+  %v37 = load <16 x i32>, ptr %v29, align 64
+  %v38 = getelementptr inbounds <16 x i32>, ptr %v30, i32 1
+  %v39 = load <16 x i32>, ptr %v30, align 64
+  %v40 = getelementptr inbounds <16 x i32>, ptr %v31, i32 1
+  %v41 = load <16 x i32>, ptr %v31, align 64
   %v42 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v35, <16 x i32> %v37)
   %v43 = tail call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> %v35, <16 x i32> %v37)
   %v44 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v39, <16 x i32> %v41)
@@ -72,10 +66,10 @@ b4:                                               ; preds = %b4, %b3
   %v49 = tail call <16 x i32> @llvm.hexagon.V6.vnavgh(<16 x i32> %v43, <16 x i32> %v45)
   %v50 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v47, <16 x i32> %v46)
   %v51 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v49, <16 x i32> %v48)
-  %v52 = getelementptr inbounds <16 x i32>, <16 x i32>* %v33, i32 1
-  store <16 x i32> %v50, <16 x i32>* %v33, align 64
-  %v53 = getelementptr inbounds <16 x i32>, <16 x i32>* %v32, i32 1
-  store <16 x i32> %v51, <16 x i32>* %v32, align 64
+  %v52 = getelementptr inbounds <16 x i32>, ptr %v33, i32 1
+  store <16 x i32> %v50, ptr %v33, align 64
+  %v53 = getelementptr inbounds <16 x i32>, ptr %v32, i32 1
+  store <16 x i32> %v51, ptr %v32, align 64
   %v54 = add nsw i32 %v27, 1
   %v55 = icmp slt i32 %v54, %v4
   br i1 %v55, label %b4, label %b5

diff  --git a/llvm/test/CodeGen/Hexagon/swp-resmii.ll b/llvm/test/CodeGen/Hexagon/swp-resmii.ll
index 99812af3be5d0..f65ae7bec3ad8 100644
--- a/llvm/test/CodeGen/Hexagon/swp-resmii.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-resmii.ll
@@ -7,28 +7,28 @@
 ; CHECK: MII = 1 MAX_II = 11 (rec=1, res=1)
 
 ; Function Attrs: nounwind
-define void @f0(i32* nocapture %a0, i32 %a1) #0 {
+define void @f0(ptr nocapture %a0, i32 %a1) #0 {
 b0:
   %v0 = icmp sgt i32 %a1, 1
   br i1 %v0, label %b1, label %b4
 
 b1:                                               ; preds = %b0
-  %v1 = load i32, i32* %a0, align 4
+  %v1 = load i32, ptr %a0, align 4
   %v2 = add i32 %v1, 10
-  %v3 = getelementptr i32, i32* %a0, i32 1
+  %v3 = getelementptr i32, ptr %a0, i32 1
   %v4 = add i32 %a1, -1
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
   %v5 = phi i32 [ %v12, %b2 ], [ %v4, %b1 ]
-  %v6 = phi i32* [ %v11, %b2 ], [ %v3, %b1 ]
+  %v6 = phi ptr [ %v11, %b2 ], [ %v3, %b1 ]
   %v7 = phi i32 [ %v10, %b2 ], [ %v2, %b1 ]
-  store i32 %v7, i32* %v6, align 4
+  store i32 %v7, ptr %v6, align 4
   %v8 = add i32 %v7, 10
-  %v9 = getelementptr i32, i32* %v6, i32 -1
-  store i32 %v8, i32* %v9, align 4
+  %v9 = getelementptr i32, ptr %v6, i32 -1
+  store i32 %v8, ptr %v9, align 4
   %v10 = add i32 %v7, 10
-  %v11 = getelementptr i32, i32* %v6, i32 1
+  %v11 = getelementptr i32, ptr %v6, i32 1
   %v12 = add i32 %v5, -1
   %v13 = icmp eq i32 %v12, 0
   br i1 %v13, label %b3, label %b2

diff  --git a/llvm/test/CodeGen/Hexagon/swp-reuse-phi-1.ll b/llvm/test/CodeGen/Hexagon/swp-reuse-phi-1.ll
index a7a90d80ef97a..e1466929b0fa9 100644
--- a/llvm/test/CodeGen/Hexagon/swp-reuse-phi-1.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-reuse-phi-1.ll
@@ -19,7 +19,7 @@ b2:                                               ; preds = %b2, %b1
   %v6 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> undef, <16 x i32> %v3)
   %v7 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v6, <16 x i32> %v5)
   %v8 = tail call <16 x i32> @llvm.hexagon.V6.vabsh(<16 x i32> %v7)
-  store <16 x i32> %v8, <16 x i32>* undef, align 64
+  store <16 x i32> %v8, ptr undef, align 64
   %v9 = add nsw i32 %v0, 1
   %v10 = icmp slt i32 %v9, undef
   br i1 %v10, label %b2, label %b3

diff  --git a/llvm/test/CodeGen/Hexagon/swp-reuse-phi-2.ll b/llvm/test/CodeGen/Hexagon/swp-reuse-phi-2.ll
index f63977955e5ed..12df50301d60d 100644
--- a/llvm/test/CodeGen/Hexagon/swp-reuse-phi-2.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-reuse-phi-2.ll
@@ -6,31 +6,30 @@
 ; number of stages is two or more.
 
 ; Function Attrs: nounwind
-define void @f0(i16* noalias nocapture %a0) #0 {
+define void @f0(ptr noalias nocapture %a0) #0 {
 b0:
   br i1 undef, label %b1, label %b3
 
 b1:                                               ; preds = %b0
-  %v0 = bitcast i16* %a0 to <16 x i32>*
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
   %v1 = phi i32 [ 0, %b1 ], [ %v15, %b2 ]
-  %v2 = phi <16 x i32>* [ %v0, %b1 ], [ %v14, %b2 ]
-  %v3 = phi <16 x i32>* [ undef, %b1 ], [ %v6, %b2 ]
+  %v2 = phi ptr [ %a0, %b1 ], [ %v14, %b2 ]
+  %v3 = phi ptr [ undef, %b1 ], [ %v6, %b2 ]
   %v4 = phi <16 x i32> [ undef, %b1 ], [ %v10, %b2 ]
   %v5 = phi <16 x i32> [ undef, %b1 ], [ %v4, %b2 ]
-  %v6 = getelementptr inbounds <16 x i32>, <16 x i32>* %v3, i32 1
-  %v7 = load <16 x i32>, <16 x i32>* %v3, align 64
+  %v6 = getelementptr inbounds <16 x i32>, ptr %v3, i32 1
+  %v7 = load <16 x i32>, ptr %v3, align 64
   %v8 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> undef, <16 x i32> %v7)
   %v9 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v4, <16 x i32> %v5, i32 62)
   %v10 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v8, <16 x i32> undef)
   %v11 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v10, <16 x i32> %v4, i32 2)
   %v12 = tail call <16 x i32> @llvm.hexagon.V6.vabs
diff h(<16 x i32> %v9, <16 x i32> %v11)
-  %v13 = getelementptr inbounds <16 x i32>, <16 x i32>* %v2, i32 1
-  store <16 x i32> %v12, <16 x i32>* %v2, align 64
-  %v14 = getelementptr inbounds <16 x i32>, <16 x i32>* %v2, i32 2
-  store <16 x i32> zeroinitializer, <16 x i32>* %v13, align 64
+  %v13 = getelementptr inbounds <16 x i32>, ptr %v2, i32 1
+  store <16 x i32> %v12, ptr %v2, align 64
+  %v14 = getelementptr inbounds <16 x i32>, ptr %v2, i32 2
+  store <16 x i32> zeroinitializer, ptr %v13, align 64
   %v15 = add nsw i32 %v1, 1
   %v16 = icmp slt i32 %v15, undef
   br i1 %v16, label %b2, label %b3

diff  --git a/llvm/test/CodeGen/Hexagon/swp-reuse-phi-4.ll b/llvm/test/CodeGen/Hexagon/swp-reuse-phi-4.ll
index 4a309d5e31e11..d0e70a4f4be75 100644
--- a/llvm/test/CodeGen/Hexagon/swp-reuse-phi-4.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-reuse-phi-4.ll
@@ -37,13 +37,13 @@ declare <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32>, <16 x i32>, i32) #0
 declare <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32>, <16 x i32>) #0
 
 ; Function Attrs: nounwind
-define void @f0(i8* noalias nocapture readonly %a0, i32 %a1, i32 %a2) #1 {
+define void @f0(ptr noalias nocapture readonly %a0, i32 %a1, i32 %a2) #1 {
 b0:
   %v0 = mul nsw i32 %a1, 2
   br i1 undef, label %b1, label %b5
 
 b1:                                               ; preds = %b0
-  %v1 = getelementptr inbounds i8, i8* %a0, i32 %v0
+  %v1 = getelementptr inbounds i8, ptr %a0, i32 %v0
   %v2 = icmp sgt i32 %a2, 64
   %v3 = add i32 %v0, 64
   %v4 = add i32 %a1, 64
@@ -52,38 +52,33 @@ b1:                                               ; preds = %b0
   br i1 %v2, label %b2, label %b4
 
 b2:                                               ; preds = %b1
-  %v7 = getelementptr inbounds i8, i8* %v1, i32 %v3
-  %v8 = getelementptr inbounds i8, i8* %v1, i32 %v4
-  %v9 = getelementptr inbounds i8, i8* %v1, i32 64
-  %v10 = getelementptr inbounds i8, i8* %v1, i32 %v5
-  %v11 = getelementptr inbounds i8, i8* %v1, i32 %v6
-  %v12 = bitcast i8* %v7 to <16 x i32>*
-  %v13 = bitcast i8* %v8 to <16 x i32>*
-  %v14 = bitcast i8* %v9 to <16 x i32>*
-  %v15 = bitcast i8* %v10 to <16 x i32>*
-  %v16 = bitcast i8* %v11 to <16 x i32>*
+  %v7 = getelementptr inbounds i8, ptr %v1, i32 %v3
+  %v8 = getelementptr inbounds i8, ptr %v1, i32 %v4
+  %v9 = getelementptr inbounds i8, ptr %v1, i32 64
+  %v10 = getelementptr inbounds i8, ptr %v1, i32 %v5
+  %v11 = getelementptr inbounds i8, ptr %v1, i32 %v6
   br label %b3
 
 b3:                                               ; preds = %b3, %b2
-  %v17 = phi <16 x i32>* [ null, %b2 ], [ %v52, %b3 ]
-  %v18 = phi <16 x i32>* [ %v12, %b2 ], [ %v34, %b3 ]
-  %v19 = phi <16 x i32>* [ %v13, %b2 ], [ %v32, %b3 ]
-  %v20 = phi <16 x i32>* [ %v14, %b2 ], [ %v30, %b3 ]
-  %v21 = phi <16 x i32>* [ %v15, %b2 ], [ %v28, %b3 ]
-  %v22 = phi <16 x i32>* [ %v16, %b2 ], [ %v26, %b3 ]
+  %v17 = phi ptr [ null, %b2 ], [ %v52, %b3 ]
+  %v18 = phi ptr [ %v7, %b2 ], [ %v34, %b3 ]
+  %v19 = phi ptr [ %v8, %b2 ], [ %v32, %b3 ]
+  %v20 = phi ptr [ %v9, %b2 ], [ %v30, %b3 ]
+  %v21 = phi ptr [ %v10, %b2 ], [ %v28, %b3 ]
+  %v22 = phi ptr [ %v11, %b2 ], [ %v26, %b3 ]
   %v23 = phi <32 x i32> [ undef, %b2 ], [ %v37, %b3 ]
   %v24 = phi <32 x i32> [ zeroinitializer, %b2 ], [ %v23, %b3 ]
   %v25 = phi i32 [ %a2, %b2 ], [ %v53, %b3 ]
-  %v26 = getelementptr inbounds <16 x i32>, <16 x i32>* %v22, i32 1
-  %v27 = load <16 x i32>, <16 x i32>* %v22, align 64
-  %v28 = getelementptr inbounds <16 x i32>, <16 x i32>* %v21, i32 1
-  %v29 = load <16 x i32>, <16 x i32>* %v21, align 64
-  %v30 = getelementptr inbounds <16 x i32>, <16 x i32>* %v20, i32 1
-  %v31 = load <16 x i32>, <16 x i32>* %v20, align 64
-  %v32 = getelementptr inbounds <16 x i32>, <16 x i32>* %v19, i32 1
-  %v33 = load <16 x i32>, <16 x i32>* %v19, align 64
-  %v34 = getelementptr inbounds <16 x i32>, <16 x i32>* %v18, i32 1
-  %v35 = load <16 x i32>, <16 x i32>* %v18, align 64
+  %v26 = getelementptr inbounds <16 x i32>, ptr %v22, i32 1
+  %v27 = load <16 x i32>, ptr %v22, align 64
+  %v28 = getelementptr inbounds <16 x i32>, ptr %v21, i32 1
+  %v29 = load <16 x i32>, ptr %v21, align 64
+  %v30 = getelementptr inbounds <16 x i32>, ptr %v20, i32 1
+  %v31 = load <16 x i32>, ptr %v20, align 64
+  %v32 = getelementptr inbounds <16 x i32>, ptr %v19, i32 1
+  %v33 = load <16 x i32>, ptr %v19, align 64
+  %v34 = getelementptr inbounds <16 x i32>, ptr %v18, i32 1
+  %v35 = load <16 x i32>, ptr %v18, align 64
   %v36 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v33, <16 x i32> %v29) #3
   %v37 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> undef, <32 x i32> %v36, i32 67372036) #3
   %v38 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v23) #3
@@ -100,8 +95,8 @@ b3:                                               ; preds = %b3, %b2
   %v49 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %v48, <16 x i32> undef, i32 101058054) #3
   %v50 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %v49, <16 x i32> zeroinitializer, i32 67372036) #3
   %v51 = tail call <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32> %v50, <16 x i32> %v47) #3
-  %v52 = getelementptr inbounds <16 x i32>, <16 x i32>* %v17, i32 1
-  store <16 x i32> %v51, <16 x i32>* %v17, align 64
+  %v52 = getelementptr inbounds <16 x i32>, ptr %v17, i32 1
+  store <16 x i32> %v51, ptr %v17, align 64
   %v53 = add nsw i32 %v25, -64
   %v54 = icmp sgt i32 %v53, 64
   br i1 %v54, label %b3, label %b4
@@ -114,9 +109,9 @@ b5:                                               ; preds = %b0
 }
 
 ; Function Attrs: nounwind
-define void @f1(i32 %a0, i32* %a1) #1 {
+define void @f1(i32 %a0, ptr %a1) #1 {
 b0:
-  %v0 = ptrtoint i32* %a1 to i32
+  %v0 = ptrtoint ptr %a1 to i32
   %v1 = ashr i32 %a0, 1
   %v2 = tail call i32 @llvm.hexagon.A2.combine.ll(i32 undef, i32 undef)
   br i1 undef, label %b1, label %b2
@@ -143,10 +138,9 @@ b3:                                               ; preds = %b2
   %v15 = phi i64 [ %v7, %b2 ]
   %v16 = trunc i64 %v14 to i32
   %v17 = trunc i64 %v15 to i32
-  %v18 = inttoptr i32 %v0 to i32*
-  store i32 %v17, i32* %v18, align 4
-  %v19 = bitcast i8* undef to i32*
-  store i32 %v16, i32* %v19, align 4
+  %v18 = inttoptr i32 %v0 to ptr
+  store i32 %v17, ptr %v18, align 4
+  store i32 %v16, ptr undef, align 4
   call void @llvm.trap()
   unreachable
 }

diff  --git a/llvm/test/CodeGen/Hexagon/swp-reuse-phi-5.ll b/llvm/test/CodeGen/Hexagon/swp-reuse-phi-5.ll
index fcbf72ae31944..102548dd4a4c4 100644
--- a/llvm/test/CodeGen/Hexagon/swp-reuse-phi-5.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-reuse-phi-5.ll
@@ -31,7 +31,7 @@ b1:                                               ; preds = %b1, %b0
 b2:                                               ; preds = %b1
   %v12 = lshr i64 %v7, 32
   %v13 = trunc i64 %v12 to i32
-  store i32 %v13, i32* undef, align 4
+  store i32 %v13, ptr undef, align 4
   %v14 = lshr i64 %v5, 32
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/swp-reuse-phi-6.ll b/llvm/test/CodeGen/Hexagon/swp-reuse-phi-6.ll
index 6883e51503b14..e6aa9ccb53080 100644
--- a/llvm/test/CodeGen/Hexagon/swp-reuse-phi-6.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-reuse-phi-6.ll
@@ -13,47 +13,42 @@
 ; CHECK: }{{[ \t]*}}:endloop0
 
 ; Function Attrs: nounwind
-define void @f0(i32 %a0, i32 %a1, i8* %a2, <16 x i32>* %a3) #0 {
+define void @f0(i32 %a0, i32 %a1, ptr %a2, ptr %a3) #0 {
 b0:
   %v0 = shl nsw i32 %a0, 1
   %v1 = sub i32 0, %v0
   %v2 = sub i32 0, %a0
-  %v3 = getelementptr inbounds i8, i8* %a2, i32 %v1
-  %v4 = getelementptr inbounds i8, i8* %a2, i32 %v2
-  %v5 = getelementptr inbounds i8, i8* %a2, i32 %a0
-  %v6 = getelementptr inbounds i8, i8* %a2, i32 %v0
-  %v7 = getelementptr inbounds i8, i8* %v6, i32 64
-  %v8 = bitcast i8* %v7 to <16 x i32>*
-  %v9 = getelementptr inbounds i8, i8* %v5, i32 64
-  %v10 = bitcast i8* %v9 to <16 x i32>*
-  %v11 = getelementptr inbounds i8, i8* %a2, i32 64
-  %v12 = bitcast i8* %v11 to <16 x i32>*
-  %v13 = getelementptr inbounds i8, i8* %v4, i32 64
-  %v14 = bitcast i8* %v13 to <16 x i32>*
-  %v15 = getelementptr inbounds i8, i8* %v3, i32 64
-  %v16 = bitcast i8* %v15 to <16 x i32>*
+  %v3 = getelementptr inbounds i8, ptr %a2, i32 %v1
+  %v4 = getelementptr inbounds i8, ptr %a2, i32 %v2
+  %v5 = getelementptr inbounds i8, ptr %a2, i32 %a0
+  %v6 = getelementptr inbounds i8, ptr %a2, i32 %v0
+  %v7 = getelementptr inbounds i8, ptr %v6, i32 64
+  %v9 = getelementptr inbounds i8, ptr %v5, i32 64
+  %v11 = getelementptr inbounds i8, ptr %a2, i32 64
+  %v13 = getelementptr inbounds i8, ptr %v4, i32 64
+  %v15 = getelementptr inbounds i8, ptr %v3, i32 64
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
-  %v17 = phi <16 x i32>* [ %v59, %b1 ], [ %a3, %b0 ]
-  %v18 = phi <16 x i32>* [ %v34, %b1 ], [ %v8, %b0 ]
-  %v19 = phi <16 x i32>* [ %v32, %b1 ], [ %v10, %b0 ]
-  %v20 = phi <16 x i32>* [ %v30, %b1 ], [ %v12, %b0 ]
-  %v21 = phi <16 x i32>* [ %v28, %b1 ], [ %v14, %b0 ]
-  %v22 = phi <16 x i32>* [ %v26, %b1 ], [ %v16, %b0 ]
+  %v17 = phi ptr [ %v59, %b1 ], [ %a3, %b0 ]
+  %v18 = phi ptr [ %v34, %b1 ], [ %v7, %b0 ]
+  %v19 = phi ptr [ %v32, %b1 ], [ %v9, %b0 ]
+  %v20 = phi ptr [ %v30, %b1 ], [ %v11, %b0 ]
+  %v21 = phi ptr [ %v28, %b1 ], [ %v13, %b0 ]
+  %v22 = phi ptr [ %v26, %b1 ], [ %v15, %b0 ]
   %v23 = phi <32 x i32> [ %v39, %b1 ], [ undef, %b0 ]
   %v24 = phi <32 x i32> [ %v23, %b1 ], [ undef, %b0 ]
   %v25 = phi i32 [ %v60, %b1 ], [ %a1, %b0 ]
-  %v26 = getelementptr inbounds <16 x i32>, <16 x i32>* %v22, i32 1
-  %v27 = load <16 x i32>, <16 x i32>* %v22, align 64
-  %v28 = getelementptr inbounds <16 x i32>, <16 x i32>* %v21, i32 1
-  %v29 = load <16 x i32>, <16 x i32>* %v21, align 64
-  %v30 = getelementptr inbounds <16 x i32>, <16 x i32>* %v20, i32 1
-  %v31 = load <16 x i32>, <16 x i32>* %v20, align 64
-  %v32 = getelementptr inbounds <16 x i32>, <16 x i32>* %v19, i32 1
-  %v33 = load <16 x i32>, <16 x i32>* %v19, align 64
-  %v34 = getelementptr inbounds <16 x i32>, <16 x i32>* %v18, i32 1
-  %v35 = load <16 x i32>, <16 x i32>* %v18, align 64
+  %v26 = getelementptr inbounds <16 x i32>, ptr %v22, i32 1
+  %v27 = load <16 x i32>, ptr %v22, align 64
+  %v28 = getelementptr inbounds <16 x i32>, ptr %v21, i32 1
+  %v29 = load <16 x i32>, ptr %v21, align 64
+  %v30 = getelementptr inbounds <16 x i32>, ptr %v20, i32 1
+  %v31 = load <16 x i32>, ptr %v20, align 64
+  %v32 = getelementptr inbounds <16 x i32>, ptr %v19, i32 1
+  %v33 = load <16 x i32>, ptr %v19, align 64
+  %v34 = getelementptr inbounds <16 x i32>, ptr %v18, i32 1
+  %v35 = load <16 x i32>, ptr %v18, align 64
   %v36 = tail call <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32> %v27, <16 x i32> %v35) #2
   %v37 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v36, <16 x i32> %v31, i32 101058054) #2
   %v38 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v33, <16 x i32> %v29) #2
@@ -77,8 +72,8 @@ b1:                                               ; preds = %b1, %b0
   %v56 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %v55, <16 x i32> %v43, i32 101058054) #2
   %v57 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %v56, <16 x i32> %v51, i32 67372036) #2
   %v58 = tail call <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32> %v57, <16 x i32> %v54) #2
-  %v59 = getelementptr inbounds <16 x i32>, <16 x i32>* %v17, i32 1
-  store <16 x i32> %v58, <16 x i32>* %v17, align 64
+  %v59 = getelementptr inbounds <16 x i32>, ptr %v17, i32 1
+  store <16 x i32> %v58, ptr %v17, align 64
   %v60 = add nsw i32 %v25, -64
   %v61 = icmp sgt i32 %v25, 128
   br i1 %v61, label %b1, label %b2

diff  --git a/llvm/test/CodeGen/Hexagon/swp-reuse-phi.ll b/llvm/test/CodeGen/Hexagon/swp-reuse-phi.ll
index a3221e56b34ef..f62069a786f85 100644
--- a/llvm/test/CodeGen/Hexagon/swp-reuse-phi.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-reuse-phi.ll
@@ -15,14 +15,14 @@ b1:                                               ; preds = %b1, %b0
   %v1 = phi i32 [ %v11, %b1 ], [ 0, %b0 ]
   %v2 = phi float [ %v4, %b1 ], [ undef, %b0 ]
   %v3 = phi float [ %v2, %b1 ], [ undef, %b0 ]
-  %v4 = load float, float* undef, align 4
+  %v4 = load float, ptr undef, align 4
   %v5 = fmul float %v4, 0x3FEFAA0000000000
   %v6 = fadd float undef, %v5
   %v7 = fmul float %v2, 0xBFFFAA0000000000
   %v8 = fadd float %v7, %v6
   %v9 = fmul float %v3, 0x3FEFAA0000000000
   %v10 = fadd float %v9, %v8
-  store float %v10, float* undef, align 4
+  store float %v10, ptr undef, align 4
   %v11 = add nsw i32 %v1, 1
   %v12 = icmp eq i32 %v11, %a0
   br i1 %v12, label %b2, label %b1

diff  --git a/llvm/test/CodeGen/Hexagon/swp-sigma.ll b/llvm/test/CodeGen/Hexagon/swp-sigma.ll
index 1e376323a32f3..f4e0b0fac29c1 100644
--- a/llvm/test/CodeGen/Hexagon/swp-sigma.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-sigma.ll
@@ -30,7 +30,7 @@ declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #0
 declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #0
 declare <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32>, <16 x i32>) #0
 
-define void @f0(i8* nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, i8 zeroext %a4, i8* nocapture %a5) #1 {
+define void @f0(ptr nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, i8 zeroext %a4, ptr nocapture %a5) #1 {
 b0:
   %v0 = add nsw i32 %a3, -1
   %v1 = icmp sgt i32 %v0, 1
@@ -38,7 +38,7 @@ b0:
 
 b1:                                               ; preds = %b0
   %v2 = mul i32 %a1, 2
-  %v3 = load <16 x i32>, <16 x i32>* bitcast ([10 x i16]* @g0 to <16 x i32>*), align 128
+  %v3 = load <16 x i32>, ptr @g0, align 128
   %v4 = tail call <16 x i32> @llvm.hexagon.V6.vshuffh(<16 x i32> %v3) #2
   %v5 = zext i8 %a4 to i32
   %v6 = tail call i32 @llvm.hexagon.S2.vsplatrb(i32 %v5) #2
@@ -51,48 +51,41 @@ b1:                                               ; preds = %b0
   %v13 = add i32 %v12, %a1
   %v14 = icmp sgt i32 %a2, 0
   %v15 = add i32 %a3, -2
-  %v16 = bitcast i8* %a0 to <16 x i32>*
-  %v17 = load <16 x i32>, <16 x i32>* %v16, align 64
+  %v17 = load <16 x i32>, ptr %a0, align 64
   br label %b2
 
 b2:                                               ; preds = %b7, %b1
   %v18 = phi <16 x i32> [ %v17, %b1 ], [ %v28, %b7 ]
-  %v19 = phi i8* [ %a0, %b1 ], [ %v23, %b7 ]
-  %v20 = phi i8* [ %a5, %b1 ], [ %v22, %b7 ]
+  %v19 = phi ptr [ %a0, %b1 ], [ %v23, %b7 ]
+  %v20 = phi ptr [ %a5, %b1 ], [ %v22, %b7 ]
   %v21 = phi i32 [ 1, %b1 ], [ %v118, %b7 ]
-  %v22 = getelementptr inbounds i8, i8* %v20, i32 %a1
-  %v23 = getelementptr inbounds i8, i8* %v19, i32 %a1
-  %v24 = bitcast i8* %v23 to <16 x i32>*
-  %v25 = getelementptr inbounds i8, i8* %v19, i32 %v2
-  %v26 = bitcast i8* %v25 to <16 x i32>*
-  %v27 = bitcast i8* %v22 to <16 x i32>*
-  %v28 = load <16 x i32>, <16 x i32>* %v24, align 64
-  %v29 = load <16 x i32>, <16 x i32>* %v26, align 64
+  %v22 = getelementptr inbounds i8, ptr %v20, i32 %a1
+  %v23 = getelementptr inbounds i8, ptr %v19, i32 %a1
+  %v25 = getelementptr inbounds i8, ptr %v19, i32 %v2
+  %v28 = load <16 x i32>, ptr %v23, align 64
+  %v29 = load <16 x i32>, ptr %v25, align 64
   br i1 %v11, label %b3, label %b4
 
 b3:                                               ; preds = %b2
-  %v30 = getelementptr inbounds i8, i8* %v19, i32 64
-  %v31 = getelementptr inbounds i8, i8* %v19, i32 %v12
-  %v32 = bitcast i8* %v31 to <16 x i32>*
-  %v33 = getelementptr inbounds i8, i8* %v19, i32 %v13
-  %v34 = bitcast i8* %v33 to <16 x i32>*
+  %v30 = getelementptr inbounds i8, ptr %v19, i32 64
+  %v31 = getelementptr inbounds i8, ptr %v19, i32 %v12
+  %v33 = getelementptr inbounds i8, ptr %v19, i32 %v13
   br label %b5
 
 b4:                                               ; preds = %b2
   br i1 %v14, label %b5, label %b7
 
 b5:                                               ; preds = %b4, %b3
-  %v35 = phi <16 x i32>* [ %v26, %b4 ], [ %v34, %b3 ]
-  %v36 = phi <16 x i32>* [ %v24, %b4 ], [ %v32, %b3 ]
-  %v37 = phi i8* [ %v19, %b4 ], [ %v30, %b3 ]
-  %v38 = bitcast i8* %v37 to <16 x i32>*
+  %v35 = phi ptr [ %v25, %b4 ], [ %v33, %b3 ]
+  %v36 = phi ptr [ %v23, %b4 ], [ %v31, %b3 ]
+  %v37 = phi ptr [ %v19, %b4 ], [ %v30, %b3 ]
   br label %b6
 
 b6:                                               ; preds = %b6, %b5
-  %v39 = phi <16 x i32>* [ %v108, %b6 ], [ %v27, %b5 ]
-  %v40 = phi <16 x i32>* [ %v115, %b6 ], [ %v35, %b5 ]
-  %v41 = phi <16 x i32>* [ %v114, %b6 ], [ %v36, %b5 ]
-  %v42 = phi <16 x i32>* [ %v113, %b6 ], [ %v38, %b5 ]
+  %v39 = phi ptr [ %v108, %b6 ], [ %v22, %b5 ]
+  %v40 = phi ptr [ %v115, %b6 ], [ %v35, %b5 ]
+  %v41 = phi ptr [ %v114, %b6 ], [ %v36, %b5 ]
+  %v42 = phi ptr [ %v113, %b6 ], [ %v37, %b5 ]
   %v43 = phi i32 [ %v116, %b6 ], [ %a2, %b5 ]
   %v44 = phi <16 x i32> [ %v45, %b6 ], [ %v8, %b5 ]
   %v45 = phi <16 x i32> [ %v50, %b6 ], [ %v18, %b5 ]
@@ -100,9 +93,9 @@ b6:                                               ; preds = %b6, %b5
   %v47 = phi <16 x i32> [ %v51, %b6 ], [ %v28, %b5 ]
   %v48 = phi <16 x i32> [ %v49, %b6 ], [ %v8, %b5 ]
   %v49 = phi <16 x i32> [ %v52, %b6 ], [ %v29, %b5 ]
-  %v50 = load <16 x i32>, <16 x i32>* %v42, align 64
-  %v51 = load <16 x i32>, <16 x i32>* %v41, align 64
-  %v52 = load <16 x i32>, <16 x i32>* %v40, align 64
+  %v50 = load <16 x i32>, ptr %v42, align 64
+  %v51 = load <16 x i32>, ptr %v41, align 64
+  %v52 = load <16 x i32>, ptr %v40, align 64
   %v53 = tail call <32 x i32> @llvm.hexagon.V6.vsububh(<16 x i32> %v8, <16 x i32> %v47) #2
  %v54 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v45, <16 x i32> %v47) #2
  %v55 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v49, <16 x i32> %v47) #2
@@ -158,15 +151,15 @@ b6:                                               ; preds = %b6, %b5
   %v105 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v100) #2
   %v106 = tail call <16 x i32> @llvm.hexagon.V6.vmpyhvsrs(<16 x i32> %v104, <16 x i32> %v105) #2
   %v107 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v106, <16 x i32> %v103) #2
-  %v108 = getelementptr inbounds <16 x i32>, <16 x i32>* %v39, i32 1
-  store <16 x i32> %v107, <16 x i32>* %v39, align 64
+  %v108 = getelementptr inbounds <16 x i32>, ptr %v39, i32 1
+  store <16 x i32> %v107, ptr %v39, align 64
   %v109 = icmp sgt i32 %v43, 128
-  %v110 = getelementptr inbounds <16 x i32>, <16 x i32>* %v42, i32 1
-  %v111 = getelementptr inbounds <16 x i32>, <16 x i32>* %v41, i32 1
-  %v112 = getelementptr inbounds <16 x i32>, <16 x i32>* %v40, i32 1
-  %v113 = select i1 %v109, <16 x i32>* %v110, <16 x i32>* %v42
-  %v114 = select i1 %v109, <16 x i32>* %v111, <16 x i32>* %v41
-  %v115 = select i1 %v109, <16 x i32>* %v112, <16 x i32>* %v40
+  %v110 = getelementptr inbounds <16 x i32>, ptr %v42, i32 1
+  %v111 = getelementptr inbounds <16 x i32>, ptr %v41, i32 1
+  %v112 = getelementptr inbounds <16 x i32>, ptr %v40, i32 1
+  %v113 = select i1 %v109, ptr %v110, ptr %v42
+  %v114 = select i1 %v109, ptr %v111, ptr %v41
+  %v115 = select i1 %v109, ptr %v112, ptr %v40
   %v116 = add nsw i32 %v43, -64
   %v117 = icmp sgt i32 %v43, 64
   br i1 %v117, label %b6, label %b7

diff  --git a/llvm/test/CodeGen/Hexagon/swp-stages.ll b/llvm/test/CodeGen/Hexagon/swp-stages.ll
index f575260ec7143..5778e1b9e6849 100644
--- a/llvm/test/CodeGen/Hexagon/swp-stages.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-stages.ll
@@ -6,7 +6,7 @@
 
 ; Test that we generate pipelines with multiple stages correctly.
 
-%s.0 = type { [194 x i32], i32*, [10 x i32], [10 x i32], i32, i32, i32, i32, i32, [9 x i32], [9 x i32], i16, i16, i16, i16, %s.1*, %s.2*, %s.3*, %s.4*, %s.5*, %s.6*, %s.7*, %s.8*, %s.9* }
+%s.0 = type { [194 x i32], ptr, [10 x i32], [10 x i32], i32, i32, i32, i32, i32, [9 x i32], [9 x i32], i16, i16, i16, i16, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
 %s.1 = type { [60 x i32], i16 }
 %s.2 = type { i32, [7 x i32], i16 }
 %s.3 = type { [10 x i32] }
@@ -18,18 +18,18 @@
 %s.9 = type { i8, i32, i32, i32, [10 x i32], [10 x i32], [80 x i32], [80 x i32], [8 x i32], i32, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16 }
 
 ; Function Attrs: nounwind
-define fastcc void @f0(%s.0* %a0) #0 {
+define fastcc void @f0(ptr %a0) #0 {
 b0:
   %v0 = alloca [40 x i32], align 8
-  %v1 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 5
-  %v2 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 6
-  %v3 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 4
-  %v4 = select i1 undef, i32* %v2, i32* %v1
-  %v5 = load i32, i32* %v4, align 4
+  %v1 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 5
+  %v2 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 6
+  %v3 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 4
+  %v4 = select i1 undef, ptr %v2, ptr %v1
+  %v5 = load i32, ptr %v4, align 4
   br i1 false, label %b2, label %b1
 
 b1:                                               ; preds = %b0
-  %v6 = load i32, i32* %v3, align 4
+  %v6 = load i32, ptr %v3, align 4
   br label %b2
 
 b2:                                               ; preds = %b1, %b0
@@ -40,32 +40,32 @@ b2:                                               ; preds = %b1, %b0
 b3:                                               ; preds = %b3, %b2
   %v9 = phi i32 [ %v34, %b3 ], [ %v5, %b2 ]
   %v10 = add nsw i32 %v9, 2
-  %v11 = getelementptr inbounds [40 x i32], [40 x i32]* %v0, i32 0, i32 undef
-  %v12 = load i32, i32* %v11, align 4
+  %v11 = getelementptr inbounds [40 x i32], ptr %v0, i32 0, i32 undef
+  %v12 = load i32, ptr %v11, align 4
   %v13 = mul nsw i32 %v12, %v8
   %v14 = ashr i32 %v13, 15
-  %v15 = getelementptr inbounds [40 x i32], [40 x i32]* %v0, i32 0, i32 %v10
+  %v15 = getelementptr inbounds [40 x i32], ptr %v0, i32 0, i32 %v10
   %v16 = add nsw i32 %v14, 0
-  store i32 %v16, i32* %v15, align 4
+  store i32 %v16, ptr %v15, align 4
   %v17 = add nsw i32 %v9, 3
   %v18 = sub nsw i32 %v17, %v5
-  %v19 = getelementptr inbounds [40 x i32], [40 x i32]* %v0, i32 0, i32 %v18
-  %v20 = load i32, i32* %v19, align 4
+  %v19 = getelementptr inbounds [40 x i32], ptr %v0, i32 0, i32 %v18
+  %v20 = load i32, ptr %v19, align 4
   %v21 = mul nsw i32 %v20, %v8
   %v22 = ashr i32 %v21, 15
-  %v23 = getelementptr inbounds [40 x i32], [40 x i32]* %v0, i32 0, i32 %v17
+  %v23 = getelementptr inbounds [40 x i32], ptr %v0, i32 0, i32 %v17
   %v24 = add nsw i32 %v22, 0
-  store i32 %v24, i32* %v23, align 4
+  store i32 %v24, ptr %v23, align 4
   %v25 = add nsw i32 %v9, 6
   %v26 = sub nsw i32 %v25, %v5
-  %v27 = getelementptr inbounds [40 x i32], [40 x i32]* %v0, i32 0, i32 %v26
-  %v28 = load i32, i32* %v27, align 4
+  %v27 = getelementptr inbounds [40 x i32], ptr %v0, i32 0, i32 %v26
+  %v28 = load i32, ptr %v27, align 4
   %v29 = mul nsw i32 %v28, %v8
   %v30 = ashr i32 %v29, 15
-  %v31 = getelementptr inbounds [40 x i32], [40 x i32]* %v0, i32 0, i32 %v25
-  %v32 = load i32, i32* %v31, align 4
+  %v31 = getelementptr inbounds [40 x i32], ptr %v0, i32 0, i32 %v25
+  %v32 = load i32, ptr %v31, align 4
   %v33 = add nsw i32 %v30, %v32
-  store i32 %v33, i32* %v31, align 4
+  store i32 %v33, ptr %v31, align 4
   %v34 = add nsw i32 %v9, 8
   %v35 = icmp slt i32 %v34, 33
   br i1 %v35, label %b3, label %b4

diff  --git a/llvm/test/CodeGen/Hexagon/swp-stages3.ll b/llvm/test/CodeGen/Hexagon/swp-stages3.ll
index 94f24592bd8af..bf0c8af76c93e 100644
--- a/llvm/test/CodeGen/Hexagon/swp-stages3.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-stages3.ll
@@ -4,7 +4,7 @@
 ; Test that the compiler doesn't seg fault due to incorrect names in epilog.
 
 ; Function Attrs: nounwind
-define void @f0(i16* nocapture %a0, i16* nocapture %a1, i16 signext %a2) #0 {
+define void @f0(ptr nocapture %a0, ptr nocapture %a1, i16 signext %a2) #0 {
 b0:
   %v0 = icmp sgt i16 %a2, 0
   br i1 %v0, label %b1, label %b3
@@ -16,10 +16,10 @@ b1:                                               ; preds = %b0
 b2:                                               ; preds = %b2, %b1
   %v2 = phi i16 [ %v16, %b2 ], [ undef, %b1 ]
   %v3 = phi i32 [ %v17, %b2 ], [ 0, %b1 ]
-  %v4 = phi i16* [ undef, %b2 ], [ %a0, %b1 ]
-  %v5 = phi i16* [ %v6, %b2 ], [ %a1, %b1 ]
-  %v6 = getelementptr inbounds i16, i16* %v5, i32 1
-  %v7 = load i16, i16* %v5, align 2, !tbaa !0
+  %v4 = phi ptr [ undef, %b2 ], [ %a0, %b1 ]
+  %v5 = phi ptr [ %v6, %b2 ], [ %a1, %b1 ]
+  %v6 = getelementptr inbounds i16, ptr %v5, i32 1
+  %v7 = load i16, ptr %v5, align 2, !tbaa !0
   %v8 = sext i16 %v7 to i32
   %v9 = tail call i32 @llvm.hexagon.A2.aslh(i32 %v8)
   %v10 = tail call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v9, i32 undef)
@@ -29,7 +29,7 @@ b2:                                               ; preds = %b2, %b1
   %v14 = tail call i32 @llvm.hexagon.A2.addsat(i32 %v13, i32 32768)
   %v15 = tail call i32 @llvm.hexagon.A2.asrh(i32 %v14)
   %v16 = trunc i32 %v15 to i16
-  store i16 %v16, i16* %v4, align 2, !tbaa !0
+  store i16 %v16, ptr %v4, align 2, !tbaa !0
   %v17 = add i32 %v3, 1
   %v18 = icmp eq i32 %v17, %v1
   br i1 %v18, label %b3, label %b2

diff  --git a/llvm/test/CodeGen/Hexagon/swp-stages4.ll b/llvm/test/CodeGen/Hexagon/swp-stages4.ll
index 9d8fa52341e45..ea88b79de9369 100644
--- a/llvm/test/CodeGen/Hexagon/swp-stages4.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-stages4.ll
@@ -16,13 +16,13 @@
 ; CHECK: endloop
 
 ; Function Attrs: nounwind
-define void @test(i8* noalias nocapture %src, i32 %srcWidth, i32 %srcHeight, i32 %srcStride, i8* noalias nocapture %dst, i32 %dstStride) #0 {
+define void @test(ptr noalias nocapture %src, i32 %srcWidth, i32 %srcHeight, i32 %srcStride, ptr noalias nocapture %dst, i32 %dstStride) #0 {
 entry:
   %sub = add i32 %srcWidth, -1
   %sub1 = add i32 %srcHeight, -1
-  %add.ptr = getelementptr inbounds i8, i8* %src, i32 %srcStride
+  %add.ptr = getelementptr inbounds i8, ptr %src, i32 %srcStride
   %add.ptr.sum = mul i32 %srcStride, 2
-  %add.ptr2 = getelementptr inbounds i8, i8* %src, i32 %add.ptr.sum
+  %add.ptr2 = getelementptr inbounds i8, ptr %src, i32 %add.ptr.sum
   br label %for.body.lr.ph
 
 for.body.lr.ph:
@@ -33,19 +33,19 @@ for.body.lr.ph:
   br label %for.cond
 
 for.cond:
-  %scevgep = getelementptr i8, i8* %dst, i32 %1
-  %scevgep220 = getelementptr i8, i8* %src, i32 %2
-  %scevgep221 = getelementptr i8, i8* %src, i32 %3
-  %arrayidx6 = getelementptr inbounds i8, i8* %src, i32 1
+  %scevgep = getelementptr i8, ptr %dst, i32 %1
+  %scevgep220 = getelementptr i8, ptr %src, i32 %2
+  %scevgep221 = getelementptr i8, ptr %src, i32 %3
+  %arrayidx6 = getelementptr inbounds i8, ptr %src, i32 1
   %add11 = add i32 %srcStride, 1
-  %arrayidx12 = getelementptr inbounds i8, i8* %src, i32 %add11
+  %arrayidx12 = getelementptr inbounds i8, ptr %src, i32 %add11
   br label %for.body75.preheader
 
 for.body75.preheader:
-  %sri = load i8, i8* %arrayidx6, align 1
-  %sri224 = load i8, i8* %src, align 1
-  %sri227 = load i8, i8* %arrayidx12, align 1
-  %sri229 = load i8, i8* %add.ptr, align 1
+  %sri = load i8, ptr %arrayidx6, align 1
+  %sri224 = load i8, ptr %src, align 1
+  %sri227 = load i8, ptr %arrayidx12, align 1
+  %sri229 = load i8, ptr %add.ptr, align 1
   br label %for.body75
 
 for.body75:
@@ -58,8 +58,8 @@ for.body75:
   %conv80 = zext i8 %sr to i32
   %add81 = add nsw i32 %conv80, %conv78
   %add82 = add i32 %j.0211, 1
-  %arrayidx83 = getelementptr inbounds i8, i8* %src, i32 %add82
-  %4 = load i8, i8* %arrayidx83, align 1, !tbaa !0
+  %arrayidx83 = getelementptr inbounds i8, ptr %src, i32 %add82
+  %4 = load i8, ptr %arrayidx83, align 1, !tbaa !0
   %conv84 = zext i8 %4 to i32
   %add85 = add nsw i32 %add81, %conv84
   %conv88 = zext i8 %sr231 to i32
@@ -67,16 +67,16 @@ for.body75:
   %conv91 = zext i8 %sr230 to i32
   %add92 = add nsw i32 %add89, %conv91
   %add.ptr.sum208 = add i32 %add82, %srcStride
-  %arrayidx94 = getelementptr inbounds i8, i8* %src, i32 %add.ptr.sum208
-  %5 = load i8, i8* %arrayidx94, align 1, !tbaa !0
+  %arrayidx94 = getelementptr inbounds i8, ptr %src, i32 %add.ptr.sum208
+  %5 = load i8, ptr %arrayidx94, align 1, !tbaa !0
   %conv95 = zext i8 %5 to i32
   %add96 = add nsw i32 %add92, %conv95
   %mul97 = mul nsw i32 %add96, 7282
   %add98 = add nsw i32 %mul97, 32768
   %shr99209 = lshr i32 %add98, 16
   %conv100 = trunc i32 %shr99209 to i8
-  %arrayidx101 = getelementptr inbounds i8, i8* %dst, i32 %j.0211
-  store i8 %conv100, i8* %arrayidx101, align 1, !tbaa !0
+  %arrayidx101 = getelementptr inbounds i8, ptr %dst, i32 %j.0211
+  store i8 %conv100, ptr %arrayidx101, align 1, !tbaa !0
   %exitcond = icmp eq i32 %add82, %sub
   br i1 %exitcond, label %for.end104.loopexit, label %for.body75
 

diff  --git a/llvm/test/CodeGen/Hexagon/swp-stages5.ll b/llvm/test/CodeGen/Hexagon/swp-stages5.ll
index fdfb2101cd36c..888e6e1d8a769 100644
--- a/llvm/test/CodeGen/Hexagon/swp-stages5.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-stages5.ll
@@ -11,13 +11,13 @@
 ; CHECK: [[REG0]]{{[:0-9]*}} =
 ; CHECK: endloop
 
-define void @fred(i8* noalias nocapture %src, i32 %srcWidth, i32 %srcHeight, i32 %srcStride, i8* noalias nocapture %dst, i32 %dstStride) #0 {
+define void @fred(ptr noalias nocapture %src, i32 %srcWidth, i32 %srcHeight, i32 %srcStride, ptr noalias nocapture %dst, i32 %dstStride) #0 {
 entry:
   %sub = add i32 %srcWidth, -1
   %sub1 = add i32 %srcHeight, -1
-  %add.ptr = getelementptr inbounds i8, i8* %src, i32 %srcStride
+  %add.ptr = getelementptr inbounds i8, ptr %src, i32 %srcStride
   %add.ptr.sum = mul i32 %srcStride, 2
-  %add.ptr2 = getelementptr inbounds i8, i8* %src, i32 %add.ptr.sum
+  %add.ptr2 = getelementptr inbounds i8, ptr %src, i32 %add.ptr.sum
   %cmp212 = icmp ugt i32 %sub1, 1
   br i1 %cmp212, label %for.body.lr.ph, label %for.end
 
@@ -25,12 +25,12 @@ for.body.lr.ph:
   br label %for.body74.preheader
 
 for.body74.preheader:
-  %0 = load i8, i8* %add.ptr, align 1, !tbaa !0
-  %arrayidx40 = getelementptr inbounds i8, i8* %add.ptr, i32 1
-  %1 = load i8, i8* %arrayidx40, align 1, !tbaa !0
-  %2 = load i8, i8* %add.ptr, align 1, !tbaa !0
-  %arrayidx46 = getelementptr inbounds i8, i8* %add.ptr, i32 1
-  %3 = load i8, i8* %arrayidx46, align 1, !tbaa !0
+  %0 = load i8, ptr %add.ptr, align 1, !tbaa !0
+  %arrayidx40 = getelementptr inbounds i8, ptr %add.ptr, i32 1
+  %1 = load i8, ptr %arrayidx40, align 1, !tbaa !0
+  %2 = load i8, ptr %add.ptr, align 1, !tbaa !0
+  %arrayidx46 = getelementptr inbounds i8, ptr %add.ptr, i32 1
+  %3 = load i8, ptr %arrayidx46, align 1, !tbaa !0
   br label %for.body74
 
 for.body74:
@@ -43,16 +43,16 @@ for.body74:
   %conv79 = zext i8 %6 to i32
   %add80 = add nsw i32 %conv79, %conv77
   %add81 = add i32 %j.0211, 1
-  %arrayidx82 = getelementptr inbounds i8, i8* %src, i32 %add81
-  %8 = load i8, i8* %arrayidx82, align 1, !tbaa !0
+  %arrayidx82 = getelementptr inbounds i8, ptr %src, i32 %add81
+  %8 = load i8, ptr %arrayidx82, align 1, !tbaa !0
   %conv83 = zext i8 %8 to i32
   %add84 = add nsw i32 %add80, %conv83
   %conv87 = zext i8 %5 to i32
   %add88 = add nsw i32 %add84, %conv87
   %conv90 = zext i8 %4 to i32
   %add91 = add nsw i32 %add88, %conv90
-  %arrayidx93 = getelementptr inbounds i8, i8* %add.ptr, i32 %add81
-  %9 = load i8, i8* %arrayidx93, align 1, !tbaa !0
+  %arrayidx93 = getelementptr inbounds i8, ptr %add.ptr, i32 %add81
+  %9 = load i8, ptr %arrayidx93, align 1, !tbaa !0
   %conv94 = zext i8 %9 to i32
   %add95 = add nsw i32 %add91, %conv94
   %mul96 = mul nsw i32 %add95, 7282
@@ -60,8 +60,8 @@ for.body74:
   %shr98208 = lshr i32 %add97, 16
   %conv99 = trunc i32 %shr98208 to i8
   %add.ptr5.sum209 = add i32 %j.0211, %dstStride
-  %arrayidx100 = getelementptr inbounds i8, i8* %dst, i32 %add.ptr5.sum209
-  store i8 %conv99, i8* %arrayidx100, align 1, !tbaa !0
+  %arrayidx100 = getelementptr inbounds i8, ptr %dst, i32 %add.ptr5.sum209
+  store i8 %conv99, ptr %arrayidx100, align 1, !tbaa !0
   %exitcond = icmp eq i32 %add81, %sub
   br i1 %exitcond, label %for.end103.loopexit, label %for.body74
 

diff  --git a/llvm/test/CodeGen/Hexagon/swp-swap.ll b/llvm/test/CodeGen/Hexagon/swp-swap.ll
index 4cd073cb16b83..7812d0cd1819b 100644
--- a/llvm/test/CodeGen/Hexagon/swp-swap.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-swap.ll
@@ -5,7 +5,7 @@
 
 ; STATS-NOT: 1 pipeliner   - Number of loops software pipelined
 
-@g0 = common global i32* null, align 4
+@g0 = common global ptr null, align 4
 
 ; Function Attrs: nounwind
 define void @f0(i32 %a0, i32 %a1, i32 %a2) #0 {
@@ -14,19 +14,19 @@ b0:
   br i1 %v0, label %b1, label %b4
 
 b1:                                               ; preds = %b0
-  %v1 = load i32*, i32** @g0, align 4, !tbaa !0
+  %v1 = load ptr, ptr @g0, align 4, !tbaa !0
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
   %v2 = phi i32 [ %a0, %b1 ], [ %v9, %b2 ]
   %v3 = phi i32 [ %a2, %b1 ], [ %v11, %b2 ]
   %v4 = phi i32 [ %a1, %b1 ], [ %v10, %b2 ]
-  %v5 = getelementptr inbounds i32, i32* %v1, i32 %v2
-  %v6 = load i32, i32* %v5, align 4, !tbaa !4
-  %v7 = getelementptr inbounds i32, i32* %v1, i32 %v4
-  %v8 = load i32, i32* %v7, align 4, !tbaa !4
-  store i32 %v8, i32* %v5, align 4, !tbaa !4
-  store i32 %v6, i32* %v7, align 4, !tbaa !4
+  %v5 = getelementptr inbounds i32, ptr %v1, i32 %v2
+  %v6 = load i32, ptr %v5, align 4, !tbaa !4
+  %v7 = getelementptr inbounds i32, ptr %v1, i32 %v4
+  %v8 = load i32, ptr %v7, align 4, !tbaa !4
+  store i32 %v8, ptr %v5, align 4, !tbaa !4
+  store i32 %v6, ptr %v7, align 4, !tbaa !4
   %v9 = add nsw i32 %v2, 1
   %v10 = add nsw i32 %v4, 1
   %v11 = add nsw i32 %v3, -1

diff  --git a/llvm/test/CodeGen/Hexagon/swp-tfri.ll b/llvm/test/CodeGen/Hexagon/swp-tfri.ll
index f0c26045430c2..9b44eaa8c0a63 100644
--- a/llvm/test/CodeGen/Hexagon/swp-tfri.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-tfri.ll
@@ -14,8 +14,8 @@ b1:                                               ; preds = %b0
   br i1 undef, label %b2, label %b4
 
 b2:                                               ; preds = %b1
-  %v0 = load i16, i16* undef, align 2
-  %v1 = load i16, i16* undef, align 2
+  %v0 = load i16, ptr undef, align 2
+  %v1 = load i16, ptr undef, align 2
   br i1 undef, label %b5, label %b3
 
 b3:                                               ; preds = %b5, %b2

diff  --git a/llvm/test/CodeGen/Hexagon/swp-vect-dotprod.ll b/llvm/test/CodeGen/Hexagon/swp-vect-dotprod.ll
index 1d675c8664f4a..85ff24b494e7c 100644
--- a/llvm/test/CodeGen/Hexagon/swp-vect-dotprod.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-vect-dotprod.ll
@@ -29,12 +29,10 @@ polly.loop_body:
   %polly.loopiv13 = phi i32 [ 0, %entry ], [ %polly.next_loopiv, %polly.loop_body ]
   %reduction.012 = phi <2 x i32> [ zeroinitializer, %entry ], [ %addp_vec, %polly.loop_body ]
   %polly.next_loopiv = add nsw i32 %polly.loopiv13, 2
-  %p_arrayidx1 = getelementptr [5000 x i32], [5000 x i32]* @b, i32 0, i32 %polly.loopiv13
-  %p_arrayidx = getelementptr [5000 x i32], [5000 x i32]* @a, i32 0, i32 %polly.loopiv13
-  %vector_ptr = bitcast i32* %p_arrayidx1 to <2 x i32>*
-  %_p_vec_full = load <2 x i32>, <2 x i32>* %vector_ptr, align 8
-  %vector_ptr7 = bitcast i32* %p_arrayidx to <2 x i32>*
-  %_p_vec_full8 = load <2 x i32>, <2 x i32>* %vector_ptr7, align 8
+  %p_arrayidx1 = getelementptr [5000 x i32], ptr @b, i32 0, i32 %polly.loopiv13
+  %p_arrayidx = getelementptr [5000 x i32], ptr @a, i32 0, i32 %polly.loopiv13
+  %_p_vec_full = load <2 x i32>, ptr %p_arrayidx1, align 8
+  %_p_vec_full8 = load <2 x i32>, ptr %p_arrayidx, align 8
   %mulp_vec = mul <2 x i32> %_p_vec_full8, %_p_vec_full
   %addp_vec = add <2 x i32> %mulp_vec, %reduction.012
   %2 = icmp slt i32 %polly.next_loopiv, 5000

diff  --git a/llvm/test/CodeGen/Hexagon/swp-vmult.ll b/llvm/test/CodeGen/Hexagon/swp-vmult.ll
index fd9cdf9b38c90..532a7caaece4f 100644
--- a/llvm/test/CodeGen/Hexagon/swp-vmult.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-vmult.ll
@@ -7,23 +7,23 @@
 ; CHECK-NEXT: [[REG0]] = memw(r{{[0-9]+}}++#4)
 ; CHECK-NEXT: endloop0
 
-define i32 @f0(i32* %a0, i32* %a1, i32 %a2) {
+define i32 @f0(ptr %a0, ptr %a1, i32 %a2) {
 b0:
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ 0, %b0 ], [ %v7, %b1 ]
-  %v1 = phi i32* [ %a0, %b0 ], [ %v10, %b1 ]
-  %v2 = phi i32* [ %a1, %b0 ], [ %v11, %b1 ]
+  %v1 = phi ptr [ %a0, %b0 ], [ %v10, %b1 ]
+  %v2 = phi ptr [ %a1, %b0 ], [ %v11, %b1 ]
   %v3 = phi i32 [ 0, %b0 ], [ %v8, %b1 ]
-  %v4 = load i32, i32* %v1, align 4
-  %v5 = load i32, i32* %v2, align 4
+  %v4 = load i32, ptr %v1, align 4
+  %v5 = load i32, ptr %v2, align 4
   %v6 = mul nsw i32 %v5, %v4
   %v7 = add nsw i32 %v6, %v0
   %v8 = add nsw i32 %v3, 1
   %v9 = icmp eq i32 %v8, 10000
-  %v10 = getelementptr i32, i32* %v1, i32 1
-  %v11 = getelementptr i32, i32* %v2, i32 1
+  %v10 = getelementptr i32, ptr %v1, i32 1
+  %v11 = getelementptr i32, ptr %v2, i32 1
   br i1 %v9, label %b2, label %b1
 
 b2:                                               ; preds = %b1

diff  --git a/llvm/test/CodeGen/Hexagon/swp-vsum.ll b/llvm/test/CodeGen/Hexagon/swp-vsum.ll
index 5dcd2824550d6..39d3517325675 100644
--- a/llvm/test/CodeGen/Hexagon/swp-vsum.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-vsum.ll
@@ -12,19 +12,19 @@
 ; CHECKV60: memw(r{{[0-9]+}}++#4)
 ; CHECKV60: add(r{{[0-9]+}},r{{[0-9]+}})
 
-define i32 @f0(i32* %a0, i32 %a1) {
+define i32 @f0(ptr %a0, i32 %a1) {
 b0:
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ 0, %b0 ], [ %v4, %b1 ]
-  %v1 = phi i32* [ %a0, %b0 ], [ %v7, %b1 ]
+  %v1 = phi ptr [ %a0, %b0 ], [ %v7, %b1 ]
   %v2 = phi i32 [ 0, %b0 ], [ %v5, %b1 ]
-  %v3 = load i32, i32* %v1, align 4
+  %v3 = load i32, ptr %v1, align 4
   %v4 = add nsw i32 %v3, %v0
   %v5 = add nsw i32 %v2, 1
   %v6 = icmp eq i32 %v5, 10000
-  %v7 = getelementptr i32, i32* %v1, i32 1
+  %v7 = getelementptr i32, ptr %v1, i32 1
   br i1 %v6, label %b2, label %b1
 
 b2:                                               ; preds = %b1

diff  --git a/llvm/test/CodeGen/Hexagon/swp-xxh2.ll b/llvm/test/CodeGen/Hexagon/swp-xxh2.ll
index 55f39e263d533..16ca2136ca0a9 100644
--- a/llvm/test/CodeGen/Hexagon/swp-xxh2.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-xxh2.ll
@@ -12,7 +12,7 @@
 ; CHECK: = PHI
 
 ; Function Attrs: nounwind
-define void @f0(i32 %a0, i32* %a1) #0 {
+define void @f0(i32 %a0, ptr %a1) #0 {
 b0:
   %v0 = ashr i32 %a0, 1
   br label %b1
@@ -21,8 +21,8 @@ b1:                                               ; preds = %b1, %b0
   %v1 = phi i64 [ %v8, %b1 ], [ undef, %b0 ]
   %v2 = phi i32 [ %v9, %b1 ], [ 0, %b0 ]
   %v3 = phi i32 [ %v7, %b1 ], [ undef, %b0 ]
-  %v4 = inttoptr i32 %v3 to i32*
-  %v5 = load i32, i32* %v4, align 4, !tbaa !0
+  %v4 = inttoptr i32 %v3 to ptr
+  %v5 = load i32, ptr %v4, align 4, !tbaa !0
   %v6 = tail call i64 @llvm.hexagon.S2.packhl(i32 %v5, i32 undef)
   %v7 = add nsw i32 %v3, -16
   %v8 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 %v1, i64 undef, i64 %v6)
@@ -32,8 +32,8 @@ b1:                                               ; preds = %b1, %b0
 
 b2:                                               ; preds = %b1
   %v11 = trunc i64 %v8 to i32
-  %v12 = getelementptr inbounds i32, i32* %a1, i32 8
-  store i32 %v11, i32* %v12, align 4, !tbaa !0
+  %v12 = getelementptr inbounds i32, ptr %a1, i32 8
+  store i32 %v11, ptr %v12, align 4, !tbaa !0
   call void @llvm.trap()
   unreachable
 }

diff  --git a/llvm/test/CodeGen/Hexagon/tail-call-mem-intrinsics.ll b/llvm/test/CodeGen/Hexagon/tail-call-mem-intrinsics.ll
index 7f0fb6281ff55..d701a72b2fcc0 100644
--- a/llvm/test/CodeGen/Hexagon/tail-call-mem-intrinsics.ll
+++ b/llvm/test/CodeGen/Hexagon/tail-call-mem-intrinsics.ll
@@ -2,30 +2,30 @@
 
 ; CHECK-LABEL: tail_memcpy:
 ; CHECK: jump memcpy
-define void @tail_memcpy(i8* nocapture %p, i8* nocapture readonly %q, i32 %n) #0 {
+define void @tail_memcpy(ptr nocapture %p, ptr nocapture readonly %q, i32 %n) #0 {
 entry:
-  tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i32(ptr %p, ptr %q, i32 %n, i1 false)
   ret void
 }
 
 ; CHECK-LABEL: tail_memmove:
 ; CHECK: jump memmove
-define void @tail_memmove(i8* nocapture %p, i8* nocapture readonly %q, i32 %n) #0 {
+define void @tail_memmove(ptr nocapture %p, ptr nocapture readonly %q, i32 %n) #0 {
 entry:
-  tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false)
+  tail call void @llvm.memmove.p0.p0.i32(ptr %p, ptr %q, i32 %n, i1 false)
   ret void
 }
 
 ; CHECK-LABEL: tail_memset:
 ; CHECK: jump memset
-define void @tail_memset(i8* nocapture %p, i8 %c, i32 %n) #0 {
+define void @tail_memset(ptr nocapture %p, i8 %c, i32 %n) #0 {
 entry:
-  tail call void @llvm.memset.p0i8.i32(i8* %p, i8 %c, i32 %n, i1 false)
+  tail call void @llvm.memset.p0.i32(ptr %p, i8 %c, i32 %n, i1 false)
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0
-declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) #0
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32, i1) #0
+declare void @llvm.memmove.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32, i1) #0
+declare void @llvm.memset.p0.i32(ptr nocapture, i8, i32, i1) #0
 
 attributes #0 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/tail-dup-subreg-map.ll b/llvm/test/CodeGen/Hexagon/tail-dup-subreg-map.ll
index bd87e2a2b213d..073a953d89bb5 100644
--- a/llvm/test/CodeGen/Hexagon/tail-dup-subreg-map.ll
+++ b/llvm/test/CodeGen/Hexagon/tail-dup-subreg-map.ll
@@ -12,11 +12,11 @@ target triple = "hexagon"
 %struct.0 = type { i64, i16 }
 %struct.1 = type { i64, i64 }
 
-declare hidden fastcc void @foo(%struct.0* noalias nocapture, i8 signext, i8 zeroext, i32, i64, i64) unnamed_addr #0
+declare hidden fastcc void @foo(ptr noalias nocapture, i8 signext, i8 zeroext, i32, i64, i64) unnamed_addr #0
 
-define void @fred(%struct.0* noalias nocapture sret(%struct.0) %agg.result, %struct.1* byval(%struct.1) nocapture readonly align 8 %a, i32 %a0) #1 {
+define void @fred(ptr noalias nocapture sret(%struct.0) %agg.result, ptr byval(%struct.1) nocapture readonly align 8 %a, i32 %a0) #1 {
 entry:
-  %0 = load i64, i64* undef, align 8
+  %0 = load i64, ptr undef, align 8
   switch i32 %a0, label %if.else [
     i32 32767, label %if.then
     i32 0, label %if.then7
@@ -59,7 +59,7 @@ if.end13:                                         ; preds = %if.else, %if.else16
   %shl2.i = shl i64 %aSig0.2, 15
   %shr.i = lshr i64 %aSig1.1, 49
   %or.i = or i64 %shl2.i, %shr.i
-  tail call fastcc void @foo(%struct.0* noalias %agg.result, i8 signext 80, i8 zeroext undef, i32 %aExp.0, i64 %or.i, i64 undef)
+  tail call fastcc void @foo(ptr noalias %agg.result, i8 signext 80, i8 zeroext undef, i32 %aExp.0, i64 %or.i, i64 undef)
   unreachable
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/tc_duplex.ll b/llvm/test/CodeGen/Hexagon/tc_duplex.ll
index a98e56293644e..18344ecf02d40 100644
--- a/llvm/test/CodeGen/Hexagon/tc_duplex.ll
+++ b/llvm/test/CodeGen/Hexagon/tc_duplex.ll
@@ -12,15 +12,15 @@
 ; CHECK-NEXT: memw
 ; CHECK: }
 
-define i32 @test(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32 %n) local_unnamed_addr #0 {
+define i32 @test(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, i32 %n) local_unnamed_addr #0 {
 entry:
-  %0 = load i32, i32* %a, align 4
-  %1 = load i32, i32* %b, align 4
+  %0 = load i32, ptr %a, align 4
+  %1 = load i32, ptr %b, align 4
   %mul = mul nsw i32 %1, %0
-  %arrayidx.inc = getelementptr i32, i32* %a, i32 1
-  %arrayidx1.inc = getelementptr i32, i32* %b, i32 1
-  %2 = load i32, i32* %arrayidx.inc, align 4
-  %3 = load i32, i32* %arrayidx1.inc, align 4
+  %arrayidx.inc = getelementptr i32, ptr %a, i32 1
+  %arrayidx1.inc = getelementptr i32, ptr %b, i32 1
+  %2 = load i32, ptr %arrayidx.inc, align 4
+  %3 = load i32, ptr %arrayidx1.inc, align 4
   %mul.1 = mul nsw i32 %3, %2
   %add.1 = add nsw i32 %mul.1, %mul
   ret i32 %add.1

diff  --git a/llvm/test/CodeGen/Hexagon/tc_duplex_asm.ll b/llvm/test/CodeGen/Hexagon/tc_duplex_asm.ll
index 1c6366d1f4a13..3f97f98aefc3b 100644
--- a/llvm/test/CodeGen/Hexagon/tc_duplex_asm.ll
+++ b/llvm/test/CodeGen/Hexagon/tc_duplex_asm.ll
@@ -7,15 +7,15 @@
 ; CHECK: memw{{.*}};{{.*}}memw
 ; CHECK: memw{{.*}};{{.*}}memw
 
-define i32 @test(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32 %n) local_unnamed_addr #0 {
+define i32 @test(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, i32 %n) local_unnamed_addr #0 {
 entry:
-  %0 = load i32, i32* %a, align 4
-  %1 = load i32, i32* %b, align 4
+  %0 = load i32, ptr %a, align 4
+  %1 = load i32, ptr %b, align 4
   %mul = mul nsw i32 %1, %0
-  %arrayidx.inc = getelementptr i32, i32* %a, i32 1
-  %arrayidx1.inc = getelementptr i32, i32* %b, i32 1
-  %2 = load i32, i32* %arrayidx.inc, align 4
-  %3 = load i32, i32* %arrayidx1.inc, align 4
+  %arrayidx.inc = getelementptr i32, ptr %a, i32 1
+  %arrayidx1.inc = getelementptr i32, ptr %b, i32 1
+  %2 = load i32, ptr %arrayidx.inc, align 4
+  %3 = load i32, ptr %arrayidx1.inc, align 4
   %mul.1 = mul nsw i32 %3, %2
   %add.1 = add nsw i32 %mul.1, %mul
   ret i32 %add.1

diff  --git a/llvm/test/CodeGen/Hexagon/tc_sched.ll b/llvm/test/CodeGen/Hexagon/tc_sched.ll
index 130f49207be3e..2ef2ce6ce6940 100644
--- a/llvm/test/CodeGen/Hexagon/tc_sched.ll
+++ b/llvm/test/CodeGen/Hexagon/tc_sched.ll
@@ -10,15 +10,15 @@
 ; CHECK: {
 ; CHECK-NEXT: = add([[REG2]],[[REG1]])
 
-define i32 @test(i32* nocapture readonly %p) local_unnamed_addr #0 {
+define i32 @test(ptr nocapture readonly %p) local_unnamed_addr #0 {
 entry:
-  %incdec.ptr = getelementptr inbounds i32, i32* %p, i32 1
-  %0 = load i32, i32* %p, align 4
-  %incdec.ptr1 = getelementptr inbounds i32, i32* %p, i32 2
-  %1 = load i32, i32* %incdec.ptr, align 4
-  %incdec.ptr2 = getelementptr inbounds i32, i32* %p, i32 3
-  %2 = load i32, i32* %incdec.ptr1, align 4
-  %3 = load i32, i32* %incdec.ptr2, align 4
+  %incdec.ptr = getelementptr inbounds i32, ptr %p, i32 1
+  %0 = load i32, ptr %p, align 4
+  %incdec.ptr1 = getelementptr inbounds i32, ptr %p, i32 2
+  %1 = load i32, ptr %incdec.ptr, align 4
+  %incdec.ptr2 = getelementptr inbounds i32, ptr %p, i32 3
+  %2 = load i32, ptr %incdec.ptr1, align 4
+  %3 = load i32, ptr %incdec.ptr2, align 4
   %add = add nsw i32 %1, %0
   %add4 = add nsw i32 %3, %2
   %mul = mul nsw i32 %add4, %add
@@ -36,15 +36,15 @@ entry:
 ; CHECK-NEXT: {
 ; CHECK-NEXT: = sub([[REG7]]
 
-define i32 @test1(i32* nocapture readonly %p) local_unnamed_addr #0 {
+define i32 @test1(ptr nocapture readonly %p) local_unnamed_addr #0 {
 entry:
-  %incdec.ptr = getelementptr inbounds i32, i32* %p, i32 1
-  %0 = load i32, i32* %p, align 4
-  %incdec.ptr1 = getelementptr inbounds i32, i32* %p, i32 2
-  %1 = load i32, i32* %incdec.ptr, align 4
-  %incdec.ptr2 = getelementptr inbounds i32, i32* %p, i32 3
-  %2 = load i32, i32* %incdec.ptr1, align 4
-  %3 = load i32, i32* %incdec.ptr2, align 4
+  %incdec.ptr = getelementptr inbounds i32, ptr %p, i32 1
+  %0 = load i32, ptr %p, align 4
+  %incdec.ptr1 = getelementptr inbounds i32, ptr %p, i32 2
+  %1 = load i32, ptr %incdec.ptr, align 4
+  %incdec.ptr2 = getelementptr inbounds i32, ptr %p, i32 3
+  %2 = load i32, ptr %incdec.ptr1, align 4
+  %3 = load i32, ptr %incdec.ptr2, align 4
   %add4.neg = add i32 %1, %0
   %add = sub i32 %add4.neg, %2
   %sub = sub i32 %add, %3
@@ -61,15 +61,15 @@ entry:
 ; CHECK: }
 ; CHECK: = mpyi
 
-define i32 @test2(i32* nocapture readonly %p) local_unnamed_addr #1 {
+define i32 @test2(ptr nocapture readonly %p) local_unnamed_addr #1 {
 entry:
-  %incdec.ptr = getelementptr inbounds i32, i32* %p, i32 1
-  %0 = load i32, i32* %p, align 4
-  %incdec.ptr1 = getelementptr inbounds i32, i32* %p, i32 2
-  %1 = load i32, i32* %incdec.ptr, align 4
-  %incdec.ptr2 = getelementptr inbounds i32, i32* %p, i32 3
-  %2 = load i32, i32* %incdec.ptr1, align 4
-  %3 = load i32, i32* %incdec.ptr2, align 4
+  %incdec.ptr = getelementptr inbounds i32, ptr %p, i32 1
+  %0 = load i32, ptr %p, align 4
+  %incdec.ptr1 = getelementptr inbounds i32, ptr %p, i32 2
+  %1 = load i32, ptr %incdec.ptr, align 4
+  %incdec.ptr2 = getelementptr inbounds i32, ptr %p, i32 3
+  %2 = load i32, ptr %incdec.ptr1, align 4
+  %3 = load i32, ptr %incdec.ptr2, align 4
   %mul = mul nsw i32 %1, %0
   %mul4 = mul nsw i32 %3, %2
   %mul5 = mul nsw i32 %3, %0

diff  --git a/llvm/test/CodeGen/Hexagon/tc_sched1.ll b/llvm/test/CodeGen/Hexagon/tc_sched1.ll
index 7ea71f730d11a..261b6fdcce5b3 100644
--- a/llvm/test/CodeGen/Hexagon/tc_sched1.ll
+++ b/llvm/test/CodeGen/Hexagon/tc_sched1.ll
@@ -17,15 +17,15 @@
 ; CHECK: jumpr
 ; CHECK: }
 
-define i32 @test(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32 %n) local_unnamed_addr #0 {
+define i32 @test(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, i32 %n) local_unnamed_addr #0 {
 entry:
-  %0 = load i32, i32* %a, align 4
-  %1 = load i32, i32* %b, align 4
+  %0 = load i32, ptr %a, align 4
+  %1 = load i32, ptr %b, align 4
   %mul = mul nsw i32 %1, %0
-  %arrayidx.inc = getelementptr i32, i32* %a, i32 1
-  %arrayidx1.inc = getelementptr i32, i32* %b, i32 1
-  %2 = load i32, i32* %arrayidx.inc, align 4
-  %3 = load i32, i32* %arrayidx1.inc, align 4
+  %arrayidx.inc = getelementptr i32, ptr %a, i32 1
+  %arrayidx1.inc = getelementptr i32, ptr %b, i32 1
+  %2 = load i32, ptr %arrayidx.inc, align 4
+  %3 = load i32, ptr %arrayidx1.inc, align 4
   %mul.1 = mul nsw i32 %3, %2
   %add.1 = add nsw i32 %mul.1, %mul
   ret i32 %add.1

diff  --git a/llvm/test/CodeGen/Hexagon/tfr-to-combine.ll b/llvm/test/CodeGen/Hexagon/tfr-to-combine.ll
index 86801dbc71f18..98895dad9df48 100644
--- a/llvm/test/CodeGen/Hexagon/tfr-to-combine.ll
+++ b/llvm/test/CodeGen/Hexagon/tfr-to-combine.ll
@@ -21,7 +21,7 @@ entry:
 ; CHECK: combine(#0,r{{[0-9]+}})
 define i32 @test2() #0 {
 entry:
-  %t0 = load i16, i16* @c, align 2
+  %t0 = load i16, ptr @c, align 2
   %t1 = zext i16 %t0 to i32
   call void @test0b(i32 %t1, i32 0, i32 %t1, i32 0)
   ret i32 0

diff  --git a/llvm/test/CodeGen/Hexagon/tied_oper.ll b/llvm/test/CodeGen/Hexagon/tied_oper.ll
index a995506666db0..5f98944818f92 100644
--- a/llvm/test/CodeGen/Hexagon/tied_oper.ll
+++ b/llvm/test/CodeGen/Hexagon/tied_oper.ll
@@ -5,23 +5,23 @@
 target triple = "hexagon-unknown--elf"
 
 ; Function Attrs: nounwind
-define void @f0(i16* nocapture %a0) #0 {
+define void @f0(ptr nocapture %a0) #0 {
 b0:
   br label %b1
 
 b1:                                               ; preds = %b5, %b0
-  %v0 = phi i16* [ %a0, %b0 ], [ %v5, %b5 ]
+  %v0 = phi ptr [ %a0, %b0 ], [ %v5, %b5 ]
   %v1 = phi i16 [ undef, %b0 ], [ %v10, %b5 ]
   br i1 undef, label %b2, label %b3
 
 b2:                                               ; preds = %b1
-  %v2 = getelementptr inbounds i16, i16* %v0, i32 1
-  %v3 = load i16, i16* %v0, align 2, !tbaa !0
+  %v2 = getelementptr inbounds i16, ptr %v0, i32 1
+  %v3 = load i16, ptr %v0, align 2, !tbaa !0
   br label %b3
 
 b3:                                               ; preds = %b2, %b1
   %v4 = phi i16 [ %v3, %b2 ], [ %v1, %b1 ]
-  %v5 = phi i16* [ %v2, %b2 ], [ %v0, %b1 ]
+  %v5 = phi ptr [ %v2, %b2 ], [ %v0, %b1 ]
   %v6 = lshr i16 %v4, 4
   %v7 = zext i16 %v6 to i32
   %v8 = and i32 %v7, 15

diff  --git a/llvm/test/CodeGen/Hexagon/tiny_bkfir_artdeps.ll b/llvm/test/CodeGen/Hexagon/tiny_bkfir_artdeps.ll
index 89f9daf08a884..d4103d1da76b0 100644
--- a/llvm/test/CodeGen/Hexagon/tiny_bkfir_artdeps.ll
+++ b/llvm/test/CodeGen/Hexagon/tiny_bkfir_artdeps.ll
@@ -5,14 +5,12 @@
 
 ; CHECK: Ord  Latency=0 Artificial
 
-define void @bkfir(i32* nocapture readonly %in, i32* nocapture readonly %coefs, i32 %tap, i32 %length, i32* nocapture %out) local_unnamed_addr #0 {
+define void @bkfir(ptr nocapture readonly %in, ptr nocapture readonly %coefs, i32 %tap, i32 %length, ptr nocapture %out) local_unnamed_addr #0 {
 entry:
-  %0 = bitcast i32* %out to i64*
   %cmp141 = icmp sgt i32 %length, 0
   br i1 %cmp141, label %for.body.lr.ph, label %for.end52
 
 for.body.lr.ph:
-  %1 = bitcast i32* %coefs to i64*
   %cmp8127 = icmp sgt i32 %tap, 0
   br i1 %cmp8127, label %for.body.us.preheader, label %for.body.lr.ph.split
 
@@ -20,101 +18,98 @@ for.body.us.preheader:
   br label %for.body.us
 
 for.body.us:
-  %add.ptr.us.phi = phi i32* [ %add.ptr.us.inc, %for.cond7.for.end_crit_edge.us ], [ %in, %for.body.us.preheader ]
+  %add.ptr.us.phi = phi ptr [ %add.ptr.us.inc, %for.cond7.for.end_crit_edge.us ], [ %in, %for.body.us.preheader ]
   %i.0143.us = phi i32 [ %add51.us, %for.cond7.for.end_crit_edge.us ], [ 0, %for.body.us.preheader ]
-  %optr.0142.us = phi i64* [ %incdec.ptr49.us, %for.cond7.for.end_crit_edge.us ], [ %0, %for.body.us.preheader ]
-  %2 = bitcast i32* %add.ptr.us.phi to i64*
-  %incdec.ptr.us = getelementptr inbounds i32, i32* %add.ptr.us.phi, i32 2
-  %3 = bitcast i32* %incdec.ptr.us to i64*
-  %4 = load i64, i64* %2, align 8
-  %incdec.ptr1.us = getelementptr inbounds i32, i32* %add.ptr.us.phi, i32 4
-  %5 = bitcast i32* %incdec.ptr1.us to i64*
-  %6 = load i64, i64* %3, align 8
-  %_Q6V64_internal_union.sroa.0.0.extract.trunc.us = trunc i64 %6 to i32
-  %_Q6V64_internal_union2.sroa.3.0.extract.shift.us = lshr i64 %4, 32
+  %optr.0142.us = phi ptr [ %incdec.ptr49.us, %for.cond7.for.end_crit_edge.us ], [ %out, %for.body.us.preheader ]
+  %incdec.ptr.us = getelementptr inbounds i32, ptr %add.ptr.us.phi, i32 2
+  %0 = load i64, ptr %add.ptr.us.phi, align 8
+  %incdec.ptr1.us = getelementptr inbounds i32, ptr %add.ptr.us.phi, i32 4
+  %1 = load i64, ptr %incdec.ptr.us, align 8
+  %_Q6V64_internal_union.sroa.0.0.extract.trunc.us = trunc i64 %1 to i32
+  %_Q6V64_internal_union2.sroa.3.0.extract.shift.us = lshr i64 %0, 32
   %_Q6V64_internal_union2.sroa.3.0.extract.trunc.us = trunc i64 %_Q6V64_internal_union2.sroa.3.0.extract.shift.us to i32
-  %7 = tail call i64 @llvm.hexagon.A2.combinew(i32 %_Q6V64_internal_union.sroa.0.0.extract.trunc.us, i32 %_Q6V64_internal_union2.sroa.3.0.extract.trunc.us)
-  %add.ptr.us.inc = getelementptr i32, i32* %add.ptr.us.phi, i32 4
+  %2 = tail call i64 @llvm.hexagon.A2.combinew(i32 %_Q6V64_internal_union.sroa.0.0.extract.trunc.us, i32 %_Q6V64_internal_union2.sroa.3.0.extract.trunc.us)
+  %add.ptr.us.inc = getelementptr i32, ptr %add.ptr.us.phi, i32 4
   br label %for.body9.us
 
 for.body9.us:
   %j.0137.us = phi i32 [ 0, %for.body.us ], [ %add.us, %for.body9.us ]
-  %x0x1.0136.us = phi i64 [ %4, %for.body.us ], [ %10, %for.body9.us ]
-  %x2x3.0135.us = phi i64 [ %6, %for.body.us ], [ %11, %for.body9.us ]
-  %x1x2.0134.us = phi i64 [ %7, %for.body.us ], [ %13, %for.body9.us ]
-  %iptrD.0133.us = phi i64* [ %5, %for.body.us ], [ %incdec.ptr13.us, %for.body9.us ]
-  %iptrC.0132.us = phi i64* [ %1, %for.body.us ], [ %incdec.ptr11.us, %for.body9.us ]
-  %sum0.0131.us = phi i64 [ 0, %for.body.us ], [ %18, %for.body9.us ]
-  %sum1.0130.us = phi i64 [ 0, %for.body.us ], [ %19, %for.body9.us ]
-  %sum2.0129.us = phi i64 [ 0, %for.body.us ], [ %20, %for.body9.us ]
-  %sum3.0128.us = phi i64 [ 0, %for.body.us ], [ %21, %for.body9.us ]
-  %incdec.ptr10.us = getelementptr inbounds i64, i64* %iptrC.0132.us, i32 1
-  %8 = load i64, i64* %iptrC.0132.us, align 8
-  %incdec.ptr11.us = getelementptr inbounds i64, i64* %iptrC.0132.us, i32 2
-  %9 = load i64, i64* %incdec.ptr10.us, align 8
-  %incdec.ptr12.us = getelementptr inbounds i64, i64* %iptrD.0133.us, i32 1
-  %10 = load i64, i64* %iptrD.0133.us, align 8
-  %incdec.ptr13.us = getelementptr inbounds i64, i64* %iptrD.0133.us, i32 2
-  %11 = load i64, i64* %incdec.ptr12.us, align 8
-  %_Q6V64_internal_union14.sroa.0.0.extract.trunc.us = trunc i64 %10 to i32
-  %_Q6V64_internal_union14.sroa.4.0.extract.shift.us = lshr i64 %10, 32
+  %x0x1.0136.us = phi i64 [ %0, %for.body.us ], [ %5, %for.body9.us ]
+  %x2x3.0135.us = phi i64 [ %1, %for.body.us ], [ %6, %for.body9.us ]
+  %x1x2.0134.us = phi i64 [ %2, %for.body.us ], [ %8, %for.body9.us ]
+  %iptrD.0133.us = phi ptr [ %incdec.ptr1.us, %for.body.us ], [ %incdec.ptr13.us, %for.body9.us ]
+  %iptrC.0132.us = phi ptr [ %coefs, %for.body.us ], [ %incdec.ptr11.us, %for.body9.us ]
+  %sum0.0131.us = phi i64 [ 0, %for.body.us ], [ %13, %for.body9.us ]
+  %sum1.0130.us = phi i64 [ 0, %for.body.us ], [ %14, %for.body9.us ]
+  %sum2.0129.us = phi i64 [ 0, %for.body.us ], [ %15, %for.body9.us ]
+  %sum3.0128.us = phi i64 [ 0, %for.body.us ], [ %16, %for.body9.us ]
+  %incdec.ptr10.us = getelementptr inbounds i64, ptr %iptrC.0132.us, i32 1
+  %3 = load i64, ptr %iptrC.0132.us, align 8
+  %incdec.ptr11.us = getelementptr inbounds i64, ptr %iptrC.0132.us, i32 2
+  %4 = load i64, ptr %incdec.ptr10.us, align 8
+  %incdec.ptr12.us = getelementptr inbounds i64, ptr %iptrD.0133.us, i32 1
+  %5 = load i64, ptr %iptrD.0133.us, align 8
+  %incdec.ptr13.us = getelementptr inbounds i64, ptr %iptrD.0133.us, i32 2
+  %6 = load i64, ptr %incdec.ptr12.us, align 8
+  %_Q6V64_internal_union14.sroa.0.0.extract.trunc.us = trunc i64 %5 to i32
+  %_Q6V64_internal_union14.sroa.4.0.extract.shift.us = lshr i64 %5, 32
   %_Q6V64_internal_union19.sroa.3.0.extract.shift.us = lshr i64 %x2x3.0135.us, 32
   %_Q6V64_internal_union19.sroa.3.0.extract.trunc.us = trunc i64 %_Q6V64_internal_union19.sroa.3.0.extract.shift.us to i32
-  %12 = tail call i64 @llvm.hexagon.A2.combinew(i32 %_Q6V64_internal_union14.sroa.0.0.extract.trunc.us, i32 %_Q6V64_internal_union19.sroa.3.0.extract.trunc.us)
-  %_Q6V64_internal_union24.sroa.0.0.extract.trunc.us = trunc i64 %11 to i32
+  %7 = tail call i64 @llvm.hexagon.A2.combinew(i32 %_Q6V64_internal_union14.sroa.0.0.extract.trunc.us, i32 %_Q6V64_internal_union19.sroa.3.0.extract.trunc.us)
+  %_Q6V64_internal_union24.sroa.0.0.extract.trunc.us = trunc i64 %6 to i32
   %_Q6V64_internal_union29.sroa.3.0.extract.trunc.us = trunc i64 %_Q6V64_internal_union14.sroa.4.0.extract.shift.us to i32
-  %13 = tail call i64 @llvm.hexagon.A2.combinew(i32 %_Q6V64_internal_union24.sroa.0.0.extract.trunc.us, i32 %_Q6V64_internal_union29.sroa.3.0.extract.trunc.us)
-  %14 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %sum0.0131.us, i64 %x0x1.0136.us, i64 %8)
-  %15 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %sum1.0130.us, i64 %x1x2.0134.us, i64 %8)
-  %16 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %sum2.0129.us, i64 %x2x3.0135.us, i64 %8)
-  %17 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %sum3.0128.us, i64 %12, i64 %8)
-  %18 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %14, i64 %x2x3.0135.us, i64 %9)
-  %19 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %15, i64 %12, i64 %9)
-  %20 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %16, i64 %10, i64 %9)
-  %21 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %17, i64 %13, i64 %9)
+  %8 = tail call i64 @llvm.hexagon.A2.combinew(i32 %_Q6V64_internal_union24.sroa.0.0.extract.trunc.us, i32 %_Q6V64_internal_union29.sroa.3.0.extract.trunc.us)
+  %9 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %sum0.0131.us, i64 %x0x1.0136.us, i64 %3)
+  %10 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %sum1.0130.us, i64 %x1x2.0134.us, i64 %3)
+  %11 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %sum2.0129.us, i64 %x2x3.0135.us, i64 %3)
+  %12 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %sum3.0128.us, i64 %7, i64 %3)
+  %13 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %9, i64 %x2x3.0135.us, i64 %4)
+  %14 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %10, i64 %7, i64 %4)
+  %15 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %11, i64 %5, i64 %4)
+  %16 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %12, i64 %8, i64 %4)
   %add.us = add nuw nsw i32 %j.0137.us, 4
   %cmp8.us = icmp slt i32 %add.us, %tap
   br i1 %cmp8.us, label %for.body9.us, label %for.cond7.for.end_crit_edge.us
 
 for.cond7.for.end_crit_edge.us:
-  %22 = ashr i64 %18, 39
-  %23 = ashr i64 %19, 39
-  %24 = ashr i64 %20, 39
-  %25 = ashr i64 %21, 39
-  %26 = tail call i32 @llvm.hexagon.A2.sat(i64 %22)
-  %27 = tail call i32 @llvm.hexagon.A2.sat(i64 %23)
-  %28 = tail call i32 @llvm.hexagon.A2.sat(i64 %24)
-  %29 = tail call i32 @llvm.hexagon.A2.sat(i64 %25)
-  %_Q6V64_internal_union34.sroa.4.0.insert.ext.us = zext i32 %27 to i64
+  %17 = ashr i64 %13, 39
+  %18 = ashr i64 %14, 39
+  %19 = ashr i64 %15, 39
+  %20 = ashr i64 %16, 39
+  %21 = tail call i32 @llvm.hexagon.A2.sat(i64 %17)
+  %22 = tail call i32 @llvm.hexagon.A2.sat(i64 %18)
+  %23 = tail call i32 @llvm.hexagon.A2.sat(i64 %19)
+  %24 = tail call i32 @llvm.hexagon.A2.sat(i64 %20)
+  %_Q6V64_internal_union34.sroa.4.0.insert.ext.us = zext i32 %22 to i64
   %_Q6V64_internal_union34.sroa.4.0.insert.shift.us = shl nuw i64 %_Q6V64_internal_union34.sroa.4.0.insert.ext.us, 32
-  %_Q6V64_internal_union34.sroa.0.0.insert.ext.us = zext i32 %26 to i64
+  %_Q6V64_internal_union34.sroa.0.0.insert.ext.us = zext i32 %21 to i64
   %_Q6V64_internal_union34.sroa.0.0.insert.insert.us = or i64 %_Q6V64_internal_union34.sroa.4.0.insert.shift.us, %_Q6V64_internal_union34.sroa.0.0.insert.ext.us
-  %incdec.ptr41.us = getelementptr inbounds i64, i64* %optr.0142.us, i32 1
-  store i64 %_Q6V64_internal_union34.sroa.0.0.insert.insert.us, i64* %optr.0142.us, align 8
-  %_Q6V64_internal_union42.sroa.4.0.insert.ext.us = zext i32 %29 to i64
+  %incdec.ptr41.us = getelementptr inbounds i64, ptr %optr.0142.us, i32 1
+  store i64 %_Q6V64_internal_union34.sroa.0.0.insert.insert.us, ptr %optr.0142.us, align 8
+  %_Q6V64_internal_union42.sroa.4.0.insert.ext.us = zext i32 %24 to i64
   %_Q6V64_internal_union42.sroa.4.0.insert.shift.us = shl nuw i64 %_Q6V64_internal_union42.sroa.4.0.insert.ext.us, 32
-  %_Q6V64_internal_union42.sroa.0.0.insert.ext.us = zext i32 %28 to i64
+  %_Q6V64_internal_union42.sroa.0.0.insert.ext.us = zext i32 %23 to i64
   %_Q6V64_internal_union42.sroa.0.0.insert.insert.us = or i64 %_Q6V64_internal_union42.sroa.4.0.insert.shift.us, %_Q6V64_internal_union42.sroa.0.0.insert.ext.us
-  %incdec.ptr49.us = getelementptr inbounds i64, i64* %optr.0142.us, i32 2
-  store i64 %_Q6V64_internal_union42.sroa.0.0.insert.insert.us, i64* %incdec.ptr41.us, align 8
+  %incdec.ptr49.us = getelementptr inbounds i64, ptr %optr.0142.us, i32 2
+  store i64 %_Q6V64_internal_union42.sroa.0.0.insert.insert.us, ptr %incdec.ptr41.us, align 8
   %add51.us = add nuw nsw i32 %i.0143.us, 4
   %cmp.us = icmp slt i32 %add51.us, %length
   br i1 %cmp.us, label %for.body.us, label %for.end52
 
 for.body.lr.ph.split:
-  %30 = tail call i32 @llvm.hexagon.A2.sat(i64 0)
-  %_Q6V64_internal_union34.sroa.4.0.insert.ext = zext i32 %30 to i64
+  %25 = tail call i32 @llvm.hexagon.A2.sat(i64 0)
+  %_Q6V64_internal_union34.sroa.4.0.insert.ext = zext i32 %25 to i64
   %_Q6V64_internal_union34.sroa.4.0.insert.shift = shl nuw i64 %_Q6V64_internal_union34.sroa.4.0.insert.ext, 32
   %_Q6V64_internal_union34.sroa.0.0.insert.insert = or i64 %_Q6V64_internal_union34.sroa.4.0.insert.shift, %_Q6V64_internal_union34.sroa.4.0.insert.ext
   br label %for.body
 
 for.body:
   %i.0143 = phi i32 [ 0, %for.body.lr.ph.split ], [ %add51, %for.body ]
-  %optr.0142 = phi i64* [ %0, %for.body.lr.ph.split ], [ %incdec.ptr49, %for.body ]
-  %incdec.ptr41 = getelementptr inbounds i64, i64* %optr.0142, i32 1
-  store i64 %_Q6V64_internal_union34.sroa.0.0.insert.insert, i64* %optr.0142, align 8
-  %incdec.ptr49 = getelementptr inbounds i64, i64* %optr.0142, i32 2
-  store i64 %_Q6V64_internal_union34.sroa.0.0.insert.insert, i64* %incdec.ptr41, align 8
+  %optr.0142 = phi ptr [ %out, %for.body.lr.ph.split ], [ %incdec.ptr49, %for.body ]
+  %incdec.ptr41 = getelementptr inbounds i64, ptr %optr.0142, i32 1
+  store i64 %_Q6V64_internal_union34.sroa.0.0.insert.insert, ptr %optr.0142, align 8
+  %incdec.ptr49 = getelementptr inbounds i64, ptr %optr.0142, i32 2
+  store i64 %_Q6V64_internal_union34.sroa.0.0.insert.insert, ptr %incdec.ptr41, align 8
   %add51 = add nuw nsw i32 %i.0143, 4
   %cmp = icmp slt i32 %add51, %length
   br i1 %cmp, label %for.body, label %for.end52

diff  --git a/llvm/test/CodeGen/Hexagon/tiny_bkfir_loop_align.ll b/llvm/test/CodeGen/Hexagon/tiny_bkfir_loop_align.ll
index f2682b2db5a48..b733c0aad67e5 100644
--- a/llvm/test/CodeGen/Hexagon/tiny_bkfir_loop_align.ll
+++ b/llvm/test/CodeGen/Hexagon/tiny_bkfir_loop_align.ll
@@ -8,14 +8,12 @@
 ; CHECK: p2align 4
 ; CHECK: } :endloop0
 
-define void @bkfir(i32* nocapture readonly %in, i32* nocapture readonly %coefs, i32 %tap, i32 %length, i32* nocapture %out) local_unnamed_addr #0 {
+define void @bkfir(ptr nocapture readonly %in, ptr nocapture readonly %coefs, i32 %tap, i32 %length, ptr nocapture %out) local_unnamed_addr #0 {
 entry:
-  %0 = bitcast i32* %out to i64*
   %cmp141 = icmp sgt i32 %length, 0
   br i1 %cmp141, label %for.body.lr.ph, label %for.end52
 
 for.body.lr.ph:
-  %1 = bitcast i32* %coefs to i64*
   %cmp8127 = icmp sgt i32 %tap, 0
   br i1 %cmp8127, label %for.body.us.preheader, label %for.body.lr.ph.split
 
@@ -23,101 +21,98 @@ for.body.us.preheader:
   br label %for.body.us
 
 for.body.us:
-  %add.ptr.us.phi = phi i32* [ %add.ptr.us.inc, %for.cond7.for.end_crit_edge.us ], [ %in, %for.body.us.preheader ]
+  %add.ptr.us.phi = phi ptr [ %add.ptr.us.inc, %for.cond7.for.end_crit_edge.us ], [ %in, %for.body.us.preheader ]
   %i.0143.us = phi i32 [ %add51.us, %for.cond7.for.end_crit_edge.us ], [ 0, %for.body.us.preheader ]
-  %optr.0142.us = phi i64* [ %incdec.ptr49.us, %for.cond7.for.end_crit_edge.us ], [ %0, %for.body.us.preheader ]
-  %2 = bitcast i32* %add.ptr.us.phi to i64*
-  %incdec.ptr.us = getelementptr inbounds i32, i32* %add.ptr.us.phi, i32 2
-  %3 = bitcast i32* %incdec.ptr.us to i64*
-  %4 = load i64, i64* %2, align 8
-  %incdec.ptr1.us = getelementptr inbounds i32, i32* %add.ptr.us.phi, i32 4
-  %5 = bitcast i32* %incdec.ptr1.us to i64*
-  %6 = load i64, i64* %3, align 8
-  %_Q6V64_internal_union.sroa.0.0.extract.trunc.us = trunc i64 %6 to i32
-  %_Q6V64_internal_union2.sroa.3.0.extract.shift.us = lshr i64 %4, 32
+  %optr.0142.us = phi ptr [ %incdec.ptr49.us, %for.cond7.for.end_crit_edge.us ], [ %out, %for.body.us.preheader ]
+  %incdec.ptr.us = getelementptr inbounds i32, ptr %add.ptr.us.phi, i32 2
+  %0 = load i64, ptr %add.ptr.us.phi, align 8
+  %incdec.ptr1.us = getelementptr inbounds i32, ptr %add.ptr.us.phi, i32 4
+  %1 = load i64, ptr %incdec.ptr.us, align 8
+  %_Q6V64_internal_union.sroa.0.0.extract.trunc.us = trunc i64 %1 to i32
+  %_Q6V64_internal_union2.sroa.3.0.extract.shift.us = lshr i64 %0, 32
   %_Q6V64_internal_union2.sroa.3.0.extract.trunc.us = trunc i64 %_Q6V64_internal_union2.sroa.3.0.extract.shift.us to i32
-  %7 = tail call i64 @llvm.hexagon.A2.combinew(i32 %_Q6V64_internal_union.sroa.0.0.extract.trunc.us, i32 %_Q6V64_internal_union2.sroa.3.0.extract.trunc.us)
-  %add.ptr.us.inc = getelementptr i32, i32* %add.ptr.us.phi, i32 4
+  %2 = tail call i64 @llvm.hexagon.A2.combinew(i32 %_Q6V64_internal_union.sroa.0.0.extract.trunc.us, i32 %_Q6V64_internal_union2.sroa.3.0.extract.trunc.us)
+  %add.ptr.us.inc = getelementptr i32, ptr %add.ptr.us.phi, i32 4
   br label %for.body9.us
 
 for.body9.us:
   %j.0137.us = phi i32 [ 0, %for.body.us ], [ %add.us, %for.body9.us ]
-  %x0x1.0136.us = phi i64 [ %4, %for.body.us ], [ %10, %for.body9.us ]
-  %x2x3.0135.us = phi i64 [ %6, %for.body.us ], [ %11, %for.body9.us ]
-  %x1x2.0134.us = phi i64 [ %7, %for.body.us ], [ %13, %for.body9.us ]
-  %iptrD.0133.us = phi i64* [ %5, %for.body.us ], [ %incdec.ptr13.us, %for.body9.us ]
-  %iptrC.0132.us = phi i64* [ %1, %for.body.us ], [ %incdec.ptr11.us, %for.body9.us ]
-  %sum0.0131.us = phi i64 [ 0, %for.body.us ], [ %18, %for.body9.us ]
-  %sum1.0130.us = phi i64 [ 0, %for.body.us ], [ %19, %for.body9.us ]
-  %sum2.0129.us = phi i64 [ 0, %for.body.us ], [ %20, %for.body9.us ]
-  %sum3.0128.us = phi i64 [ 0, %for.body.us ], [ %21, %for.body9.us ]
-  %incdec.ptr10.us = getelementptr inbounds i64, i64* %iptrC.0132.us, i32 1
-  %8 = load i64, i64* %iptrC.0132.us, align 8
-  %incdec.ptr11.us = getelementptr inbounds i64, i64* %iptrC.0132.us, i32 2
-  %9 = load i64, i64* %incdec.ptr10.us, align 8
-  %incdec.ptr12.us = getelementptr inbounds i64, i64* %iptrD.0133.us, i32 1
-  %10 = load i64, i64* %iptrD.0133.us, align 8
-  %incdec.ptr13.us = getelementptr inbounds i64, i64* %iptrD.0133.us, i32 2
-  %11 = load i64, i64* %incdec.ptr12.us, align 8
-  %_Q6V64_internal_union14.sroa.0.0.extract.trunc.us = trunc i64 %10 to i32
-  %_Q6V64_internal_union14.sroa.4.0.extract.shift.us = lshr i64 %10, 32
+  %x0x1.0136.us = phi i64 [ %0, %for.body.us ], [ %5, %for.body9.us ]
+  %x2x3.0135.us = phi i64 [ %1, %for.body.us ], [ %6, %for.body9.us ]
+  %x1x2.0134.us = phi i64 [ %2, %for.body.us ], [ %8, %for.body9.us ]
+  %iptrD.0133.us = phi ptr [ %incdec.ptr1.us, %for.body.us ], [ %incdec.ptr13.us, %for.body9.us ]
+  %iptrC.0132.us = phi ptr [ %coefs, %for.body.us ], [ %incdec.ptr11.us, %for.body9.us ]
+  %sum0.0131.us = phi i64 [ 0, %for.body.us ], [ %13, %for.body9.us ]
+  %sum1.0130.us = phi i64 [ 0, %for.body.us ], [ %14, %for.body9.us ]
+  %sum2.0129.us = phi i64 [ 0, %for.body.us ], [ %15, %for.body9.us ]
+  %sum3.0128.us = phi i64 [ 0, %for.body.us ], [ %16, %for.body9.us ]
+  %incdec.ptr10.us = getelementptr inbounds i64, ptr %iptrC.0132.us, i32 1
+  %3 = load i64, ptr %iptrC.0132.us, align 8
+  %incdec.ptr11.us = getelementptr inbounds i64, ptr %iptrC.0132.us, i32 2
+  %4 = load i64, ptr %incdec.ptr10.us, align 8
+  %incdec.ptr12.us = getelementptr inbounds i64, ptr %iptrD.0133.us, i32 1
+  %5 = load i64, ptr %iptrD.0133.us, align 8
+  %incdec.ptr13.us = getelementptr inbounds i64, ptr %iptrD.0133.us, i32 2
+  %6 = load i64, ptr %incdec.ptr12.us, align 8
+  %_Q6V64_internal_union14.sroa.0.0.extract.trunc.us = trunc i64 %5 to i32
+  %_Q6V64_internal_union14.sroa.4.0.extract.shift.us = lshr i64 %5, 32
   %_Q6V64_internal_union19.sroa.3.0.extract.shift.us = lshr i64 %x2x3.0135.us, 32
   %_Q6V64_internal_union19.sroa.3.0.extract.trunc.us = trunc i64 %_Q6V64_internal_union19.sroa.3.0.extract.shift.us to i32
-  %12 = tail call i64 @llvm.hexagon.A2.combinew(i32 %_Q6V64_internal_union14.sroa.0.0.extract.trunc.us, i32 %_Q6V64_internal_union19.sroa.3.0.extract.trunc.us)
-  %_Q6V64_internal_union24.sroa.0.0.extract.trunc.us = trunc i64 %11 to i32
+  %7 = tail call i64 @llvm.hexagon.A2.combinew(i32 %_Q6V64_internal_union14.sroa.0.0.extract.trunc.us, i32 %_Q6V64_internal_union19.sroa.3.0.extract.trunc.us)
+  %_Q6V64_internal_union24.sroa.0.0.extract.trunc.us = trunc i64 %6 to i32
   %_Q6V64_internal_union29.sroa.3.0.extract.trunc.us = trunc i64 %_Q6V64_internal_union14.sroa.4.0.extract.shift.us to i32
-  %13 = tail call i64 @llvm.hexagon.A2.combinew(i32 %_Q6V64_internal_union24.sroa.0.0.extract.trunc.us, i32 %_Q6V64_internal_union29.sroa.3.0.extract.trunc.us)
-  %14 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %sum0.0131.us, i64 %x0x1.0136.us, i64 %8)
-  %15 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %sum1.0130.us, i64 %x1x2.0134.us, i64 %8)
-  %16 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %sum2.0129.us, i64 %x2x3.0135.us, i64 %8)
-  %17 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %sum3.0128.us, i64 %12, i64 %8)
-  %18 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %14, i64 %x2x3.0135.us, i64 %9)
-  %19 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %15, i64 %12, i64 %9)
-  %20 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %16, i64 %10, i64 %9)
-  %21 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %17, i64 %13, i64 %9)
+  %8 = tail call i64 @llvm.hexagon.A2.combinew(i32 %_Q6V64_internal_union24.sroa.0.0.extract.trunc.us, i32 %_Q6V64_internal_union29.sroa.3.0.extract.trunc.us)
+  %9 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %sum0.0131.us, i64 %x0x1.0136.us, i64 %3)
+  %10 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %sum1.0130.us, i64 %x1x2.0134.us, i64 %3)
+  %11 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %sum2.0129.us, i64 %x2x3.0135.us, i64 %3)
+  %12 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %sum3.0128.us, i64 %7, i64 %3)
+  %13 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %9, i64 %x2x3.0135.us, i64 %4)
+  %14 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %10, i64 %7, i64 %4)
+  %15 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %11, i64 %5, i64 %4)
+  %16 = tail call i64 @llvm.hexagon.M7.dcmpyrwc.acc(i64 %12, i64 %8, i64 %4)
   %add.us = add nuw nsw i32 %j.0137.us, 4
   %cmp8.us = icmp slt i32 %add.us, %tap
   br i1 %cmp8.us, label %for.body9.us, label %for.cond7.for.end_crit_edge.us
 
 for.cond7.for.end_crit_edge.us:
-  %22 = ashr i64 %18, 39
-  %23 = ashr i64 %19, 39
-  %24 = ashr i64 %20, 39
-  %25 = ashr i64 %21, 39
-  %26 = tail call i32 @llvm.hexagon.A2.sat(i64 %22)
-  %27 = tail call i32 @llvm.hexagon.A2.sat(i64 %23)
-  %28 = tail call i32 @llvm.hexagon.A2.sat(i64 %24)
-  %29 = tail call i32 @llvm.hexagon.A2.sat(i64 %25)
-  %_Q6V64_internal_union34.sroa.4.0.insert.ext.us = zext i32 %27 to i64
+  %17 = ashr i64 %13, 39
+  %18 = ashr i64 %14, 39
+  %19 = ashr i64 %15, 39
+  %20 = ashr i64 %16, 39
+  %21 = tail call i32 @llvm.hexagon.A2.sat(i64 %17)
+  %22 = tail call i32 @llvm.hexagon.A2.sat(i64 %18)
+  %23 = tail call i32 @llvm.hexagon.A2.sat(i64 %19)
+  %24 = tail call i32 @llvm.hexagon.A2.sat(i64 %20)
+  %_Q6V64_internal_union34.sroa.4.0.insert.ext.us = zext i32 %22 to i64
   %_Q6V64_internal_union34.sroa.4.0.insert.shift.us = shl nuw i64 %_Q6V64_internal_union34.sroa.4.0.insert.ext.us, 32
-  %_Q6V64_internal_union34.sroa.0.0.insert.ext.us = zext i32 %26 to i64
+  %_Q6V64_internal_union34.sroa.0.0.insert.ext.us = zext i32 %21 to i64
   %_Q6V64_internal_union34.sroa.0.0.insert.insert.us = or i64 %_Q6V64_internal_union34.sroa.4.0.insert.shift.us, %_Q6V64_internal_union34.sroa.0.0.insert.ext.us
-  %incdec.ptr41.us = getelementptr inbounds i64, i64* %optr.0142.us, i32 1
-  store i64 %_Q6V64_internal_union34.sroa.0.0.insert.insert.us, i64* %optr.0142.us, align 8
-  %_Q6V64_internal_union42.sroa.4.0.insert.ext.us = zext i32 %29 to i64
+  %incdec.ptr41.us = getelementptr inbounds i64, ptr %optr.0142.us, i32 1
+  store i64 %_Q6V64_internal_union34.sroa.0.0.insert.insert.us, ptr %optr.0142.us, align 8
+  %_Q6V64_internal_union42.sroa.4.0.insert.ext.us = zext i32 %24 to i64
   %_Q6V64_internal_union42.sroa.4.0.insert.shift.us = shl nuw i64 %_Q6V64_internal_union42.sroa.4.0.insert.ext.us, 32
-  %_Q6V64_internal_union42.sroa.0.0.insert.ext.us = zext i32 %28 to i64
+  %_Q6V64_internal_union42.sroa.0.0.insert.ext.us = zext i32 %23 to i64
   %_Q6V64_internal_union42.sroa.0.0.insert.insert.us = or i64 %_Q6V64_internal_union42.sroa.4.0.insert.shift.us, %_Q6V64_internal_union42.sroa.0.0.insert.ext.us
-  %incdec.ptr49.us = getelementptr inbounds i64, i64* %optr.0142.us, i32 2
-  store i64 %_Q6V64_internal_union42.sroa.0.0.insert.insert.us, i64* %incdec.ptr41.us, align 8
+  %incdec.ptr49.us = getelementptr inbounds i64, ptr %optr.0142.us, i32 2
+  store i64 %_Q6V64_internal_union42.sroa.0.0.insert.insert.us, ptr %incdec.ptr41.us, align 8
   %add51.us = add nuw nsw i32 %i.0143.us, 4
   %cmp.us = icmp slt i32 %add51.us, %length
   br i1 %cmp.us, label %for.body.us, label %for.end52
 
 for.body.lr.ph.split:
-  %30 = tail call i32 @llvm.hexagon.A2.sat(i64 0)
-  %_Q6V64_internal_union34.sroa.4.0.insert.ext = zext i32 %30 to i64
+  %25 = tail call i32 @llvm.hexagon.A2.sat(i64 0)
+  %_Q6V64_internal_union34.sroa.4.0.insert.ext = zext i32 %25 to i64
   %_Q6V64_internal_union34.sroa.4.0.insert.shift = shl nuw i64 %_Q6V64_internal_union34.sroa.4.0.insert.ext, 32
   %_Q6V64_internal_union34.sroa.0.0.insert.insert = or i64 %_Q6V64_internal_union34.sroa.4.0.insert.shift, %_Q6V64_internal_union34.sroa.4.0.insert.ext
   br label %for.body
 
 for.body:
   %i.0143 = phi i32 [ 0, %for.body.lr.ph.split ], [ %add51, %for.body ]
-  %optr.0142 = phi i64* [ %0, %for.body.lr.ph.split ], [ %incdec.ptr49, %for.body ]
-  %incdec.ptr41 = getelementptr inbounds i64, i64* %optr.0142, i32 1
-  store i64 %_Q6V64_internal_union34.sroa.0.0.insert.insert, i64* %optr.0142, align 8
-  %incdec.ptr49 = getelementptr inbounds i64, i64* %optr.0142, i32 2
-  store i64 %_Q6V64_internal_union34.sroa.0.0.insert.insert, i64* %incdec.ptr41, align 8
+  %optr.0142 = phi ptr [ %out, %for.body.lr.ph.split ], [ %incdec.ptr49, %for.body ]
+  %incdec.ptr41 = getelementptr inbounds i64, ptr %optr.0142, i32 1
+  store i64 %_Q6V64_internal_union34.sroa.0.0.insert.insert, ptr %optr.0142, align 8
+  %incdec.ptr49 = getelementptr inbounds i64, ptr %optr.0142, i32 2
+  store i64 %_Q6V64_internal_union34.sroa.0.0.insert.insert, ptr %incdec.ptr41, align 8
   %add51 = add nuw nsw i32 %i.0143, 4
   %cmp = icmp slt i32 %add51, %length
   br i1 %cmp, label %for.body, label %for.end52

diff  --git a/llvm/test/CodeGen/Hexagon/tinycore.ll b/llvm/test/CodeGen/Hexagon/tinycore.ll
index b8ddb9c2c43e8..70f063c8a9be7 100644
--- a/llvm/test/CodeGen/Hexagon/tinycore.ll
+++ b/llvm/test/CodeGen/Hexagon/tinycore.ll
@@ -24,24 +24,24 @@
 ; CHECK-BIG-NEXT: = memw
 ; CHECK-BIG-NEXT: } :endloop0
 
-define i32 @test(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32 %n) local_unnamed_addr #0 {
+define i32 @test(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, i32 %n) local_unnamed_addr #0 {
 entry:
   %cmp8 = icmp sgt i32 %n, 0
   br i1 %cmp8, label %for.body, label %for.end
 
 for.body:
   %sum.010 = phi i32 [ %add, %for.body ], [ 0, %entry ]
-  %arrayidx.phi = phi i32* [ %arrayidx.inc, %for.body ], [ %a, %entry ]
-  %arrayidx1.phi = phi i32* [ %arrayidx1.inc, %for.body ], [ %b, %entry ]
+  %arrayidx.phi = phi ptr [ %arrayidx.inc, %for.body ], [ %a, %entry ]
+  %arrayidx1.phi = phi ptr [ %arrayidx1.inc, %for.body ], [ %b, %entry ]
   %i.09 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
-  %0 = load i32, i32* %arrayidx.phi, align 4
-  %1 = load i32, i32* %arrayidx1.phi, align 4
+  %0 = load i32, ptr %arrayidx.phi, align 4
+  %1 = load i32, ptr %arrayidx1.phi, align 4
   %mul = mul nsw i32 %1, %0
   %add = add nsw i32 %mul, %sum.010
   %inc = add nuw nsw i32 %i.09, 1
   %exitcond = icmp eq i32 %inc, %n
-  %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1
-  %arrayidx1.inc = getelementptr i32, i32* %arrayidx1.phi, i32 1
+  %arrayidx.inc = getelementptr i32, ptr %arrayidx.phi, i32 1
+  %arrayidx1.inc = getelementptr i32, ptr %arrayidx1.phi, i32 1
   br i1 %exitcond, label %for.end, label %for.body
 
 for.end:

diff  --git a/llvm/test/CodeGen/Hexagon/tls_gd.ll b/llvm/test/CodeGen/Hexagon/tls_gd.ll
index 27b37e2b8a938..a52469b92191c 100644
--- a/llvm/test/CodeGen/Hexagon/tls_gd.ll
+++ b/llvm/test/CodeGen/Hexagon/tls_gd.ll
@@ -9,8 +9,8 @@
 ; Function Attrs: nounwind
 define i32 @f0() #0 {
 b0:
-  %v0 = load i32, i32* @g1, align 4, !tbaa !0
-  store i32 %v0, i32* @g0, align 4, !tbaa !0
+  %v0 = load i32, ptr @g1, align 4, !tbaa !0
+  store i32 %v0, ptr @g0, align 4, !tbaa !0
   tail call void @f1(i32 %v0) #0
   ret i32 0
 }

diff  --git a/llvm/test/CodeGen/Hexagon/tls_pic.ll b/llvm/test/CodeGen/Hexagon/tls_pic.ll
index c6e5f5af582fb..2f9b189b4878e 100644
--- a/llvm/test/CodeGen/Hexagon/tls_pic.ll
+++ b/llvm/test/CodeGen/Hexagon/tls_pic.ll
@@ -10,8 +10,8 @@
 ; CHECK-NOT:  call
 define i32 @test_initial_exec() nounwind {
 entry:
-  %0 = load i32, i32* @src_ie, align 4
-  store i32 %0, i32* @dst_ie, align 4
+  %0 = load i32, ptr @src_ie, align 4
+  store i32 %0, ptr @dst_ie, align 4
   ret i32 0
 }
 
@@ -30,8 +30,8 @@ entry:
 
 define i32 @test_dynamic() nounwind {
 entry:
-  %0 = load i32, i32* @src_gd, align 4
-  store i32 %0, i32* @dst_gd, align 4
+  %0 = load i32, ptr @src_gd, align 4
+  store i32 %0, ptr @dst_gd, align 4
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/tls_static.ll b/llvm/test/CodeGen/Hexagon/tls_static.ll
index 224753d80e0ca..72d30b5e0a46b 100644
--- a/llvm/test/CodeGen/Hexagon/tls_static.ll
+++ b/llvm/test/CodeGen/Hexagon/tls_static.ll
@@ -8,8 +8,8 @@
 ; CHECK-DAG:   ##dst_le at TPREL
 define dso_local i32 @test_local_exec() nounwind {
 entry:
-  %0 = load i32, i32* @src_le, align 4
-  store i32 %0, i32* @dst_le, align 4
+  %0 = load i32, ptr @src_le, align 4
+  store i32 %0, ptr @dst_le, align 4
   ret i32 0
 }
 
@@ -21,8 +21,8 @@ entry:
 ; CHECK-DAG:   = memw(##dst_ie at IE)
 define dso_local i32 @test_initial_exec() nounwind {
 entry:
-  %0 = load i32, i32* @src_ie, align 4
-  store i32 %0, i32* @dst_ie, align 4
+  %0 = load i32, ptr @src_ie, align 4
+  store i32 %0, ptr @dst_ie, align 4
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/trivialmemaliascheck.ll b/llvm/test/CodeGen/Hexagon/trivialmemaliascheck.ll
index 822de22093c85..49b615197b622 100644
--- a/llvm/test/CodeGen/Hexagon/trivialmemaliascheck.ll
+++ b/llvm/test/CodeGen/Hexagon/trivialmemaliascheck.ll
@@ -17,14 +17,14 @@
 @g0 = common global [10 x i32] zeroinitializer, align 8
 
 ; Function Attrs: nounwind
-define void @f0(i32* nocapture %a0) #0 {
+define void @f0(ptr nocapture %a0) #0 {
 b0:
-  %v0 = load i32, i32* %a0, align 4, !tbaa !0
+  %v0 = load i32, ptr %a0, align 4, !tbaa !0
   %v1 = add nsw i32 %v0, 3
-  store i32 %v1, i32* %a0, align 4, !tbaa !0
-  %v2 = getelementptr inbounds i32, i32* %a0, i32 1
-  %v3 = load i32, i32* %v2, align 4, !tbaa !0
-  store i32 %v3, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @g0, i32 0, i32 0), align 8, !tbaa !0
+  store i32 %v1, ptr %a0, align 4, !tbaa !0
+  %v2 = getelementptr inbounds i32, ptr %a0, i32 1
+  %v3 = load i32, ptr %v2, align 4, !tbaa !0
+  store i32 %v3, ptr @g0, align 8, !tbaa !0
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/trunc-mpy.ll b/llvm/test/CodeGen/Hexagon/trunc-mpy.ll
index 9f5a2b5f8922b..ee2a85008019e 100644
--- a/llvm/test/CodeGen/Hexagon/trunc-mpy.ll
+++ b/llvm/test/CodeGen/Hexagon/trunc-mpy.ll
@@ -5,27 +5,27 @@
 
 ; CHECK-LABEL: f0:
 ; CHECK-NOT:  r{{[0-9]+}}:{{[0-9]+}} = mpy(
-define void @f0(i32* nocapture readonly %a0, i32* nocapture %a1) #0 {
+define void @f0(ptr nocapture readonly %a0, ptr nocapture %a1) #0 {
 b0:
-  %v0 = getelementptr i32, i32* %a1, i32 448
+  %v0 = getelementptr i32, ptr %a1, i32 448
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
   br i1 undef, label %b2, label %b1
 
 b2:                                               ; preds = %b1
-  %v1 = getelementptr inbounds i32, i32* %a0, i32 64
-  %v2 = load i32, i32* %a0, align 4
-  %v3 = getelementptr inbounds i32, i32* %a0, i32 2
-  %v4 = load i32, i32* %v1, align 4
+  %v1 = getelementptr inbounds i32, ptr %a0, i32 64
+  %v2 = load i32, ptr %a0, align 4
+  %v3 = getelementptr inbounds i32, ptr %a0, i32 2
+  %v4 = load i32, ptr %v1, align 4
   %v5 = sext i32 %v2 to i64
   %v6 = sext i32 %v4 to i64
   %v7 = mul nsw i64 %v6, %v5
   %v8 = lshr i64 %v7, 32
   %v9 = trunc i64 %v8 to i32
   %v10 = sub nsw i32 0, %v9
-  %v11 = getelementptr inbounds i32, i32* %v0, i32 1
-  store i32 %v10, i32* %v1, align 4
+  %v11 = getelementptr inbounds i32, ptr %v0, i32 1
+  store i32 %v10, ptr %v1, align 4
   ret void
 }
 
@@ -33,24 +33,24 @@ b2:                                               ; preds = %b1
 
 ; CHECK-LABEL: f1:
 ; CHECK: r{{[0-9]+}} = mpy(
-define void @f1(i32 %a0, i32 %a1, i32* nocapture readonly %a2, i32* nocapture %a3) #0 {
+define void @f1(i32 %a0, i32 %a1, ptr nocapture readonly %a2, ptr nocapture %a3) #0 {
 b0:
-  %v0 = getelementptr i32, i32* %a3, i32 448
+  %v0 = getelementptr i32, ptr %a3, i32 448
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
   br i1 undef, label %b2, label %b1
 
 b2:                                               ; preds = %b1
-  %v1 = getelementptr inbounds i32, i32* %a2, i32 64
+  %v1 = getelementptr inbounds i32, ptr %a2, i32 64
   %v2 = sext i32 %a0 to i64
   %v3 = sext i32 %a1 to i64
   %v4 = mul nsw i64 %v3, %v2
   %v5 = lshr i64 %v4, 32
   %v6 = trunc i64 %v5 to i32
   %v7 = sub nsw i32 0, %v6
-  %v8 = getelementptr inbounds i32, i32* %v0, i32 1
-  store i32 %v7, i32* %v1, align 4
+  %v8 = getelementptr inbounds i32, ptr %v0, i32 1
+  store i32 %v7, ptr %v1, align 4
   ret void
 }
 
@@ -58,30 +58,30 @@ b2:                                               ; preds = %b1
 
 ; CHECK-LABEL: f2:
 ; CHECK: r{{[0-9]+}} = mpy(
-define void @f2(i32* nocapture readonly %a0, i32* nocapture %a1) #0 {
+define void @f2(ptr nocapture readonly %a0, ptr nocapture %a1) #0 {
 b0:
-  %v0 = getelementptr i32, i32* %a1, i32 448
+  %v0 = getelementptr i32, ptr %a1, i32 448
   br label %b1
 
 b1:                                               ; preds = %b0
-  %v1 = getelementptr inbounds i32, i32* %a0, i32 64
+  %v1 = getelementptr inbounds i32, ptr %a0, i32 64
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
-  %v2 = phi i32* [ %v0, %b1 ], [ %v14, %b2 ]
-  %v3 = phi i32* [ %v1, %b1 ], [ undef, %b2 ]
-  %v4 = phi i32* [ null, %b1 ], [ %v6, %b2 ]
-  %v5 = load i32, i32* %v4, align 4
-  %v6 = getelementptr inbounds i32, i32* %v4, i32 2
-  %v7 = load i32, i32* %v3, align 4
+  %v2 = phi ptr [ %v0, %b1 ], [ %v14, %b2 ]
+  %v3 = phi ptr [ %v1, %b1 ], [ undef, %b2 ]
+  %v4 = phi ptr [ null, %b1 ], [ %v6, %b2 ]
+  %v5 = load i32, ptr %v4, align 4
+  %v6 = getelementptr inbounds i32, ptr %v4, i32 2
+  %v7 = load i32, ptr %v3, align 4
   %v8 = sext i32 %v5 to i64
   %v9 = sext i32 %v7 to i64
   %v10 = mul nsw i64 %v9, %v8
   %v11 = lshr i64 %v10, 32
   %v12 = trunc i64 %v11 to i32
   %v13 = sub nsw i32 0, %v12
-  %v14 = getelementptr inbounds i32, i32* %v2, i32 1
-  store i32 %v13, i32* %v2, align 4
+  %v14 = getelementptr inbounds i32, ptr %v2, i32 1
+  store i32 %v13, ptr %v2, align 4
   br label %b2
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/twoaddressbug.ll b/llvm/test/CodeGen/Hexagon/twoaddressbug.ll
index a5a7333295e1a..8fab1f0a55d23 100644
--- a/llvm/test/CodeGen/Hexagon/twoaddressbug.ll
+++ b/llvm/test/CodeGen/Hexagon/twoaddressbug.ll
@@ -7,7 +7,7 @@
 
 target triple = "hexagon"
 
-%0 = type { i8, i8, %1, i32, i32, %7, i8, i8, %8, i8, i32, i16, i16, [2500 x i8], i16, i16, i16, i8*, [1024 x i8], i32, i32, i32, i32, i32, i8 }
+%0 = type { i8, i8, %1, i32, i32, %7, i8, i8, %8, i8, i32, i16, i16, [2500 x i8], i16, i16, i16, ptr, [1024 x i8], i32, i32, i32, i32, i32, i8 }
 %1 = type { i8, %2, i8, i8, i32 }
 %2 = type { %3 }
 %3 = type { i8, [256 x i8], %4, i8, i16, i32 }
@@ -16,7 +16,7 @@ target triple = "hexagon"
 %6 = type { [2 x i64] }
 %7 = type { i32, i8 }
 %8 = type { %7, i32, i32, %1 }
-%9 = type { %10, i8* }
+%9 = type { %10, ptr }
 %10 = type { i16, i16, i32 }
 %11 = type { i8, i32 }
 
@@ -27,7 +27,7 @@ target triple = "hexagon"
 @g4 = external hidden constant %9, align 4
 
 ; Function Attrs: optsize
-declare void @f0(%9*, i32, i32, i32) #0
+declare void @f0(ptr, i32, i32, i32) #0
 
 ; Function Attrs: nounwind optsize ssp
 define hidden fastcc void @f1(i64 %a0, i8 zeroext %a1, i8 zeroext %a2) #1 {
@@ -38,35 +38,34 @@ b0:
   br i1 %v1, label %b1, label %b4
 
 b1:                                               ; preds = %b0
-  call void @f0(%9* @g1, i32 %v2, i32 0, i32 0) #2
-  %v3 = getelementptr inbounds [2 x %0], [2 x %0]* @g0, i32 0, i32 %v2, i32 7
-  store i8 1, i8* %v3, align 1
+  call void @f0(ptr @g1, i32 %v2, i32 0, i32 0) #2
+  %v3 = getelementptr inbounds [2 x %0], ptr @g0, i32 0, i32 %v2, i32 7
+  store i8 1, ptr %v3, align 1
   %v4 = icmp eq i8 %a2, 0
   br i1 %v4, label %b4, label %b2
 
 b2:                                               ; preds = %b1
-  %v5 = getelementptr inbounds %11, %11* %v0, i32 0, i32 0
-  store i8 0, i8* %v5, align 4
-  %v6 = getelementptr inbounds %11, %11* %v0, i32 0, i32 1
-  store i32 0, i32* %v6, align 4
-  %v7 = getelementptr inbounds [2 x %0], [2 x %0]* @g0, i32 0, i32 %v2, i32 3
-  %v8 = load i32, i32* %v7, align 8
-  %v9 = getelementptr inbounds [2 x %0], [2 x %0]* @g0, i32 0, i32 %v2, i32 4
-  %v10 = load i32, i32* %v9, align 4
-  %v11 = getelementptr inbounds [2 x %0], [2 x %0]* @g0, i32 0, i32 %v2, i32 19
-  %v12 = load i32, i32* %v11, align 4
-  %v13 = call zeroext i8 @f2(i64 %a0, i32 %v8, i32 %v10, i32 %v12, i8 zeroext 0, %11* %v0) #2
+  store i8 0, ptr %v0, align 4
+  %v6 = getelementptr inbounds %11, ptr %v0, i32 0, i32 1
+  store i32 0, ptr %v6, align 4
+  %v7 = getelementptr inbounds [2 x %0], ptr @g0, i32 0, i32 %v2, i32 3
+  %v8 = load i32, ptr %v7, align 8
+  %v9 = getelementptr inbounds [2 x %0], ptr @g0, i32 0, i32 %v2, i32 4
+  %v10 = load i32, ptr %v9, align 4
+  %v11 = getelementptr inbounds [2 x %0], ptr @g0, i32 0, i32 %v2, i32 19
+  %v12 = load i32, ptr %v11, align 4
+  %v13 = call zeroext i8 @f2(i64 %a0, i32 %v8, i32 %v10, i32 %v12, i8 zeroext 0, ptr %v0) #2
   %v14 = icmp eq i8 %v13, 0
   br i1 %v14, label %b4, label %b3
 
 b3:                                               ; preds = %b2
   %v15 = zext i8 %v13 to i32
-  call void @f0(%9* @g2, i32 %v15, i32 %v2, i32 0) #2
+  call void @f0(ptr @g2, i32 %v15, i32 %v2, i32 0) #2
   br label %b4
 
 b4:                                               ; preds = %b3, %b2, %b1, %b0
-  %v16 = getelementptr inbounds [2 x %0], [2 x %0]* @g0, i32 0, i32 %v2, i32 1
-  %v17 = load i8, i8* %v16, align 1
+  %v16 = getelementptr inbounds [2 x %0], ptr @g0, i32 0, i32 %v2, i32 1
+  %v17 = load i8, ptr %v16, align 1
   %v18 = zext i8 %v17 to i32
   switch i32 %v18, label %b14 [
     i32 2, label %b11
@@ -76,7 +75,7 @@ b4:                                               ; preds = %b3, %b2, %b1, %b0
   ]
 
 b5:                                               ; preds = %b4
-  call void @f0(%9* @g3, i32 %v2, i32 0, i32 0) #2
+  call void @f0(ptr @g3, i32 %v2, i32 0, i32 0) #2
   br i1 %v1, label %b7, label %b6
 
 b6:                                               ; preds = %b5
@@ -88,9 +87,9 @@ b7:                                               ; preds = %b5
   br label %b14
 
 b8:                                               ; preds = %b4
-  call void @f0(%9* @g4, i32 %v2, i32 0, i32 0) #2
-  %v19 = getelementptr inbounds [2 x %0], [2 x %0]* @g0, i32 0, i32 %v2, i32 6
-  store i8 1, i8* %v19, align 8
+  call void @f0(ptr @g4, i32 %v2, i32 0, i32 0) #2
+  %v19 = getelementptr inbounds [2 x %0], ptr @g0, i32 0, i32 %v2, i32 6
+  store i8 1, ptr %v19, align 8
   br i1 %v1, label %b10, label %b9
 
 b9:                                               ; preds = %b8
@@ -117,7 +116,7 @@ b14:                                              ; preds = %b13, %b12, %b10, %b
 }
 
 ; Function Attrs: optsize
-declare zeroext i8 @f2(i64, i32, i32, i32, i8 zeroext, %11*) #0
+declare zeroext i8 @f2(i64, i32, i32, i32, i8 zeroext, ptr) #0
 
 ; Function Attrs: nounwind optsize ssp
 declare hidden fastcc void @f3(i64, i8 zeroext, i8 zeroext, i32) #1

diff  --git a/llvm/test/CodeGen/Hexagon/undef-ret.ll b/llvm/test/CodeGen/Hexagon/undef-ret.ll
index 7fd9abde37f4f..e879d7d01a47e 100644
--- a/llvm/test/CodeGen/Hexagon/undef-ret.ll
+++ b/llvm/test/CodeGen/Hexagon/undef-ret.ll
@@ -3,11 +3,11 @@
 ; CHECK: = add(r0,add(r1,#2))
 
 ; Function Attrs: nounwind
-define i32 @f0(i32 %a0, i32 %a1, i32* nocapture %a2) #0 {
+define i32 @f0(i32 %a0, i32 %a1, ptr nocapture %a2) #0 {
 b0:
   %v0 = add nsw i32 %a0, 2
   %v1 = add nsw i32 %v0, %a1
-  store i32 %v1, i32* %a2, align 4, !tbaa !0
+  store i32 %v1, ptr %a2, align 4, !tbaa !0
   ret i32 undef
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/undo-dag-shift.ll b/llvm/test/CodeGen/Hexagon/undo-dag-shift.ll
index 5aa7f39121d83..8cf2b86fca415 100644
--- a/llvm/test/CodeGen/Hexagon/undo-dag-shift.ll
+++ b/llvm/test/CodeGen/Hexagon/undo-dag-shift.ll
@@ -25,32 +25,32 @@ target triple = "hexagon"
 
 ; CHECK-LABEL: load_0
 ; CHECK: memw(r{{[0-9]+}}+r{{[0-9]}}<<#2)
-define i32 @load_0(i32 %b, i32* nocapture readonly %a) #0 {
+define i32 @load_0(i32 %b, ptr nocapture readonly %a) #0 {
 entry:
   %shr = lshr i32 %b, 5
-  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %shr
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %a, i32 %shr
+  %0 = load i32, ptr %arrayidx, align 4
   ret i32 %0
 }
 
 ; This would require r0<<#3, which is not legal.
 ; CHECK-LABEL: load_1
 ; CHECK: memw(r{{[0-9]+}}+r{{[0-9]}}<<#0)
-define i32 @load_1(i32 %b, [3 x i32]* nocapture readonly %a) #0 {
+define i32 @load_1(i32 %b, ptr nocapture readonly %a) #0 {
 entry:
   %shr = lshr i32 %b, 5
-  %arrayidx = getelementptr inbounds [3 x i32], [3 x i32]* %a, i32 %shr, i32 0
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds [3 x i32], ptr %a, i32 %shr, i32 0
+  %0 = load i32, ptr %arrayidx, align 4
   ret i32 %0
 }
 
 ; CHECK-LABEL: store_0
 ; CHECK: memw(r{{[0-9]+}}+r{{[0-9]}}<<#2)
-define void @store_0(i32 %b, i32* nocapture %a, i32 %v) #1 {
+define void @store_0(i32 %b, ptr nocapture %a, i32 %v) #1 {
 entry:
   %shr = lshr i32 %b, 5
-  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %shr
-  store i32 %v, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %a, i32 %shr
+  store i32 %v, ptr %arrayidx, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/union-1.ll b/llvm/test/CodeGen/Hexagon/union-1.ll
index 970ded79deb87..5d98f915945e4 100644
--- a/llvm/test/CodeGen/Hexagon/union-1.ll
+++ b/llvm/test/CodeGen/Hexagon/union-1.ll
@@ -3,12 +3,12 @@
 ; CHECK-NOT: combine(#0
 ; CHECK: jump f1
 
-define void @f0(i32* nocapture %a0) #0 {
+define void @f0(ptr nocapture %a0) #0 {
 b0:
-  %v0 = load i32, i32* %a0, align 4
+  %v0 = load i32, ptr %a0, align 4
   %v1 = zext i32 %v0 to i64
-  %v2 = getelementptr inbounds i32, i32* %a0, i32 1
-  %v3 = load i32, i32* %v2, align 4
+  %v2 = getelementptr inbounds i32, ptr %a0, i32 1
+  %v3 = load i32, ptr %v2, align 4
   %v4 = zext i32 %v3 to i64
   %v5 = shl nuw i64 %v4, 32
   %v6 = or i64 %v5, %v1

diff  --git a/llvm/test/CodeGen/Hexagon/upper-mpy.ll b/llvm/test/CodeGen/Hexagon/upper-mpy.ll
index 9fa2696ed3ad2..35bdc0cd439c5 100644
--- a/llvm/test/CodeGen/Hexagon/upper-mpy.ll
+++ b/llvm/test/CodeGen/Hexagon/upper-mpy.ll
@@ -11,36 +11,36 @@
 @g1 = external constant [2 x i32], align 8
 
 ; Function Attrs: nounwind
-define void @f0(i32* nocapture readonly %a0, i32* %a1, i32* nocapture %a2, i32 %a3, i32 %a4) #0 {
+define void @f0(ptr nocapture readonly %a0, ptr %a1, ptr nocapture %a2, i32 %a3, i32 %a4) #0 {
 b0:
-  %v0 = getelementptr inbounds i32, i32* %a0, i32 512
-  %v1 = getelementptr inbounds i32, i32* %a0, i32 511
-  %v2 = getelementptr inbounds i32, i32* %a2, i32 1023
-  %v3 = getelementptr inbounds i32, i32* %a1, i32 1023
+  %v0 = getelementptr inbounds i32, ptr %a0, i32 512
+  %v1 = getelementptr inbounds i32, ptr %a0, i32 511
+  %v2 = getelementptr inbounds i32, ptr %a2, i32 1023
+  %v3 = getelementptr inbounds i32, ptr %a1, i32 1023
   br label %b1
 
 b1:                                               ; preds = %b0
-  %v4 = load i32, i32* getelementptr inbounds ([2 x i32], [2 x i32]* @g1, i32 0, i32 1), align 4
-  %v5 = getelementptr inbounds [1152 x i32], [1152 x i32]* @g0, i32 0, i32 %v4
+  %v4 = load i32, ptr getelementptr inbounds ([2 x i32], ptr @g1, i32 0, i32 1), align 4
+  %v5 = getelementptr inbounds [1152 x i32], ptr @g0, i32 0, i32 %v4
   br label %b2
 
 b2:                                               ; preds = %b1
   br label %b3
 
 b3:                                               ; preds = %b3, %b2
-  %v6 = phi i32* [ %v30, %b3 ], [ %a2, %b2 ]
-  %v7 = phi i32* [ %v44, %b3 ], [ %a1, %b2 ]
-  %v8 = phi i32* [ %v17, %b3 ], [ %v0, %b2 ]
-  %v9 = phi i32* [ %v34, %b3 ], [ %v1, %b2 ]
-  %v10 = phi i32* [ %v40, %b3 ], [ %v3, %b2 ]
-  %v11 = phi i32* [ %v33, %b3 ], [ %v2, %b2 ]
-  %v12 = phi i32* [ %v15, %b3 ], [ %v5, %b2 ]
-  %v13 = getelementptr inbounds i32, i32* %v12, i32 1
-  %v14 = load i32, i32* %v12, align 4
-  %v15 = getelementptr inbounds i32, i32* %v12, i32 2
-  %v16 = load i32, i32* %v13, align 4
-  %v17 = getelementptr inbounds i32, i32* %v8, i32 1
-  %v18 = load i32, i32* %v8, align 4
+  %v6 = phi ptr [ %v30, %b3 ], [ %a2, %b2 ]
+  %v7 = phi ptr [ %v44, %b3 ], [ %a1, %b2 ]
+  %v8 = phi ptr [ %v17, %b3 ], [ %v0, %b2 ]
+  %v9 = phi ptr [ %v34, %b3 ], [ %v1, %b2 ]
+  %v10 = phi ptr [ %v40, %b3 ], [ %v3, %b2 ]
+  %v11 = phi ptr [ %v33, %b3 ], [ %v2, %b2 ]
+  %v12 = phi ptr [ %v15, %b3 ], [ %v5, %b2 ]
+  %v13 = getelementptr inbounds i32, ptr %v12, i32 1
+  %v14 = load i32, ptr %v12, align 4
+  %v15 = getelementptr inbounds i32, ptr %v12, i32 2
+  %v16 = load i32, ptr %v13, align 4
+  %v17 = getelementptr inbounds i32, ptr %v8, i32 1
+  %v18 = load i32, ptr %v8, align 4
   %v19 = sext i32 %v14 to i64
   %v20 = sext i32 %v18 to i64
   %v21 = mul nsw i64 %v20, %v19
@@ -50,28 +50,28 @@ b3:                                               ; preds = %b3, %b2
   %v25 = mul nsw i64 %v20, %v24
   %v26 = lshr i64 %v25, 32
   %v27 = trunc i64 %v26 to i32
-  %v28 = load i32, i32* %v7, align 4
+  %v28 = load i32, ptr %v7, align 4
   %v29 = sub nsw i32 %v28, %v23
-  %v30 = getelementptr inbounds i32, i32* %v6, i32 1
-  store i32 %v29, i32* %v6, align 4
-  %v31 = load i32, i32* %v10, align 4
+  %v30 = getelementptr inbounds i32, ptr %v6, i32 1
+  store i32 %v29, ptr %v6, align 4
+  %v31 = load i32, ptr %v10, align 4
   %v32 = add nsw i32 %v27, %v31
-  %v33 = getelementptr inbounds i32, i32* %v11, i32 -1
-  store i32 %v32, i32* %v11, align 4
-  %v34 = getelementptr inbounds i32, i32* %v9, i32 -1
-  %v35 = load i32, i32* %v9, align 4
+  %v33 = getelementptr inbounds i32, ptr %v11, i32 -1
+  store i32 %v32, ptr %v11, align 4
+  %v34 = getelementptr inbounds i32, ptr %v9, i32 -1
+  %v35 = load i32, ptr %v9, align 4
   %v36 = sext i32 %v35 to i64
   %v37 = mul nsw i64 %v36, %v19
   %v38 = lshr i64 %v37, 32
   %v39 = trunc i64 %v38 to i32
-  %v40 = getelementptr inbounds i32, i32* %v10, i32 -1
-  store i32 %v39, i32* %v10, align 4
+  %v40 = getelementptr inbounds i32, ptr %v10, i32 -1
+  store i32 %v39, ptr %v10, align 4
   %v41 = mul nsw i64 %v36, %v24
   %v42 = lshr i64 %v41, 32
   %v43 = trunc i64 %v42 to i32
-  %v44 = getelementptr inbounds i32, i32* %v7, i32 1
-  store i32 %v43, i32* %v7, align 4
-  %v45 = icmp ult i32* %v44, %v40
+  %v44 = getelementptr inbounds i32, ptr %v7, i32 1
+  store i32 %v43, ptr %v7, align 4
+  %v45 = icmp ult ptr %v44, %v40
   br i1 %v45, label %b3, label %b4
 
 b4:                                               ; preds = %b3

diff  --git a/llvm/test/CodeGen/Hexagon/v6-inlasm1.ll b/llvm/test/CodeGen/Hexagon/v6-inlasm1.ll
index 105dac8f44818..01b5cc878b9d3 100644
--- a/llvm/test/CodeGen/Hexagon/v6-inlasm1.ll
+++ b/llvm/test/CodeGen/Hexagon/v6-inlasm1.ll
@@ -4,13 +4,13 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @f0(i8* %a0, i32 %a1, i8* %a2, i32 %a3, i8* %a4) #0 {
+define void @f0(ptr %a0, i32 %a1, ptr %a2, i32 %a3, ptr %a4) #0 {
 b0:
-  %v0 = alloca i8*, align 4
+  %v0 = alloca ptr, align 4
   %v1 = alloca i32, align 4
-  %v2 = alloca i8*, align 4
+  %v2 = alloca ptr, align 4
   %v3 = alloca i32, align 4
-  %v4 = alloca i8*, align 4
+  %v4 = alloca ptr, align 4
   %v5 = alloca i32, align 4
   %v6 = alloca i32, align 4
   %v7 = alloca i32, align 4
@@ -27,74 +27,71 @@ b0:
   %v18 = alloca <16 x i32>, align 64
   %v19 = alloca <16 x i32>, align 64
   %v20 = alloca <16 x i32>, align 64
-  store i8* %a0, i8** %v0, align 4
-  store i32 %a1, i32* %v1, align 4
-  store i8* %a2, i8** %v2, align 4
-  store i32 %a3, i32* %v3, align 4
-  store i8* %a4, i8** %v4, align 4
-  %v21 = load i32, i32* %v1, align 4
+  store ptr %a0, ptr %v0, align 4
+  store i32 %a1, ptr %v1, align 4
+  store ptr %a2, ptr %v2, align 4
+  store i32 %a3, ptr %v3, align 4
+  store ptr %a4, ptr %v4, align 4
+  %v21 = load i32, ptr %v1, align 4
   %v22 = ashr i32 %v21, 16
   %v23 = and i32 65535, %v22
-  store i32 %v23, i32* %v8, align 4
-  %v24 = load i32, i32* %v1, align 4
+  store i32 %v23, ptr %v8, align 4
+  %v24 = load i32, ptr %v1, align 4
   %v25 = and i32 65535, %v24
-  store i32 %v25, i32* %v5, align 4
-  %v26 = load i32, i32* %v3, align 4
+  store i32 %v25, ptr %v5, align 4
+  %v26 = load i32, ptr %v3, align 4
   %v27 = and i32 65535, %v26
-  store i32 %v27, i32* %v6, align 4
-  %v28 = load i32, i32* %v3, align 4
+  store i32 %v27, ptr %v6, align 4
+  %v28 = load i32, ptr %v3, align 4
   %v29 = ashr i32 %v28, 16
   %v30 = and i32 65535, %v29
-  store i32 %v30, i32* %v9, align 4
-  %v31 = load i8*, i8** %v4, align 4
-  %v32 = bitcast i8* %v31 to <16 x i32>*
-  %v33 = load <16 x i32>, <16 x i32>* %v32, align 64
-  store <16 x i32> %v33, <16 x i32>* %v10, align 64
-  %v34 = load i8*, i8** %v4, align 4
-  %v35 = getelementptr inbounds i8, i8* %v34, i32 64
-  %v36 = bitcast i8* %v35 to <16 x i32>*
-  %v37 = load <16 x i32>, <16 x i32>* %v36, align 64
-  store <16 x i32> %v37, <16 x i32>* %v12, align 64
-  %v38 = load i32, i32* %v9, align 4
-  store i32 %v38, i32* %v7, align 4
+  store i32 %v30, ptr %v9, align 4
+  %v31 = load ptr, ptr %v4, align 4
+  %v33 = load <16 x i32>, ptr %v31, align 64
+  store <16 x i32> %v33, ptr %v10, align 64
+  %v34 = load ptr, ptr %v4, align 4
+  %v35 = getelementptr inbounds i8, ptr %v34, i32 64
+  %v37 = load <16 x i32>, ptr %v35, align 64
+  store <16 x i32> %v37, ptr %v12, align 64
+  %v38 = load i32, ptr %v9, align 4
+  store i32 %v38, ptr %v7, align 4
   br label %b1
 
 b1:                                               ; preds = %b3, %b0
-  %v39 = load i32, i32* %v7, align 4
+  %v39 = load i32, ptr %v7, align 4
   %v40 = icmp sge i32 %v39, 0
   br i1 %v40, label %b2, label %b4
 
 b2:                                               ; preds = %b1
-  %v41 = load i8*, i8** %v0, align 4
-  %v42 = bitcast i8* %v41 to <16 x i32>*
-  %v43 = load <16 x i32>, <16 x i32>* %v42, align 4
-  store <16 x i32> %v43, <16 x i32>* %v14, align 64
-  %v44 = load i32, i32* %v5, align 4
-  %v45 = load i8*, i8** %v0, align 4
-  %v46 = getelementptr inbounds i8, i8* %v45, i32 %v44
-  store i8* %v46, i8** %v0, align 4
-  %v47 = load <16 x i32>, <16 x i32>* %v14, align 64
-  %v48 = load <16 x i32>, <16 x i32>* %v10, align 64
+  %v41 = load ptr, ptr %v0, align 4
+  %v43 = load <16 x i32>, ptr %v41, align 4
+  store <16 x i32> %v43, ptr %v14, align 64
+  %v44 = load i32, ptr %v5, align 4
+  %v45 = load ptr, ptr %v0, align 4
+  %v46 = getelementptr inbounds i8, ptr %v45, i32 %v44
+  store ptr %v46, ptr %v0, align 4
+  %v47 = load <16 x i32>, ptr %v14, align 64
+  %v48 = load <16 x i32>, ptr %v10, align 64
   %v49 = call <16 x i32> @llvm.hexagon.V6.vrdelta(<16 x i32> %v47, <16 x i32> %v48)
-  store <16 x i32> %v49, <16 x i32>* %v15, align 64
-  %v50 = load <16 x i32>, <16 x i32>* %v14, align 64
-  %v51 = load <16 x i32>, <16 x i32>* %v12, align 64
+  store <16 x i32> %v49, ptr %v15, align 64
+  %v50 = load <16 x i32>, ptr %v14, align 64
+  %v51 = load <16 x i32>, ptr %v12, align 64
   %v52 = call <16 x i32> @llvm.hexagon.V6.vrdelta(<16 x i32> %v50, <16 x i32> %v51)
-  store <16 x i32> %v52, <16 x i32>* %v17, align 64
-  %v53 = load <16 x i32>, <16 x i32>* %v15, align 64
-  %v54 = load <16 x i32>, <16 x i32>* %v17, align 64
+  store <16 x i32> %v52, ptr %v17, align 64
+  %v53 = load <16 x i32>, ptr %v15, align 64
+  %v54 = load <16 x i32>, ptr %v17, align 64
   %v55 = call <16 x i32> @llvm.hexagon.V6.vavgub(<16 x i32> %v53, <16 x i32> %v54)
-  store <16 x i32> %v55, <16 x i32>* %v19, align 64
-  %v56 = load i8*, i8** %v2, align 4
-  %v57 = load <16 x i32>, <16 x i32>* %v19, align 64
-  call void asm sideeffect "  vmemu($0) = $1;\0A", "r,v,~{memory}"(i8* %v56, <16 x i32> %v57) #2, !srcloc !0
+  store <16 x i32> %v55, ptr %v19, align 64
+  %v56 = load ptr, ptr %v2, align 4
+  %v57 = load <16 x i32>, ptr %v19, align 64
+  call void asm sideeffect "  vmemu($0) = $1;\0A", "r,v,~{memory}"(ptr %v56, <16 x i32> %v57) #2, !srcloc !0
   br label %b3
 
 b3:                                               ; preds = %b2
-  %v58 = load i32, i32* %v6, align 4
-  %v59 = load i32, i32* %v7, align 4
+  %v58 = load i32, ptr %v6, align 4
+  %v59 = load i32, ptr %v7, align 4
   %v60 = sub nsw i32 %v59, %v58
-  store i32 %v60, i32* %v7, align 4
+  store i32 %v60, ptr %v7, align 4
   br label %b1
 
 b4:                                               ; preds = %b1

diff  --git a/llvm/test/CodeGen/Hexagon/v6-inlasm2.ll b/llvm/test/CodeGen/Hexagon/v6-inlasm2.ll
index 1e9d5268f20b3..9506e2352b914 100644
--- a/llvm/test/CodeGen/Hexagon/v6-inlasm2.ll
+++ b/llvm/test/CodeGen/Hexagon/v6-inlasm2.ll
@@ -4,19 +4,19 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @f0(i8* %a0, i8* %a1) #0 {
+define void @f0(ptr %a0, ptr %a1) #0 {
 b0:
-  %v0 = alloca i8*, align 4
-  %v1 = alloca i8*, align 4
+  %v0 = alloca ptr, align 4
+  %v1 = alloca ptr, align 4
   %v2 = alloca <16 x i32>, align 64
-  store i8* %a0, i8** %v0, align 4
-  store i8* %a1, i8** %v1, align 4
-  %v3 = load i8*, i8** %v0, align 4
-  %v4 = load <16 x i32>, <16 x i32>* %v2, align 64
-  call void asm sideeffect "  $1 = vmemu($0);\0A", "r,v"(i8* %v3, <16 x i32> %v4) #1, !srcloc !0
-  %v5 = load i8*, i8** %v1, align 4
-  %v6 = load <16 x i32>, <16 x i32>* %v2, align 64
-  call void asm sideeffect "  vmemu($0) = $1;\0A", "r,v,~{memory}"(i8* %v5, <16 x i32> %v6) #1, !srcloc !1
+  store ptr %a0, ptr %v0, align 4
+  store ptr %a1, ptr %v1, align 4
+  %v3 = load ptr, ptr %v0, align 4
+  %v4 = load <16 x i32>, ptr %v2, align 64
+  call void asm sideeffect "  $1 = vmemu($0);\0A", "r,v"(ptr %v3, <16 x i32> %v4) #1, !srcloc !0
+  %v5 = load ptr, ptr %v1, align 4
+  %v6 = load <16 x i32>, ptr %v2, align 64
+  call void asm sideeffect "  vmemu($0) = $1;\0A", "r,v,~{memory}"(ptr %v5, <16 x i32> %v6) #1, !srcloc !1
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/v6-inlasm3.ll b/llvm/test/CodeGen/Hexagon/v6-inlasm3.ll
index b4b305586bb6f..0a9933daad0ca 100644
--- a/llvm/test/CodeGen/Hexagon/v6-inlasm3.ll
+++ b/llvm/test/CodeGen/Hexagon/v6-inlasm3.ll
@@ -4,28 +4,28 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @f0(i8* %a0, i8* %a1) #0 {
+define void @f0(ptr %a0, ptr %a1) #0 {
 b0:
-  %v0 = alloca i8*, align 4
-  %v1 = alloca i8*, align 4
+  %v0 = alloca ptr, align 4
+  %v1 = alloca ptr, align 4
   %v2 = alloca <16 x i32>, align 64
   %v3 = alloca <16 x i32>, align 64
   %v4 = alloca <32 x i32>, align 128
-  store i8* %a0, i8** %v0, align 4
-  store i8* %a1, i8** %v1, align 4
-  %v5 = load i8*, i8** %v0, align 4
-  %v6 = load <16 x i32>, <16 x i32>* %v2, align 64
-  call void asm sideeffect "  $1 = vmemu($0);\0A", "r,v"(i8* %v5, <16 x i32> %v6) #1, !srcloc !0
-  %v7 = load i8*, i8** %v0, align 4
-  %v8 = load <16 x i32>, <16 x i32>* %v3, align 64
-  call void asm sideeffect "  $1 = vmemu($0);\0A", "r,v"(i8* %v7, <16 x i32> %v8) #1, !srcloc !1
-  %v9 = load <32 x i32>, <32 x i32>* %v4, align 128
-  %v10 = load <16 x i32>, <16 x i32>* %v2, align 64
-  %v11 = load <16 x i32>, <16 x i32>* %v3, align 64
+  store ptr %a0, ptr %v0, align 4
+  store ptr %a1, ptr %v1, align 4
+  %v5 = load ptr, ptr %v0, align 4
+  %v6 = load <16 x i32>, ptr %v2, align 64
+  call void asm sideeffect "  $1 = vmemu($0);\0A", "r,v"(ptr %v5, <16 x i32> %v6) #1, !srcloc !0
+  %v7 = load ptr, ptr %v0, align 4
+  %v8 = load <16 x i32>, ptr %v3, align 64
+  call void asm sideeffect "  $1 = vmemu($0);\0A", "r,v"(ptr %v7, <16 x i32> %v8) #1, !srcloc !1
+  %v9 = load <32 x i32>, ptr %v4, align 128
+  %v10 = load <16 x i32>, ptr %v2, align 64
+  %v11 = load <16 x i32>, ptr %v3, align 64
   call void asm sideeffect "  $0 = vcombine($1,$2);\0A", "v,v,v"(<32 x i32> %v9, <16 x i32> %v10, <16 x i32> %v11) #1, !srcloc !2
-  %v12 = load i8*, i8** %v1, align 4
-  %v13 = load <16 x i32>, <16 x i32>* %v2, align 64
-  call void asm sideeffect "  vmemu($0) = $1;\0A", "r,v,~{memory}"(i8* %v12, <16 x i32> %v13) #1, !srcloc !3
+  %v12 = load ptr, ptr %v1, align 4
+  %v13 = load <16 x i32>, ptr %v2, align 64
+  call void asm sideeffect "  vmemu($0) = $1;\0A", "r,v,~{memory}"(ptr %v12, <16 x i32> %v13) #1, !srcloc !3
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/v6-inlasm4.ll b/llvm/test/CodeGen/Hexagon/v6-inlasm4.ll
index fada5c11732d8..286d0384ca219 100644
--- a/llvm/test/CodeGen/Hexagon/v6-inlasm4.ll
+++ b/llvm/test/CodeGen/Hexagon/v6-inlasm4.ll
@@ -9,12 +9,12 @@ b0:
   %v0 = alloca i32, align 4
   %v1 = alloca <16 x i32>, align 64
   %v2 = alloca <16 x i32>, align 64
-  store i32 %a0, i32* %v0, align 4
-  store <16 x i32> %a1, <16 x i32>* %v1, align 64
-  %v3 = load i32, i32* %v0, align 4
+  store i32 %a0, ptr %v0, align 4
+  store <16 x i32> %a1, ptr %v1, align 64
+  %v3 = load i32, ptr %v0, align 4
   %v4 = tail call <64 x i1> asm sideeffect "  $0 = vsetq($1);\0A", "=q,r"(i32 %v3) #1, !srcloc !0
   %v5 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v4, i32 -1)
-  store <16 x i32> %v5, <16 x i32>* %v2, align 64
+  store <16 x i32> %v5, ptr %v2, align 64
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/v6-shuffl.ll b/llvm/test/CodeGen/Hexagon/v6-shuffl.ll
index 028a9fba2c415..ba49f79a2893d 100644
--- a/llvm/test/CodeGen/Hexagon/v6-shuffl.ll
+++ b/llvm/test/CodeGen/Hexagon/v6-shuffl.ll
@@ -10,33 +10,33 @@ target triple = "hexagon"
 define void @f0() #0 {
 b0:
   %v0 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1)
-  store <16 x i32> %v0, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @g0, i32 0, i32 0), align 64, !tbaa !0
+  store <16 x i32> %v0, ptr @g0, align 64, !tbaa !0
   %v1 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 2)
-  store <16 x i32> %v1, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @g0, i32 0, i32 1), align 64, !tbaa !0
+  store <16 x i32> %v1, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @g0, i32 0, i32 1), align 64, !tbaa !0
   %v2 = tail call <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32> %v0, <16 x i32> %v1)
-  store <32 x i32> %v2, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @g1, i32 0, i32 0), align 128, !tbaa !0
-  store <32 x i32> %v2, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @g1, i32 0, i32 1), align 128, !tbaa !0
+  store <32 x i32> %v2, ptr @g1, align 128, !tbaa !0
+  store <32 x i32> %v2, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @g1, i32 0, i32 1), align 128, !tbaa !0
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 3)
-  store <16 x i32> %v3, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @g0, i32 0, i32 2), align 64, !tbaa !0
+  store <16 x i32> %v3, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @g0, i32 0, i32 2), align 64, !tbaa !0
   %v4 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 4)
-  store <16 x i32> %v4, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @g0, i32 0, i32 3), align 64, !tbaa !0
+  store <16 x i32> %v4, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @g0, i32 0, i32 3), align 64, !tbaa !0
   %v5 = tail call <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32> %v3, <16 x i32> %v4)
-  store <32 x i32> %v5, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @g1, i32 0, i32 2), align 128, !tbaa !0
-  store <32 x i32> %v5, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @g1, i32 0, i32 3), align 128, !tbaa !0
+  store <32 x i32> %v5, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @g1, i32 0, i32 2), align 128, !tbaa !0
+  store <32 x i32> %v5, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @g1, i32 0, i32 3), align 128, !tbaa !0
   %v6 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 5)
-  store <16 x i32> %v6, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @g0, i32 0, i32 4), align 64, !tbaa !0
+  store <16 x i32> %v6, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @g0, i32 0, i32 4), align 64, !tbaa !0
   %v7 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 6)
-  store <16 x i32> %v7, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @g0, i32 0, i32 5), align 64, !tbaa !0
+  store <16 x i32> %v7, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @g0, i32 0, i32 5), align 64, !tbaa !0
   %v8 = tail call <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32> %v6, <16 x i32> %v7)
-  store <32 x i32> %v8, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @g1, i32 0, i32 4), align 128, !tbaa !0
-  store <32 x i32> %v8, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @g1, i32 0, i32 5), align 128, !tbaa !0
+  store <32 x i32> %v8, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @g1, i32 0, i32 4), align 128, !tbaa !0
+  store <32 x i32> %v8, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @g1, i32 0, i32 5), align 128, !tbaa !0
   %v9 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 7)
-  store <16 x i32> %v9, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @g0, i32 0, i32 6), align 64, !tbaa !0
+  store <16 x i32> %v9, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @g0, i32 0, i32 6), align 64, !tbaa !0
   %v10 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 8)
-  store <16 x i32> %v10, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @g0, i32 0, i32 7), align 64, !tbaa !0
+  store <16 x i32> %v10, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @g0, i32 0, i32 7), align 64, !tbaa !0
   %v11 = tail call <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32> %v9, <16 x i32> %v10)
-  store <32 x i32> %v11, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @g1, i32 0, i32 6), align 128, !tbaa !0
-  store <32 x i32> %v11, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @g1, i32 0, i32 7), align 128, !tbaa !0
+  store <32 x i32> %v11, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @g1, i32 0, i32 6), align 128, !tbaa !0
+  store <32 x i32> %v11, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @g1, i32 0, i32 7), align 128, !tbaa !0
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/v6-spill1.ll b/llvm/test/CodeGen/Hexagon/v6-spill1.ll
index c2f37d44b2889..78de91a01b46f 100644
--- a/llvm/test/CodeGen/Hexagon/v6-spill1.ll
+++ b/llvm/test/CodeGen/Hexagon/v6-spill1.ll
@@ -2,7 +2,7 @@
 ; CHECK-NOT: vmemu
 
 ; Function Attrs: nounwind
-define void @f0(i8* nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, i16* nocapture %a4, i16* nocapture %a5) #0 {
+define void @f0(ptr nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, ptr nocapture %a4, ptr nocapture %a5) #0 {
 b0:
   %v0 = tail call i32 @llvm.hexagon.S2.vsplatrb(i32 %a3)
   %v1 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 %v0)
@@ -13,18 +13,15 @@ b0:
   br i1 %v5, label %b1, label %b6
 
 b1:                                               ; preds = %b0
-  %v6 = bitcast i16* %a5 to <16 x i32>*
-  %v7 = bitcast i16* %a4 to <16 x i32>*
   %v8 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v3, <16 x i32> %v3)
   br label %b2
 
 b2:                                               ; preds = %b4, %b1
   %v9 = phi i32 [ 0, %b1 ], [ %v100, %b4 ]
-  %v10 = phi i8* [ %a0, %b1 ], [ %v87, %b4 ]
-  %v11 = phi <16 x i32>* [ %v6, %b1 ], [ %v99, %b4 ]
-  %v12 = phi <16 x i32>* [ %v7, %b1 ], [ %v95, %b4 ]
-  %v13 = bitcast i8* %v10 to <16 x i32>*
-  %v14 = load <16 x i32>, <16 x i32>* %v13, align 64, !tbaa !0
+  %v10 = phi ptr [ %a0, %b1 ], [ %v87, %b4 ]
+  %v11 = phi ptr [ %a5, %b1 ], [ %v99, %b4 ]
+  %v12 = phi ptr [ %a4, %b1 ], [ %v95, %b4 ]
+  %v14 = load <16 x i32>, ptr %v10, align 64, !tbaa !0
   br label %b3
 
 b3:                                               ; preds = %b3, %b2
@@ -32,17 +29,14 @@ b3:                                               ; preds = %b3, %b2
   %v16 = phi <32 x i32> [ %v8, %b2 ], [ %v78, %b3 ]
   %v17 = phi <16 x i32> [ %v3, %b2 ], [ %v82, %b3 ]
   %v18 = mul nsw i32 %v15, %a1
-  %v19 = getelementptr inbounds i8, i8* %v10, i32 %v18
-  %v20 = bitcast i8* %v19 to <16 x i32>*
+  %v19 = getelementptr inbounds i8, ptr %v10, i32 %v18
   %v21 = add i32 %v18, -64
-  %v22 = getelementptr inbounds i8, i8* %v10, i32 %v21
-  %v23 = bitcast i8* %v22 to <16 x i32>*
-  %v24 = load <16 x i32>, <16 x i32>* %v23, align 64, !tbaa !0
-  %v25 = load <16 x i32>, <16 x i32>* %v20, align 64, !tbaa !0
+  %v22 = getelementptr inbounds i8, ptr %v10, i32 %v21
+  %v24 = load <16 x i32>, ptr %v22, align 64, !tbaa !0
+  %v25 = load <16 x i32>, ptr %v19, align 64, !tbaa !0
   %v26 = add i32 %v18, 64
-  %v27 = getelementptr inbounds i8, i8* %v10, i32 %v26
-  %v28 = bitcast i8* %v27 to <16 x i32>*
-  %v29 = load <16 x i32>, <16 x i32>* %v28, align 64, !tbaa !0
+  %v27 = getelementptr inbounds i8, ptr %v10, i32 %v26
+  %v29 = load <16 x i32>, ptr %v27, align 64, !tbaa !0
   %v30 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v25, <16 x i32> %v14)
   %v31 = tail call <64 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v30, <16 x i32> %v1)
   %v32 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<64 x i1> %v31, <16 x i32> %v3, <16 x i32> %v25)
@@ -103,23 +97,23 @@ b3:                                               ; preds = %b3, %b2
 b4:                                               ; preds = %b3
   %v85 = phi <16 x i32> [ %v82, %b3 ]
   %v86 = phi <32 x i32> [ %v78, %b3 ]
-  %v87 = getelementptr inbounds i8, i8* %v10, i32 64
+  %v87 = getelementptr inbounds i8, ptr %v10, i32 64
   %v88 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v86)
   %v89 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v86)
   %v90 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> %v88, <16 x i32> %v89, i32 -2)
   %v91 = tail call <32 x i32> @llvm.hexagon.V6.vunpackub(<16 x i32> %v85)
   %v92 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v90)
-  %v93 = getelementptr inbounds <16 x i32>, <16 x i32>* %v12, i32 1
-  store <16 x i32> %v92, <16 x i32>* %v12, align 64, !tbaa !0
+  %v93 = getelementptr inbounds <16 x i32>, ptr %v12, i32 1
+  store <16 x i32> %v92, ptr %v12, align 64, !tbaa !0
   %v94 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v90)
-  %v95 = getelementptr inbounds <16 x i32>, <16 x i32>* %v12, i32 2
-  store <16 x i32> %v94, <16 x i32>* %v93, align 64, !tbaa !0
+  %v95 = getelementptr inbounds <16 x i32>, ptr %v12, i32 2
+  store <16 x i32> %v94, ptr %v93, align 64, !tbaa !0
   %v96 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v91)
-  %v97 = getelementptr inbounds <16 x i32>, <16 x i32>* %v11, i32 1
-  store <16 x i32> %v96, <16 x i32>* %v11, align 64, !tbaa !0
+  %v97 = getelementptr inbounds <16 x i32>, ptr %v11, i32 1
+  store <16 x i32> %v96, ptr %v11, align 64, !tbaa !0
   %v98 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v91)
-  %v99 = getelementptr inbounds <16 x i32>, <16 x i32>* %v11, i32 2
-  store <16 x i32> %v98, <16 x i32>* %v97, align 64, !tbaa !0
+  %v99 = getelementptr inbounds <16 x i32>, ptr %v11, i32 2
+  store <16 x i32> %v98, ptr %v97, align 64, !tbaa !0
   %v100 = add nsw i32 %v9, 1
   %v101 = icmp slt i32 %v100, %v4
   br i1 %v101, label %b2, label %b5
@@ -180,7 +174,7 @@ declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #1
 declare <32 x i32> @llvm.hexagon.V6.vunpackub(<16 x i32>) #1
 
 ; Function Attrs: nounwind
-define void @f1(i16* nocapture readonly %a0, i16* nocapture readonly %a1, i16* nocapture readonly %a2, i32 %a3, i8* nocapture %a4) #0 {
+define void @f1(ptr nocapture readonly %a0, ptr nocapture readonly %a1, ptr nocapture readonly %a2, i32 %a3, ptr nocapture %a4) #0 {
 b0:
   %v0 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 983055)
   %v1 = sdiv i32 %a3, 64
@@ -188,25 +182,21 @@ b0:
   br i1 %v2, label %b1, label %b4
 
 b1:                                               ; preds = %b0
-  %v3 = bitcast i8* %a4 to <16 x i32>*
-  %v4 = bitcast i16* %a1 to <16 x i32>*
-  %v5 = bitcast i16* %a2 to <16 x i32>*
-  %v6 = bitcast i16* %a0 to <16 x i32>*
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
   %v7 = phi i32 [ 0, %b1 ], [ %v44, %b2 ]
-  %v8 = phi <16 x i32>* [ %v3, %b1 ], [ %v43, %b2 ]
-  %v9 = phi <16 x i32>* [ %v4, %b1 ], [ %v29, %b2 ]
-  %v10 = phi <16 x i32>* [ %v5, %b1 ], [ %v32, %b2 ]
-  %v11 = phi <16 x i32>* [ %v6, %b1 ], [ %v27, %b2 ]
-  %v12 = getelementptr inbounds <16 x i32>, <16 x i32>* %v11, i32 1
-  %v13 = load <16 x i32>, <16 x i32>* %v11, align 64, !tbaa !0
-  %v14 = getelementptr inbounds <16 x i32>, <16 x i32>* %v9, i32 1
-  %v15 = load <16 x i32>, <16 x i32>* %v9, align 64, !tbaa !0
+  %v8 = phi ptr [ %a4, %b1 ], [ %v43, %b2 ]
+  %v9 = phi ptr [ %a1, %b1 ], [ %v29, %b2 ]
+  %v10 = phi ptr [ %a2, %b1 ], [ %v32, %b2 ]
+  %v11 = phi ptr [ %a0, %b1 ], [ %v27, %b2 ]
+  %v12 = getelementptr inbounds <16 x i32>, ptr %v11, i32 1
+  %v13 = load <16 x i32>, ptr %v11, align 64, !tbaa !0
+  %v14 = getelementptr inbounds <16 x i32>, ptr %v9, i32 1
+  %v15 = load <16 x i32>, ptr %v9, align 64, !tbaa !0
   %v16 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %v13, <16 x i32> %v15)
-  %v17 = getelementptr inbounds <16 x i32>, <16 x i32>* %v10, i32 1
-  %v18 = load <16 x i32>, <16 x i32>* %v10, align 64, !tbaa !0
+  %v17 = getelementptr inbounds <16 x i32>, ptr %v10, i32 1
+  %v18 = load <16 x i32>, ptr %v10, align 64, !tbaa !0
   %v19 = tail call <32 x i32> @llvm.hexagon.V6.vaddhw(<16 x i32> %v18, <16 x i32> %v0)
   %v20 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v16)
   %v21 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v19)
@@ -215,13 +205,13 @@ b2:                                               ; preds = %b2, %b1
   %v24 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v19)
   %v25 = tail call <16 x i32> @llvm.hexagon.V6.vlsrwv(<16 x i32> %v23, <16 x i32> %v24)
   %v26 = tail call <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32> %v25, <16 x i32> %v22)
-  %v27 = getelementptr inbounds <16 x i32>, <16 x i32>* %v11, i32 2
-  %v28 = load <16 x i32>, <16 x i32>* %v12, align 64, !tbaa !0
-  %v29 = getelementptr inbounds <16 x i32>, <16 x i32>* %v9, i32 2
-  %v30 = load <16 x i32>, <16 x i32>* %v14, align 64, !tbaa !0
+  %v27 = getelementptr inbounds <16 x i32>, ptr %v11, i32 2
+  %v28 = load <16 x i32>, ptr %v12, align 64, !tbaa !0
+  %v29 = getelementptr inbounds <16 x i32>, ptr %v9, i32 2
+  %v30 = load <16 x i32>, ptr %v14, align 64, !tbaa !0
   %v31 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %v28, <16 x i32> %v30)
-  %v32 = getelementptr inbounds <16 x i32>, <16 x i32>* %v10, i32 2
-  %v33 = load <16 x i32>, <16 x i32>* %v17, align 64, !tbaa !0
+  %v32 = getelementptr inbounds <16 x i32>, ptr %v10, i32 2
+  %v33 = load <16 x i32>, ptr %v17, align 64, !tbaa !0
   %v34 = tail call <32 x i32> @llvm.hexagon.V6.vaddhw(<16 x i32> %v33, <16 x i32> %v0)
   %v35 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v31)
   %v36 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v34)
@@ -231,8 +221,8 @@ b2:                                               ; preds = %b2, %b1
   %v40 = tail call <16 x i32> @llvm.hexagon.V6.vlsrwv(<16 x i32> %v38, <16 x i32> %v39)
   %v41 = tail call <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32> %v40, <16 x i32> %v37)
   %v42 = tail call <16 x i32> @llvm.hexagon.V6.vpackhub.sat(<16 x i32> %v41, <16 x i32> %v26)
-  %v43 = getelementptr inbounds <16 x i32>, <16 x i32>* %v8, i32 1
-  store <16 x i32> %v42, <16 x i32>* %v8, align 64, !tbaa !0
+  %v43 = getelementptr inbounds <16 x i32>, ptr %v8, i32 1
+  store <16 x i32> %v42, ptr %v8, align 64, !tbaa !0
   %v44 = add nsw i32 %v7, 1
   %v45 = icmp slt i32 %v44, %v1
   br i1 %v45, label %b2, label %b3

diff --git a/llvm/test/CodeGen/Hexagon/v6-unaligned-spill.ll b/llvm/test/CodeGen/Hexagon/v6-unaligned-spill.ll
index ea074bf3eea38..8191142468f34 100644
--- a/llvm/test/CodeGen/Hexagon/v6-unaligned-spill.ll
+++ b/llvm/test/CodeGen/Hexagon/v6-unaligned-spill.ll
@@ -8,7 +8,7 @@
 %s.0 = type { [5 x [4 x i8]], i32, i32, i32, i32 }
 
 ; Function Attrs: nounwind
-define i32 @f0(i8* nocapture readonly %a0, i8* nocapture %a1, i8* nocapture readonly %a2, i8* nocapture readonly %a3, i32 %a4, i32 %a5, i32 %a6, %s.0* nocapture readonly %a7) #0 {
+define i32 @f0(ptr nocapture readonly %a0, ptr nocapture %a1, ptr nocapture readonly %a2, ptr nocapture readonly %a3, i32 %a4, i32 %a5, i32 %a6, ptr nocapture readonly %a7) #0 {
 b0:
   %v0 = alloca i8, i32 %a4, align 128
   br i1 undef, label %b1, label %b5
@@ -21,7 +21,7 @@ b2:                                               ; preds = %b3, %b2, %b1
   br i1 undef, label %b3, label %b2
 
 b3:                                               ; preds = %b2
-  call void @f1(i8* undef, i8* undef, i8* nonnull %v0, i32 %a4, i32 %a5, %s.0* %a7)
+  call void @f1(ptr undef, ptr undef, ptr nonnull %v0, i32 %a4, i32 %a5, ptr %a7)
   %v2 = tail call <32 x i32> @llvm.hexagon.V6.vd0.128B() #2
   br i1 %v1, label %b4, label %b2
 
@@ -36,7 +36,7 @@ b5:                                               ; preds = %b0
 }
 
 ; Function Attrs: nounwind
-declare void @f1(i8* nocapture readonly, i8* nocapture readonly, i8* nocapture, i32, i32, %s.0* nocapture readonly) #0
+declare void @f1(ptr nocapture readonly, ptr nocapture readonly, ptr nocapture, i32, i32, ptr nocapture readonly) #0
 
 ; Function Attrs: nounwind readnone
 declare <32 x i32> @llvm.hexagon.V6.vd0.128B() #1

diff --git a/llvm/test/CodeGen/Hexagon/v6-vecpred-copy.ll b/llvm/test/CodeGen/Hexagon/v6-vecpred-copy.ll
index c5cba8cf61559..cf8bc7b495d40 100644
--- a/llvm/test/CodeGen/Hexagon/v6-vecpred-copy.ll
+++ b/llvm/test/CodeGen/Hexagon/v6-vecpred-copy.ll
@@ -26,83 +26,83 @@ target triple = "hexagon"
 define i32 @f0() #0 {
 b0:
   %v0 = call <16 x i32> @llvm.hexagon.V6.vd0()
-  store <16 x i32> %v0, <16 x i32>* @g0, align 64
+  store <16 x i32> %v0, ptr @g0, align 64
   %v1 = call <16 x i32> @llvm.hexagon.V6.vd0()
-  store <16 x i32> %v1, <16 x i32>* @g1, align 64
+  store <16 x i32> %v1, ptr @g1, align 64
   %v2 = call <16 x i32> @llvm.hexagon.V6.vd0()
-  store <16 x i32> %v2, <16 x i32>* @g2, align 64
-  %v3 = load <16 x i32>, <16 x i32>* @g3, align 64
+  store <16 x i32> %v2, ptr @g2, align 64
+  %v3 = load <16 x i32>, ptr @g3, align 64
   %v4 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v3, i32 -1)
-  %v5 = load <16 x i32>, <16 x i32>* @g2, align 64
-  %v6 = load <16 x i32>, <16 x i32>* @g1, align 64
+  %v5 = load <16 x i32>, ptr @g2, align 64
+  %v6 = load <16 x i32>, ptr @g1, align 64
   %v7 = call <16 x i32> @llvm.hexagon.V6.vaddbq(<64 x i1> %v4, <16 x i32> %v5, <16 x i32> %v6)
-  store <16 x i32> %v7, <16 x i32>* @g2, align 64
-  %v8 = load <16 x i32>, <16 x i32>* @g3, align 64
+  store <16 x i32> %v7, ptr @g2, align 64
+  %v8 = load <16 x i32>, ptr @g3, align 64
   %v9 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v8, i32 -1)
-  %v10 = load <16 x i32>, <16 x i32>* @g2, align 64
-  %v11 = load <16 x i32>, <16 x i32>* @g1, align 64
+  %v10 = load <16 x i32>, ptr @g2, align 64
+  %v11 = load <16 x i32>, ptr @g1, align 64
   %v12 = call <16 x i32> @llvm.hexagon.V6.vsubbq(<64 x i1> %v9, <16 x i32> %v10, <16 x i32> %v11)
-  store <16 x i32> %v12, <16 x i32>* @g2, align 64
-  %v13 = load <16 x i32>, <16 x i32>* @g3, align 64
+  store <16 x i32> %v12, ptr @g2, align 64
+  %v13 = load <16 x i32>, ptr @g3, align 64
   %v14 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v13, i32 -1)
-  %v15 = load <16 x i32>, <16 x i32>* @g2, align 64
-  %v16 = load <16 x i32>, <16 x i32>* @g1, align 64
+  %v15 = load <16 x i32>, ptr @g2, align 64
+  %v16 = load <16 x i32>, ptr @g1, align 64
   %v17 = call <16 x i32> @llvm.hexagon.V6.vaddhq(<64 x i1> %v14, <16 x i32> %v15, <16 x i32> %v16)
-  store <16 x i32> %v17, <16 x i32>* @g2, align 64
-  %v18 = load <16 x i32>, <16 x i32>* @g3, align 64
+  store <16 x i32> %v17, ptr @g2, align 64
+  %v18 = load <16 x i32>, ptr @g3, align 64
   %v19 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v18, i32 -1)
-  %v20 = load <16 x i32>, <16 x i32>* @g2, align 64
-  %v21 = load <16 x i32>, <16 x i32>* @g1, align 64
+  %v20 = load <16 x i32>, ptr @g2, align 64
+  %v21 = load <16 x i32>, ptr @g1, align 64
   %v22 = call <16 x i32> @llvm.hexagon.V6.vsubhq(<64 x i1> %v19, <16 x i32> %v20, <16 x i32> %v21)
-  store <16 x i32> %v22, <16 x i32>* @g2, align 64
-  %v23 = load <16 x i32>, <16 x i32>* @g3, align 64
+  store <16 x i32> %v22, ptr @g2, align 64
+  %v23 = load <16 x i32>, ptr @g3, align 64
   %v24 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v23, i32 -1)
-  %v25 = load <16 x i32>, <16 x i32>* @g2, align 64
-  %v26 = load <16 x i32>, <16 x i32>* @g1, align 64
+  %v25 = load <16 x i32>, ptr @g2, align 64
+  %v26 = load <16 x i32>, ptr @g1, align 64
   %v27 = call <16 x i32> @llvm.hexagon.V6.vaddwq(<64 x i1> %v24, <16 x i32> %v25, <16 x i32> %v26)
-  store <16 x i32> %v27, <16 x i32>* @g2, align 64
-  %v28 = load <16 x i32>, <16 x i32>* @g3, align 64
+  store <16 x i32> %v27, ptr @g2, align 64
+  %v28 = load <16 x i32>, ptr @g3, align 64
   %v29 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v28, i32 -1)
-  %v30 = load <16 x i32>, <16 x i32>* @g2, align 64
-  %v31 = load <16 x i32>, <16 x i32>* @g1, align 64
+  %v30 = load <16 x i32>, ptr @g2, align 64
+  %v31 = load <16 x i32>, ptr @g1, align 64
   %v32 = call <16 x i32> @llvm.hexagon.V6.vsubwq(<64 x i1> %v29, <16 x i32> %v30, <16 x i32> %v31)
-  store <16 x i32> %v32, <16 x i32>* @g2, align 64
-  %v33 = load <16 x i32>, <16 x i32>* @g3, align 64
+  store <16 x i32> %v32, ptr @g2, align 64
+  %v33 = load <16 x i32>, ptr @g3, align 64
   %v34 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v33, i32 -1)
-  %v35 = load <16 x i32>, <16 x i32>* @g2, align 64
-  %v36 = load <16 x i32>, <16 x i32>* @g1, align 64
+  %v35 = load <16 x i32>, ptr @g2, align 64
+  %v36 = load <16 x i32>, ptr @g1, align 64
   %v37 = call <16 x i32> @llvm.hexagon.V6.vaddbnq(<64 x i1> %v34, <16 x i32> %v35, <16 x i32> %v36)
-  store <16 x i32> %v37, <16 x i32>* @g2, align 64
-  %v38 = load <16 x i32>, <16 x i32>* @g3, align 64
+  store <16 x i32> %v37, ptr @g2, align 64
+  %v38 = load <16 x i32>, ptr @g3, align 64
   %v39 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v38, i32 -1)
-  %v40 = load <16 x i32>, <16 x i32>* @g2, align 64
-  %v41 = load <16 x i32>, <16 x i32>* @g1, align 64
+  %v40 = load <16 x i32>, ptr @g2, align 64
+  %v41 = load <16 x i32>, ptr @g1, align 64
   %v42 = call <16 x i32> @llvm.hexagon.V6.vsubbnq(<64 x i1> %v39, <16 x i32> %v40, <16 x i32> %v41)
-  store <16 x i32> %v42, <16 x i32>* @g2, align 64
-  %v43 = load <16 x i32>, <16 x i32>* @g3, align 64
+  store <16 x i32> %v42, ptr @g2, align 64
+  %v43 = load <16 x i32>, ptr @g3, align 64
   %v44 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v43, i32 -1)
-  %v45 = load <16 x i32>, <16 x i32>* @g2, align 64
-  %v46 = load <16 x i32>, <16 x i32>* @g1, align 64
+  %v45 = load <16 x i32>, ptr @g2, align 64
+  %v46 = load <16 x i32>, ptr @g1, align 64
   %v47 = call <16 x i32> @llvm.hexagon.V6.vaddhnq(<64 x i1> %v44, <16 x i32> %v45, <16 x i32> %v46)
-  store <16 x i32> %v47, <16 x i32>* @g2, align 64
-  %v48 = load <16 x i32>, <16 x i32>* @g3, align 64
+  store <16 x i32> %v47, ptr @g2, align 64
+  %v48 = load <16 x i32>, ptr @g3, align 64
   %v49 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v48, i32 -1)
-  %v50 = load <16 x i32>, <16 x i32>* @g2, align 64
-  %v51 = load <16 x i32>, <16 x i32>* @g1, align 64
+  %v50 = load <16 x i32>, ptr @g2, align 64
+  %v51 = load <16 x i32>, ptr @g1, align 64
   %v52 = call <16 x i32> @llvm.hexagon.V6.vsubhnq(<64 x i1> %v49, <16 x i32> %v50, <16 x i32> %v51)
-  store <16 x i32> %v52, <16 x i32>* @g2, align 64
-  %v53 = load <16 x i32>, <16 x i32>* @g3, align 64
+  store <16 x i32> %v52, ptr @g2, align 64
+  %v53 = load <16 x i32>, ptr @g3, align 64
   %v54 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v53, i32 -1)
-  %v55 = load <16 x i32>, <16 x i32>* @g2, align 64
-  %v56 = load <16 x i32>, <16 x i32>* @g1, align 64
+  %v55 = load <16 x i32>, ptr @g2, align 64
+  %v56 = load <16 x i32>, ptr @g1, align 64
   %v57 = call <16 x i32> @llvm.hexagon.V6.vaddwnq(<64 x i1> %v54, <16 x i32> %v55, <16 x i32> %v56)
-  store <16 x i32> %v57, <16 x i32>* @g2, align 64
-  %v58 = load <16 x i32>, <16 x i32>* @g3, align 64
+  store <16 x i32> %v57, ptr @g2, align 64
+  %v58 = load <16 x i32>, ptr @g3, align 64
   %v59 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v58, i32 -1)
-  %v60 = load <16 x i32>, <16 x i32>* @g2, align 64
-  %v61 = load <16 x i32>, <16 x i32>* @g1, align 64
+  %v60 = load <16 x i32>, ptr @g2, align 64
+  %v61 = load <16 x i32>, ptr @g1, align 64
   %v62 = call <16 x i32> @llvm.hexagon.V6.vsubwnq(<64 x i1> %v59, <16 x i32> %v60, <16 x i32> %v61)
-  store <16 x i32> %v62, <16 x i32>* @g2, align 64
+  store <16 x i32> %v62, ptr @g2, align 64
   ret i32 0
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/v60-align.ll b/llvm/test/CodeGen/Hexagon/v60-align.ll
index 71b976994b7a7..56dbd715c5c03 100644
--- a/llvm/test/CodeGen/Hexagon/v60-align.ll
+++ b/llvm/test/CodeGen/Hexagon/v60-align.ll
@@ -8,13 +8,12 @@ target triple = "hexagon"
 define i32 @f0() #0 {
 b0:
   %v0 = alloca <16 x i32>, align 64
-  %v1 = bitcast <16 x i32>* %v0 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 64, i8* %v1) #3
+  call void @llvm.lifetime.start.p0(i64 64, ptr %v0) #3
   %v2 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1)
   %v3 = tail call <16 x i32> @llvm.hexagon.V6.vsubh.rt(<16 x i32> %v2, i32 -1)
-  store <16 x i32> %v3, <16 x i32>* %v0, align 64, !tbaa !0
-  call void @f1(i32 64, i8* %v1) #3
-  call void @llvm.lifetime.end.p0i8(i64 64, i8* %v1) #3
+  store <16 x i32> %v3, ptr %v0, align 64, !tbaa !0
+  call void @f1(i32 64, ptr %v0) #3
+  call void @llvm.lifetime.end.p0(i64 64, ptr %v0) #3
   ret i32 0
 }
 
@@ -24,13 +23,13 @@ declare <16 x i32> @llvm.hexagon.V6.vsubh.rt(<16 x i32>, i32) #1
 ; Function Attrs: nounwind readnone
 declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32) #1
 
-declare void @f1(i32, i8*) #0
+declare void @f1(i32, ptr) #0
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #2
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #2
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #2
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #2
 
 attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
 attributes #1 = { nounwind readnone }

diff --git a/llvm/test/CodeGen/Hexagon/v60-cur.ll b/llvm/test/CodeGen/Hexagon/v60-cur.ll
index 0f3204de3259b..f6db1968c34af 100644
--- a/llvm/test/CodeGen/Hexagon/v60-cur.ll
+++ b/llvm/test/CodeGen/Hexagon/v60-cur.ll
@@ -5,7 +5,7 @@
 ; CHECK: v{{[0-9]*}}.cur
 
 ; Function Attrs: nounwind
-define void @f0(i8* noalias nocapture readonly %a0, i32 %a1, i32 %a2, <16 x i32>* %a3, <16 x i32>* %a4) #0 {
+define void @f0(ptr noalias nocapture readonly %a0, i32 %a1, i32 %a2, ptr %a3, ptr %a4) #0 {
 b0:
   br i1 undef, label %b1, label %b3
 
@@ -13,14 +13,13 @@ b1:                                               ; preds = %b0
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
-  %v0 = phi i8* [ %a0, %b1 ], [ %v4, %b2 ]
+  %v0 = phi ptr [ %a0, %b1 ], [ %v4, %b2 ]
   %v1 = phi i32 [ 0, %b1 ], [ %v23, %b2 ]
   %v2 = phi <16 x i32> [ zeroinitializer, %b1 ], [ %v6, %b2 ]
   %v3 = phi <16 x i32> [ zeroinitializer, %b1 ], [ zeroinitializer, %b2 ]
-  %v4 = getelementptr inbounds i8, i8* %v0, i32 64
-  %v5 = bitcast i8* %v4 to <16 x i32>*
-  %v6 = load <16 x i32>, <16 x i32>* %v5, align 64, !tbaa !0
-  %v7 = load <16 x i32>, <16 x i32>* %a3, align 64, !tbaa !0
+  %v4 = getelementptr inbounds i8, ptr %v0, i32 64
+  %v6 = load <16 x i32>, ptr %v4, align 64, !tbaa !0
+  %v7 = load <16 x i32>, ptr %a3, align 64, !tbaa !0
   %v8 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v6, <16 x i32> %v2, i32 4)
   %v9 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> zeroinitializer, <16 x i32> %v3, i32 4)
   %v10 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v7, <16 x i32> zeroinitializer, i32 4)
@@ -32,12 +31,12 @@ b2:                                               ; preds = %b2, %b1
   %v16 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v15)
   %v17 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %v16, <16 x i32> undef, i32 %a1)
   %v18 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> undef, <16 x i32> %v17)
-  store <16 x i32> %v18, <16 x i32>* %a3, align 64, !tbaa !0
+  store <16 x i32> %v18, ptr %a3, align 64, !tbaa !0
   %v19 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> zeroinitializer, <32 x i32> %v12, i32 undef, i32 1)
   %v20 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v19)
   %v21 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %v20, <16 x i32> undef, i32 %a1)
   %v22 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v21, <16 x i32> undef)
-  store <16 x i32> %v22, <16 x i32>* %a4, align 64, !tbaa !0
+  store <16 x i32> %v22, ptr %a4, align 64, !tbaa !0
   %v23 = add nsw i32 %v1, 64
   %v24 = icmp slt i32 %v23, %a2
   br i1 %v24, label %b2, label %b3

diff --git a/llvm/test/CodeGen/Hexagon/v60-haar-postinc.ll b/llvm/test/CodeGen/Hexagon/v60-haar-postinc.ll
index b842e948f7a49..9198177772185 100644
--- a/llvm/test/CodeGen/Hexagon/v60-haar-postinc.ll
+++ b/llvm/test/CodeGen/Hexagon/v60-haar-postinc.ll
@@ -8,7 +8,7 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @f0(i16* nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, i8* nocapture %a4, i32 %a5) #0 {
+define void @f0(ptr nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, ptr nocapture %a4, i32 %a5) #0 {
 b0:
   %v0 = ashr i32 %a3, 2
   %v1 = ashr i32 %a3, 1
@@ -34,36 +34,30 @@ b3:                                               ; preds = %b2
   %v12 = add i32 %v2, %v8
   %v13 = add i32 %v8, %v0
   %v14 = add i32 %v8, %v1
-  %v15 = getelementptr inbounds i8, i8* %a4, i32 %v10
-  %v16 = getelementptr inbounds i8, i8* %a4, i32 %v11
-  %v17 = getelementptr inbounds i16, i16* %a0, i32 %v12
-  %v18 = getelementptr inbounds i16, i16* %a0, i32 %v13
-  %v19 = getelementptr inbounds i16, i16* %a0, i32 %v14
-  %v20 = getelementptr inbounds i16, i16* %a0, i32 %v8
-  %v21 = bitcast i8* %v15 to <16 x i32>*
-  %v22 = bitcast i8* %v16 to <16 x i32>*
-  %v23 = bitcast i16* %v17 to <16 x i32>*
-  %v24 = bitcast i16* %v18 to <16 x i32>*
-  %v25 = bitcast i16* %v19 to <16 x i32>*
-  %v26 = bitcast i16* %v20 to <16 x i32>*
+  %v15 = getelementptr inbounds i8, ptr %a4, i32 %v10
+  %v16 = getelementptr inbounds i8, ptr %a4, i32 %v11
+  %v17 = getelementptr inbounds i16, ptr %a0, i32 %v12
+  %v18 = getelementptr inbounds i16, ptr %a0, i32 %v13
+  %v19 = getelementptr inbounds i16, ptr %a0, i32 %v14
+  %v20 = getelementptr inbounds i16, ptr %a0, i32 %v8
   br label %b4
 
 b4:                                               ; preds = %b4, %b3
   %v27 = phi i32 [ 0, %b3 ], [ %v54, %b4 ]
-  %v28 = phi <16 x i32>* [ %v26, %b3 ], [ %v34, %b4 ]
-  %v29 = phi <16 x i32>* [ %v25, %b3 ], [ %v36, %b4 ]
-  %v30 = phi <16 x i32>* [ %v24, %b3 ], [ %v38, %b4 ]
-  %v31 = phi <16 x i32>* [ %v23, %b3 ], [ %v40, %b4 ]
-  %v32 = phi <16 x i32>* [ %v21, %b3 ], [ %v53, %b4 ]
-  %v33 = phi <16 x i32>* [ %v22, %b3 ], [ %v52, %b4 ]
-  %v34 = getelementptr inbounds <16 x i32>, <16 x i32>* %v28, i32 1
-  %v35 = load <16 x i32>, <16 x i32>* %v28, align 64, !tbaa !0
-  %v36 = getelementptr inbounds <16 x i32>, <16 x i32>* %v29, i32 1
-  %v37 = load <16 x i32>, <16 x i32>* %v29, align 64, !tbaa !0
-  %v38 = getelementptr inbounds <16 x i32>, <16 x i32>* %v30, i32 1
-  %v39 = load <16 x i32>, <16 x i32>* %v30, align 64, !tbaa !0
-  %v40 = getelementptr inbounds <16 x i32>, <16 x i32>* %v31, i32 1
-  %v41 = load <16 x i32>, <16 x i32>* %v31, align 64, !tbaa !0
+  %v28 = phi ptr [ %v20, %b3 ], [ %v34, %b4 ]
+  %v29 = phi ptr [ %v19, %b3 ], [ %v36, %b4 ]
+  %v30 = phi ptr [ %v18, %b3 ], [ %v38, %b4 ]
+  %v31 = phi ptr [ %v17, %b3 ], [ %v40, %b4 ]
+  %v32 = phi ptr [ %v15, %b3 ], [ %v53, %b4 ]
+  %v33 = phi ptr [ %v16, %b3 ], [ %v52, %b4 ]
+  %v34 = getelementptr inbounds <16 x i32>, ptr %v28, i32 1
+  %v35 = load <16 x i32>, ptr %v28, align 64, !tbaa !0
+  %v36 = getelementptr inbounds <16 x i32>, ptr %v29, i32 1
+  %v37 = load <16 x i32>, ptr %v29, align 64, !tbaa !0
+  %v38 = getelementptr inbounds <16 x i32>, ptr %v30, i32 1
+  %v39 = load <16 x i32>, ptr %v30, align 64, !tbaa !0
+  %v40 = getelementptr inbounds <16 x i32>, ptr %v31, i32 1
+  %v41 = load <16 x i32>, ptr %v31, align 64, !tbaa !0
   %v42 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v35, <16 x i32> %v37)
   %v43 = tail call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> %v35, <16 x i32> %v37)
   %v44 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v39, <16 x i32> %v41)
@@ -74,10 +68,10 @@ b4:                                               ; preds = %b4, %b3
   %v49 = tail call <16 x i32> @llvm.hexagon.V6.vnavgh(<16 x i32> %v43, <16 x i32> %v45)
   %v50 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v47, <16 x i32> %v46)
   %v51 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v49, <16 x i32> %v48)
-  %v52 = getelementptr inbounds <16 x i32>, <16 x i32>* %v33, i32 1
-  store <16 x i32> %v50, <16 x i32>* %v33, align 64, !tbaa !0
-  %v53 = getelementptr inbounds <16 x i32>, <16 x i32>* %v32, i32 1
-  store <16 x i32> %v51, <16 x i32>* %v32, align 64, !tbaa !0
+  %v52 = getelementptr inbounds <16 x i32>, ptr %v33, i32 1
+  store <16 x i32> %v50, ptr %v33, align 64, !tbaa !0
+  %v53 = getelementptr inbounds <16 x i32>, ptr %v32, i32 1
+  store <16 x i32> %v51, ptr %v32, align 64, !tbaa !0
   %v54 = add nsw i32 %v27, 1
   %v55 = icmp slt i32 %v54, %v4
   br i1 %v55, label %b4, label %b5

diff --git a/llvm/test/CodeGen/Hexagon/v60-halide-vcombinei8.ll b/llvm/test/CodeGen/Hexagon/v60-halide-vcombinei8.ll
index de91ac3326578..59a9c13fcc0b8 100644
--- a/llvm/test/CodeGen/Hexagon/v60-halide-vcombinei8.ll
+++ b/llvm/test/CodeGen/Hexagon/v60-halide-vcombinei8.ll
@@ -6,15 +6,15 @@
 target triple = "hexagon-unknown--elf"
 
 ; Function Attrs: norecurse nounwind
-define void @f0(<64 x i8>* %a0) #0 {
+define void @f0(ptr %a0) #0 {
 b0:                                               ; preds = %b3
-  %v0 = load <64 x i8>, <64 x i8>* %a0, align 1
+  %v0 = load <64 x i8>, ptr %a0, align 1
   %v1 = shufflevector <64 x i8> %v0, <64 x i8> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
   %v2 = bitcast <128 x i8> %v1 to <32 x i32>
   %v3 = tail call <32 x i32> @llvm.hexagon.V6.vaddb.dv(<32 x i32> undef, <32 x i32> %v2)
   %v4 = bitcast <32 x i32> %v3 to <128 x i8>
   %v5 = shufflevector <128 x i8> %v4, <128 x i8> undef, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
-  store <64 x i8> %v5, <64 x i8>* %a0, align 1, !tbaa !2
+  store <64 x i8> %v5, ptr %a0, align 1, !tbaa !2
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/v60-vec-128b-1.ll b/llvm/test/CodeGen/Hexagon/v60-vec-128b-1.ll
index 0e8dbdbc4bad1..ce2e1b5459aed 100644
--- a/llvm/test/CodeGen/Hexagon/v60-vec-128b-1.ll
+++ b/llvm/test/CodeGen/Hexagon/v60-vec-128b-1.ll
@@ -12,15 +12,15 @@ target triple = "hexagon"
 define i32 @f0() #0 {
 b0:
   %v0 = alloca i32, align 4
-  store i32 0, i32* %v0
+  store i32 0, ptr %v0
   %v1 = call i32 @f1(i8 zeroext 0)
-  call void bitcast (void (...)* @f2 to void ()*)()
+  call void @f2()
   %v2 = call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 1)
   %v3 = call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 2)
   %v4 = call <64 x i32> @llvm.hexagon.V6.vaddubh.128B(<32 x i32> %v2, <32 x i32> %v3)
   %v5 = call <64 x i32> @llvm.hexagon.V6.vtmpyhb.128B(<64 x i32> %v4, i32 12)
-  store <64 x i32> %v5, <64 x i32>* @g0, align 256
-  call void @f3(i32 2048, i8* bitcast (<64 x i32>* @g0 to i8*))
+  store <64 x i32> %v5, ptr @g0, align 256
+  call void @f3(i32 2048, ptr @g0)
   ret i32 0
 }
 
@@ -37,7 +37,7 @@ declare <64 x i32> @llvm.hexagon.V6.vaddubh.128B(<32 x i32>, <32 x i32>) #1
 ; Function Attrs: nounwind readnone
 declare <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32) #1
 
-declare void @f3(i32, i8*) #0
+declare void @f3(i32, ptr) #0
 
 attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length128b" }
 attributes #1 = { nounwind readnone }

diff --git a/llvm/test/CodeGen/Hexagon/v60-vecpred-spill.ll b/llvm/test/CodeGen/Hexagon/v60-vecpred-spill.ll
index f03a0cc438f02..ba388b89410fb 100644
--- a/llvm/test/CodeGen/Hexagon/v60-vecpred-spill.ll
+++ b/llvm/test/CodeGen/Hexagon/v60-vecpred-spill.ll
@@ -6,7 +6,7 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @f0(i8* nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, i16* nocapture %a4, i16* nocapture %a5) #0 {
+define void @f0(ptr nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, ptr nocapture %a4, ptr nocapture %a5) #0 {
 b0:
   %v0 = tail call i32 @llvm.hexagon.S2.vsplatrb(i32 %a3)
   %v1 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 %v0)
@@ -17,18 +17,15 @@ b0:
   br i1 %v5, label %b1, label %b6
 
 b1:                                               ; preds = %b0
-  %v6 = bitcast i16* %a5 to <16 x i32>*
-  %v7 = bitcast i16* %a4 to <16 x i32>*
   %v8 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v3, <16 x i32> %v3)
   br label %b2
 
 b2:                                               ; preds = %b4, %b1
   %v9 = phi i32 [ 0, %b1 ], [ %v100, %b4 ]
-  %v10 = phi i8* [ %a0, %b1 ], [ %v87, %b4 ]
-  %v11 = phi <16 x i32>* [ %v6, %b1 ], [ %v99, %b4 ]
-  %v12 = phi <16 x i32>* [ %v7, %b1 ], [ %v95, %b4 ]
-  %v13 = bitcast i8* %v10 to <16 x i32>*
-  %v14 = load <16 x i32>, <16 x i32>* %v13, align 64, !tbaa !0
+  %v10 = phi ptr [ %a0, %b1 ], [ %v87, %b4 ]
+  %v11 = phi ptr [ %a5, %b1 ], [ %v99, %b4 ]
+  %v12 = phi ptr [ %a4, %b1 ], [ %v95, %b4 ]
+  %v14 = load <16 x i32>, ptr %v10, align 64, !tbaa !0
   br label %b3
 
 b3:                                               ; preds = %b3, %b2
@@ -36,17 +33,14 @@ b3:                                               ; preds = %b3, %b2
   %v16 = phi <32 x i32> [ %v8, %b2 ], [ %v78, %b3 ]
   %v17 = phi <16 x i32> [ %v3, %b2 ], [ %v82, %b3 ]
   %v18 = mul nsw i32 %v15, %a1
-  %v19 = getelementptr inbounds i8, i8* %v10, i32 %v18
-  %v20 = bitcast i8* %v19 to <16 x i32>*
+  %v19 = getelementptr inbounds i8, ptr %v10, i32 %v18
   %v21 = add i32 %v18, -64
-  %v22 = getelementptr inbounds i8, i8* %v10, i32 %v21
-  %v23 = bitcast i8* %v22 to <16 x i32>*
-  %v24 = load <16 x i32>, <16 x i32>* %v23, align 64, !tbaa !0
-  %v25 = load <16 x i32>, <16 x i32>* %v20, align 64, !tbaa !0
+  %v22 = getelementptr inbounds i8, ptr %v10, i32 %v21
+  %v24 = load <16 x i32>, ptr %v22, align 64, !tbaa !0
+  %v25 = load <16 x i32>, ptr %v19, align 64, !tbaa !0
   %v26 = add i32 %v18, 64
-  %v27 = getelementptr inbounds i8, i8* %v10, i32 %v26
-  %v28 = bitcast i8* %v27 to <16 x i32>*
-  %v29 = load <16 x i32>, <16 x i32>* %v28, align 64, !tbaa !0
+  %v27 = getelementptr inbounds i8, ptr %v10, i32 %v26
+  %v29 = load <16 x i32>, ptr %v27, align 64, !tbaa !0
   %v30 = tail call <16 x i32> @llvm.hexagon.V6.vabs
diff ub(<16 x i32> %v25, <16 x i32> %v14)
   %v31 = tail call <64 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v30, <16 x i32> %v1)
   %v32 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<64 x i1> %v31, <16 x i32> %v3, <16 x i32> %v25)
@@ -107,23 +101,23 @@ b3:                                               ; preds = %b3, %b2
 b4:                                               ; preds = %b3
   %v85 = phi <16 x i32> [ %v82, %b3 ]
   %v86 = phi <32 x i32> [ %v78, %b3 ]
-  %v87 = getelementptr inbounds i8, i8* %v10, i32 64
+  %v87 = getelementptr inbounds i8, ptr %v10, i32 64
   %v88 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v86)
   %v89 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v86)
   %v90 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> %v88, <16 x i32> %v89, i32 -2)
   %v91 = tail call <32 x i32> @llvm.hexagon.V6.vunpackub(<16 x i32> %v85)
   %v92 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v90)
-  %v93 = getelementptr inbounds <16 x i32>, <16 x i32>* %v12, i32 1
-  store <16 x i32> %v92, <16 x i32>* %v12, align 64, !tbaa !0
+  %v93 = getelementptr inbounds <16 x i32>, ptr %v12, i32 1
+  store <16 x i32> %v92, ptr %v12, align 64, !tbaa !0
   %v94 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v90)
-  %v95 = getelementptr inbounds <16 x i32>, <16 x i32>* %v12, i32 2
-  store <16 x i32> %v94, <16 x i32>* %v93, align 64, !tbaa !0
+  %v95 = getelementptr inbounds <16 x i32>, ptr %v12, i32 2
+  store <16 x i32> %v94, ptr %v93, align 64, !tbaa !0
   %v96 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v91)
-  %v97 = getelementptr inbounds <16 x i32>, <16 x i32>* %v11, i32 1
-  store <16 x i32> %v96, <16 x i32>* %v11, align 64, !tbaa !0
+  %v97 = getelementptr inbounds <16 x i32>, ptr %v11, i32 1
+  store <16 x i32> %v96, ptr %v11, align 64, !tbaa !0
   %v98 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v91)
-  %v99 = getelementptr inbounds <16 x i32>, <16 x i32>* %v11, i32 2
-  store <16 x i32> %v98, <16 x i32>* %v97, align 64, !tbaa !0
+  %v99 = getelementptr inbounds <16 x i32>, ptr %v11, i32 2
+  store <16 x i32> %v98, ptr %v97, align 64, !tbaa !0
   %v100 = add nsw i32 %v9, 1
   %v101 = icmp slt i32 %v100, %v4
   br i1 %v101, label %b2, label %b5

diff  --git a/llvm/test/CodeGen/Hexagon/v60-vsel1.ll b/llvm/test/CodeGen/Hexagon/v60-vsel1.ll
index 5da450b80459b..6572964685e36 100644
--- a/llvm/test/CodeGen/Hexagon/v60-vsel1.ll
+++ b/llvm/test/CodeGen/Hexagon/v60-vsel1.ll
@@ -5,56 +5,54 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @fast9_detect_coarse(i8* nocapture readnone %img, i32 %xsize, i32 %stride, i32 %barrier, i32* nocapture %bitmask, i32 %boundary) #0 {
+define void @fast9_detect_coarse(ptr nocapture readnone %img, i32 %xsize, i32 %stride, i32 %barrier, ptr nocapture %bitmask, i32 %boundary) #0 {
 entry:
-  %0 = bitcast i32* %bitmask to <16 x i32>*
-  %1 = mul i32 %boundary, -2
-  %sub = add i32 %1, %xsize
+  %0 = mul i32 %boundary, -2
+  %sub = add i32 %0, %xsize
   %rem = and i32 %boundary, 63
   %add = add i32 %sub, %rem
-  %2 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 -1)
-  %3 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1)
-  %4 = tail call <64 x i1> @llvm.hexagon.V6.pred.scalar2(i32 %add)
-  %5 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt.acc(<16 x i32> %3, <64 x i1> %4, i32 12)
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 -1)
+  %2 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1)
+  %3 = tail call <64 x i1> @llvm.hexagon.V6.pred.scalar2(i32 %add)
+  %4 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt.acc(<16 x i32> %2, <64 x i1> %3, i32 12)
   %and4 = and i32 %add, 511
   %cmp = icmp eq i32 %and4, 0
-  %sMaskR.0 = select i1 %cmp, <16 x i32> %2, <16 x i32> %5
+  %sMaskR.0 = select i1 %cmp, <16 x i32> %1, <16 x i32> %4
   %cmp547 = icmp sgt i32 %add, 0
   br i1 %cmp547, label %for.body.lr.ph, label %for.end
 
 for.body.lr.ph:                                   ; preds = %entry
-  %6 = tail call <64 x i1> @llvm.hexagon.V6.pred.scalar2(i32 %boundary)
-  %7 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %6, i32 16843009)
-  %8 = tail call <16 x i32> @llvm.hexagon.V6.vnot(<16 x i32> %7)
-  %9 = add i32 %rem, %xsize
-  %10 = add i32 %9, -1
-  %11 = add i32 %10, %1
-  %12 = lshr i32 %11, 9
-  %13 = mul i32 %12, 16
-  %14 = add nuw nsw i32 %13, 16
-  %scevgep = getelementptr i32, i32* %bitmask, i32 %14
+  %5 = tail call <64 x i1> @llvm.hexagon.V6.pred.scalar2(i32 %boundary)
+  %6 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %5, i32 16843009)
+  %7 = tail call <16 x i32> @llvm.hexagon.V6.vnot(<16 x i32> %6)
+  %8 = add i32 %rem, %xsize
+  %9 = add i32 %8, -1
+  %10 = add i32 %9, %0
+  %11 = lshr i32 %10, 9
+  %12 = mul i32 %11, 16
+  %13 = add nuw nsw i32 %12, 16
+  %scevgep = getelementptr i32, ptr %bitmask, i32 %13
   br label %for.body
 
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.050 = phi i32 [ %add, %for.body.lr.ph ], [ %sub6, %for.body ]
-  %sMask.049 = phi <16 x i32> [ %8, %for.body.lr.ph ], [ %2, %for.body ]
-  %optr.048 = phi <16 x i32>* [ %0, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
-  %15 = tail call <16 x i32> @llvm.hexagon.V6.vand(<16 x i32> undef, <16 x i32> %sMask.049)
-  %incdec.ptr = getelementptr inbounds <16 x i32>, <16 x i32>* %optr.048, i32 1
-  store <16 x i32> %15, <16 x i32>* %optr.048, align 64
+  %sMask.049 = phi <16 x i32> [ %7, %for.body.lr.ph ], [ %1, %for.body ]
+  %optr.048 = phi ptr [ %bitmask, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
+  %14 = tail call <16 x i32> @llvm.hexagon.V6.vand(<16 x i32> undef, <16 x i32> %sMask.049)
+  %incdec.ptr = getelementptr inbounds <16 x i32>, ptr %optr.048, i32 1
+  store <16 x i32> %14, ptr %optr.048, align 64
   %sub6 = add nsw i32 %i.050, -512
   %cmp5 = icmp sgt i32 %sub6, 0
   br i1 %cmp5, label %for.body, label %for.cond.for.end_crit_edge
 
 for.cond.for.end_crit_edge:                       ; preds = %for.body
-  %scevgep51 = bitcast i32* %scevgep to <16 x i32>*
   br label %for.end
 
 for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry
-  %optr.0.lcssa = phi <16 x i32>* [ %scevgep51, %for.cond.for.end_crit_edge ], [ %0, %entry ]
-  %16 = load <16 x i32>, <16 x i32>* %optr.0.lcssa, align 64
-  %17 = tail call <16 x i32> @llvm.hexagon.V6.vand(<16 x i32> %16, <16 x i32> %sMaskR.0)
-  store <16 x i32> %17, <16 x i32>* %optr.0.lcssa, align 64
+  %optr.0.lcssa = phi ptr [ %scevgep, %for.cond.for.end_crit_edge ], [ %bitmask, %entry ]
+  %15 = load <16 x i32>, ptr %optr.0.lcssa, align 64
+  %16 = tail call <16 x i32> @llvm.hexagon.V6.vand(<16 x i32> %15, <16 x i32> %sMaskR.0)
+  store <16 x i32> %16, ptr %optr.0.lcssa, align 64
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/v60-vsel2.ll b/llvm/test/CodeGen/Hexagon/v60-vsel2.ll
index 8db3dd4ded0f0..1883c4bea114d 100644
--- a/llvm/test/CodeGen/Hexagon/v60-vsel2.ll
+++ b/llvm/test/CodeGen/Hexagon/v60-vsel2.ll
@@ -5,9 +5,8 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @f0(i8* nocapture readnone %a0, i32 %a1, i32 %a2, i32 %a3, i32* nocapture %a4, i32 %a5) #0 {
+define void @f0(ptr nocapture readnone %a0, i32 %a1, i32 %a2, i32 %a3, ptr nocapture %a4, i32 %a5) #0 {
 b0:
-  %v0 = bitcast i32* %a4 to <16 x i32>*
   %v1 = mul i32 %a5, -2
   %v2 = add i32 %v1, %a1
   %v3 = and i32 %a5, 63
@@ -40,30 +39,29 @@ b3:                                               ; preds = %b2
   %v21 = lshr i32 %v20, 9
   %v22 = mul i32 %v21, 16
   %v23 = add nuw nsw i32 %v22, 16
-  %v24 = getelementptr i32, i32* %a4, i32 %v23
+  %v24 = getelementptr i32, ptr %a4, i32 %v23
   br label %b4
 
 b4:                                               ; preds = %b4, %b3
   %v25 = phi i32 [ %v4, %b3 ], [ %v30, %b4 ]
   %v26 = phi <16 x i32> [ %v17, %b3 ], [ %v5, %b4 ]
-  %v27 = phi <16 x i32>* [ %v0, %b3 ], [ %v29, %b4 ]
+  %v27 = phi ptr [ %a4, %b3 ], [ %v29, %b4 ]
   %v28 = tail call <16 x i32> @llvm.hexagon.V6.vand(<16 x i32> undef, <16 x i32> %v26)
-  %v29 = getelementptr inbounds <16 x i32>, <16 x i32>* %v27, i32 1
-  store <16 x i32> %v28, <16 x i32>* %v27, align 64, !tbaa !0
+  %v29 = getelementptr inbounds <16 x i32>, ptr %v27, i32 1
+  store <16 x i32> %v28, ptr %v27, align 64, !tbaa !0
   %v30 = add nsw i32 %v25, -512
   %v31 = icmp sgt i32 %v30, 0
   br i1 %v31, label %b4, label %b5
 
 b5:                                               ; preds = %b4
-  %v32 = bitcast i32* %v24 to <16 x i32>*
   br label %b6
 
 b6:                                               ; preds = %b5, %b2
-  %v33 = phi <16 x i32>* [ %v32, %b5 ], [ %v0, %b2 ]
-  %v34 = load <16 x i32>, <16 x i32>* %v33, align 64, !tbaa !0
+  %v33 = phi ptr [ %v24, %b5 ], [ %a4, %b2 ]
+  %v34 = load <16 x i32>, ptr %v33, align 64, !tbaa !0
   %v35 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v13)
   %v36 = tail call <16 x i32> @llvm.hexagon.V6.vand(<16 x i32> %v34, <16 x i32> %v35)
-  store <16 x i32> %v36, <16 x i32>* %v33, align 64, !tbaa !0
+  store <16 x i32> %v36, ptr %v33, align 64, !tbaa !0
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/v60Intrins.ll b/llvm/test/CodeGen/Hexagon/v60Intrins.ll
index 61087f573e6a3..edf105ccfba88 100644
--- a/llvm/test/CodeGen/Hexagon/v60Intrins.ll
+++ b/llvm/test/CodeGen/Hexagon/v60Intrins.ll
@@ -361,8 +361,8 @@ target triple = "hexagon"
 @vector_pairs = common global [15 x <32 x i32>] zeroinitializer, align 128
 @VectorPairResult = common global <32 x i32> zeroinitializer, align 128
 @dst_addresses = common global [15 x i8] zeroinitializer, align 8
- at ptr_addresses = common global [15 x i8*] zeroinitializer, align 8
- at src_addresses = common global [15 x i8*] zeroinitializer, align 8
+ at ptr_addresses = common global [15 x ptr] zeroinitializer, align 8
+ at src_addresses = common global [15 x ptr] zeroinitializer, align 8
 @dst = common global i8 0, align 1
 @ptr = common global [32768 x i8] zeroinitializer, align 8
 
@@ -370,1296 +370,1296 @@ target triple = "hexagon"
 define i32 @main() #0 {
 entry:
   %retval = alloca i32, align 4
-  store i32 0, i32* %retval, align 4
-  %0 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store i32 0, ptr %retval, align 4
+  %0 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %0, i32 -1)
-  %2 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 1), align 64
+  %2 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vecpreds, i32 0, i32 1), align 64
   %3 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %2, i32 -1)
   %4 = call <64 x i1> @llvm.hexagon.V6.pred.and(<64 x i1> %1, <64 x i1> %3)
   %5 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %4, i32 -1)
-  store volatile <16 x i32> %5, <16 x i32>* @Q6VecPredResult, align 64
-  %6 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %5, ptr @Q6VecPredResult, align 64
+  %6 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %7 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %6, i32 -1)
-  %8 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 1), align 64
+  %8 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vecpreds, i32 0, i32 1), align 64
   %9 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %8, i32 -1)
   %10 = call <64 x i1> @llvm.hexagon.V6.pred.and.n(<64 x i1> %7, <64 x i1> %9)
   %11 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %10, i32 -1)
-  store volatile <16 x i32> %11, <16 x i32>* @Q6VecPredResult, align 64
-  %12 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %11, ptr @Q6VecPredResult, align 64
+  %12 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %13 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %12, i32 -1)
   %14 = call <64 x i1> @llvm.hexagon.V6.pred.not(<64 x i1> %13)
   %15 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %14, i32 -1)
-  store volatile <16 x i32> %15, <16 x i32>* @Q6VecPredResult, align 64
-  %16 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %15, ptr @Q6VecPredResult, align 64
+  %16 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %17 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %16, i32 -1)
-  %18 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 1), align 64
+  %18 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vecpreds, i32 0, i32 1), align 64
   %19 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %18, i32 -1)
   %20 = call <64 x i1> @llvm.hexagon.V6.pred.or(<64 x i1> %17, <64 x i1> %19)
   %21 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %20, i32 -1)
-  store volatile <16 x i32> %21, <16 x i32>* @Q6VecPredResult, align 64
-  %22 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %21, ptr @Q6VecPredResult, align 64
+  %22 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %23 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %22, i32 -1)
-  %24 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 1), align 64
+  %24 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vecpreds, i32 0, i32 1), align 64
   %25 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %24, i32 -1)
   %26 = call <64 x i1> @llvm.hexagon.V6.pred.or.n(<64 x i1> %23, <64 x i1> %25)
   %27 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %26, i32 -1)
-  store volatile <16 x i32> %27, <16 x i32>* @Q6VecPredResult, align 64
-  %28 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %27, ptr @Q6VecPredResult, align 64
+  %28 = load volatile <16 x i32>, ptr @vectors, align 64
   %29 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %28, i32 -1)
   %30 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %29, i32 -1)
-  store volatile <16 x i32> %30, <16 x i32>* @Q6VecPredResult, align 64
-  %31 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %30, ptr @Q6VecPredResult, align 64
+  %31 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %32 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %31, i32 -1)
-  %33 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  %33 = load volatile <16 x i32>, ptr @vectors, align 64
   %34 = call <64 x i1> @llvm.hexagon.V6.vandvrt.acc(<64 x i1> %32, <16 x i32> %33, i32 -1)
   %35 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %34, i32 -1)
-  store volatile <16 x i32> %35, <16 x i32>* @Q6VecPredResult, align 64
-  %36 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %37 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %35, ptr @Q6VecPredResult, align 64
+  %36 = load volatile <16 x i32>, ptr @vectors, align 64
+  %37 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %38 = call <64 x i1> @llvm.hexagon.V6.veqb(<16 x i32> %36, <16 x i32> %37)
   %39 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %38, i32 -1)
-  store volatile <16 x i32> %39, <16 x i32>* @Q6VecPredResult, align 64
-  %40 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %41 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %39, ptr @Q6VecPredResult, align 64
+  %40 = load volatile <16 x i32>, ptr @vectors, align 64
+  %41 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %42 = call <64 x i1> @llvm.hexagon.V6.veqh(<16 x i32> %40, <16 x i32> %41)
   %43 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %42, i32 -1)
-  store volatile <16 x i32> %43, <16 x i32>* @Q6VecPredResult, align 64
-  %44 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %45 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %43, ptr @Q6VecPredResult, align 64
+  %44 = load volatile <16 x i32>, ptr @vectors, align 64
+  %45 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %46 = call <64 x i1> @llvm.hexagon.V6.veqw(<16 x i32> %44, <16 x i32> %45)
   %47 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %46, i32 -1)
-  store volatile <16 x i32> %47, <16 x i32>* @Q6VecPredResult, align 64
-  %48 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %47, ptr @Q6VecPredResult, align 64
+  %48 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %49 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %48, i32 -1)
-  %50 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %51 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %50 = load volatile <16 x i32>, ptr @vectors, align 64
+  %51 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %52 = call <64 x i1> @llvm.hexagon.V6.veqb.and(<64 x i1> %49, <16 x i32> %50, <16 x i32> %51)
   %53 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %52, i32 -1)
-  store volatile <16 x i32> %53, <16 x i32>* @Q6VecPredResult, align 64
-  %54 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %53, ptr @Q6VecPredResult, align 64
+  %54 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %55 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %54, i32 -1)
-  %56 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %57 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %56 = load volatile <16 x i32>, ptr @vectors, align 64
+  %57 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %58 = call <64 x i1> @llvm.hexagon.V6.veqh.and(<64 x i1> %55, <16 x i32> %56, <16 x i32> %57)
   %59 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %58, i32 -1)
-  store volatile <16 x i32> %59, <16 x i32>* @Q6VecPredResult, align 64
-  %60 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %59, ptr @Q6VecPredResult, align 64
+  %60 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %61 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %60, i32 -1)
-  %62 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %63 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %62 = load volatile <16 x i32>, ptr @vectors, align 64
+  %63 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %64 = call <64 x i1> @llvm.hexagon.V6.veqw.and(<64 x i1> %61, <16 x i32> %62, <16 x i32> %63)
   %65 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %64, i32 -1)
-  store volatile <16 x i32> %65, <16 x i32>* @Q6VecPredResult, align 64
-  %66 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %65, ptr @Q6VecPredResult, align 64
+  %66 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %67 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %66, i32 -1)
-  %68 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %69 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %68 = load volatile <16 x i32>, ptr @vectors, align 64
+  %69 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %70 = call <64 x i1> @llvm.hexagon.V6.veqb.or(<64 x i1> %67, <16 x i32> %68, <16 x i32> %69)
   %71 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %70, i32 -1)
-  store volatile <16 x i32> %71, <16 x i32>* @Q6VecPredResult, align 64
-  %72 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %71, ptr @Q6VecPredResult, align 64
+  %72 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %73 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %72, i32 -1)
-  %74 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %75 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %74 = load volatile <16 x i32>, ptr @vectors, align 64
+  %75 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %76 = call <64 x i1> @llvm.hexagon.V6.veqh.or(<64 x i1> %73, <16 x i32> %74, <16 x i32> %75)
   %77 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %76, i32 -1)
-  store volatile <16 x i32> %77, <16 x i32>* @Q6VecPredResult, align 64
-  %78 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %77, ptr @Q6VecPredResult, align 64
+  %78 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %79 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %78, i32 -1)
-  %80 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %81 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %80 = load volatile <16 x i32>, ptr @vectors, align 64
+  %81 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %82 = call <64 x i1> @llvm.hexagon.V6.veqw.or(<64 x i1> %79, <16 x i32> %80, <16 x i32> %81)
   %83 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %82, i32 -1)
-  store volatile <16 x i32> %83, <16 x i32>* @Q6VecPredResult, align 64
-  %84 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %83, ptr @Q6VecPredResult, align 64
+  %84 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %85 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %84, i32 -1)
-  %86 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %87 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %86 = load volatile <16 x i32>, ptr @vectors, align 64
+  %87 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %88 = call <64 x i1> @llvm.hexagon.V6.veqb.xor(<64 x i1> %85, <16 x i32> %86, <16 x i32> %87)
   %89 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %88, i32 -1)
-  store volatile <16 x i32> %89, <16 x i32>* @Q6VecPredResult, align 64
-  %90 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %89, ptr @Q6VecPredResult, align 64
+  %90 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %91 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %90, i32 -1)
-  %92 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %93 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %92 = load volatile <16 x i32>, ptr @vectors, align 64
+  %93 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %94 = call <64 x i1> @llvm.hexagon.V6.veqh.xor(<64 x i1> %91, <16 x i32> %92, <16 x i32> %93)
   %95 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %94, i32 -1)
-  store volatile <16 x i32> %95, <16 x i32>* @Q6VecPredResult, align 64
-  %96 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %95, ptr @Q6VecPredResult, align 64
+  %96 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %97 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %96, i32 -1)
-  %98 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %99 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %98 = load volatile <16 x i32>, ptr @vectors, align 64
+  %99 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %100 = call <64 x i1> @llvm.hexagon.V6.veqw.xor(<64 x i1> %97, <16 x i32> %98, <16 x i32> %99)
   %101 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %100, i32 -1)
-  store volatile <16 x i32> %101, <16 x i32>* @Q6VecPredResult, align 64
-  %102 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %103 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %101, ptr @Q6VecPredResult, align 64
+  %102 = load volatile <16 x i32>, ptr @vectors, align 64
+  %103 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %104 = call <64 x i1> @llvm.hexagon.V6.vgtb(<16 x i32> %102, <16 x i32> %103)
   %105 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %104, i32 -1)
-  store volatile <16 x i32> %105, <16 x i32>* @Q6VecPredResult, align 64
-  %106 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %107 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %105, ptr @Q6VecPredResult, align 64
+  %106 = load volatile <16 x i32>, ptr @vectors, align 64
+  %107 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %108 = call <64 x i1> @llvm.hexagon.V6.vgth(<16 x i32> %106, <16 x i32> %107)
   %109 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %108, i32 -1)
-  store volatile <16 x i32> %109, <16 x i32>* @Q6VecPredResult, align 64
-  %110 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %111 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %109, ptr @Q6VecPredResult, align 64
+  %110 = load volatile <16 x i32>, ptr @vectors, align 64
+  %111 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %112 = call <64 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %110, <16 x i32> %111)
   %113 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %112, i32 -1)
-  store volatile <16 x i32> %113, <16 x i32>* @Q6VecPredResult, align 64
-  %114 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %115 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %113, ptr @Q6VecPredResult, align 64
+  %114 = load volatile <16 x i32>, ptr @vectors, align 64
+  %115 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %116 = call <64 x i1> @llvm.hexagon.V6.vgtuh(<16 x i32> %114, <16 x i32> %115)
   %117 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %116, i32 -1)
-  store volatile <16 x i32> %117, <16 x i32>* @Q6VecPredResult, align 64
-  %118 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %119 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %117, ptr @Q6VecPredResult, align 64
+  %118 = load volatile <16 x i32>, ptr @vectors, align 64
+  %119 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %120 = call <64 x i1> @llvm.hexagon.V6.vgtuw(<16 x i32> %118, <16 x i32> %119)
   %121 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %120, i32 -1)
-  store volatile <16 x i32> %121, <16 x i32>* @Q6VecPredResult, align 64
-  %122 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %123 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %121, ptr @Q6VecPredResult, align 64
+  %122 = load volatile <16 x i32>, ptr @vectors, align 64
+  %123 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %124 = call <64 x i1> @llvm.hexagon.V6.vgtw(<16 x i32> %122, <16 x i32> %123)
   %125 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %124, i32 -1)
-  store volatile <16 x i32> %125, <16 x i32>* @Q6VecPredResult, align 64
-  %126 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %125, ptr @Q6VecPredResult, align 64
+  %126 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %127 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %126, i32 -1)
-  %128 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %129 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %128 = load volatile <16 x i32>, ptr @vectors, align 64
+  %129 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %130 = call <64 x i1> @llvm.hexagon.V6.vgtb.and(<64 x i1> %127, <16 x i32> %128, <16 x i32> %129)
   %131 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %130, i32 -1)
-  store volatile <16 x i32> %131, <16 x i32>* @Q6VecPredResult, align 64
-  %132 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %131, ptr @Q6VecPredResult, align 64
+  %132 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %133 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %132, i32 -1)
-  %134 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %135 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %134 = load volatile <16 x i32>, ptr @vectors, align 64
+  %135 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %136 = call <64 x i1> @llvm.hexagon.V6.vgth.and(<64 x i1> %133, <16 x i32> %134, <16 x i32> %135)
   %137 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %136, i32 -1)
-  store volatile <16 x i32> %137, <16 x i32>* @Q6VecPredResult, align 64
-  %138 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %137, ptr @Q6VecPredResult, align 64
+  %138 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %139 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %138, i32 -1)
-  %140 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %141 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %140 = load volatile <16 x i32>, ptr @vectors, align 64
+  %141 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %142 = call <64 x i1> @llvm.hexagon.V6.vgtub.and(<64 x i1> %139, <16 x i32> %140, <16 x i32> %141)
   %143 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %142, i32 -1)
-  store volatile <16 x i32> %143, <16 x i32>* @Q6VecPredResult, align 64
-  %144 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %143, ptr @Q6VecPredResult, align 64
+  %144 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %145 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %144, i32 -1)
-  %146 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %147 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %146 = load volatile <16 x i32>, ptr @vectors, align 64
+  %147 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %148 = call <64 x i1> @llvm.hexagon.V6.vgtuh.and(<64 x i1> %145, <16 x i32> %146, <16 x i32> %147)
   %149 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %148, i32 -1)
-  store volatile <16 x i32> %149, <16 x i32>* @Q6VecPredResult, align 64
-  %150 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %149, ptr @Q6VecPredResult, align 64
+  %150 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %151 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %150, i32 -1)
-  %152 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %153 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %152 = load volatile <16 x i32>, ptr @vectors, align 64
+  %153 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %154 = call <64 x i1> @llvm.hexagon.V6.vgtuw.and(<64 x i1> %151, <16 x i32> %152, <16 x i32> %153)
   %155 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %154, i32 -1)
-  store volatile <16 x i32> %155, <16 x i32>* @Q6VecPredResult, align 64
-  %156 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %155, ptr @Q6VecPredResult, align 64
+  %156 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %157 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %156, i32 -1)
-  %158 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %159 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %158 = load volatile <16 x i32>, ptr @vectors, align 64
+  %159 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %160 = call <64 x i1> @llvm.hexagon.V6.vgtw.and(<64 x i1> %157, <16 x i32> %158, <16 x i32> %159)
   %161 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %160, i32 -1)
-  store volatile <16 x i32> %161, <16 x i32>* @Q6VecPredResult, align 64
-  %162 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %161, ptr @Q6VecPredResult, align 64
+  %162 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %163 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %162, i32 -1)
-  %164 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %165 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %164 = load volatile <16 x i32>, ptr @vectors, align 64
+  %165 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %166 = call <64 x i1> @llvm.hexagon.V6.vgtb.or(<64 x i1> %163, <16 x i32> %164, <16 x i32> %165)
   %167 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %166, i32 -1)
-  store volatile <16 x i32> %167, <16 x i32>* @Q6VecPredResult, align 64
-  %168 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %167, ptr @Q6VecPredResult, align 64
+  %168 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %169 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %168, i32 -1)
-  %170 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %171 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %170 = load volatile <16 x i32>, ptr @vectors, align 64
+  %171 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %172 = call <64 x i1> @llvm.hexagon.V6.vgth.or(<64 x i1> %169, <16 x i32> %170, <16 x i32> %171)
   %173 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %172, i32 -1)
-  store volatile <16 x i32> %173, <16 x i32>* @Q6VecPredResult, align 64
-  %174 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %173, ptr @Q6VecPredResult, align 64
+  %174 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %175 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %174, i32 -1)
-  %176 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %177 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %176 = load volatile <16 x i32>, ptr @vectors, align 64
+  %177 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %178 = call <64 x i1> @llvm.hexagon.V6.vgtub.or(<64 x i1> %175, <16 x i32> %176, <16 x i32> %177)
   %179 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %178, i32 -1)
-  store volatile <16 x i32> %179, <16 x i32>* @Q6VecPredResult, align 64
-  %180 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %179, ptr @Q6VecPredResult, align 64
+  %180 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %181 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %180, i32 -1)
-  %182 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %183 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %182 = load volatile <16 x i32>, ptr @vectors, align 64
+  %183 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %184 = call <64 x i1> @llvm.hexagon.V6.vgtuh.or(<64 x i1> %181, <16 x i32> %182, <16 x i32> %183)
   %185 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %184, i32 -1)
-  store volatile <16 x i32> %185, <16 x i32>* @Q6VecPredResult, align 64
-  %186 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %185, ptr @Q6VecPredResult, align 64
+  %186 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %187 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %186, i32 -1)
-  %188 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %189 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %188 = load volatile <16 x i32>, ptr @vectors, align 64
+  %189 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %190 = call <64 x i1> @llvm.hexagon.V6.vgtuw.or(<64 x i1> %187, <16 x i32> %188, <16 x i32> %189)
   %191 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %190, i32 -1)
-  store volatile <16 x i32> %191, <16 x i32>* @Q6VecPredResult, align 64
-  %192 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %191, ptr @Q6VecPredResult, align 64
+  %192 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %193 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %192, i32 -1)
-  %194 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %195 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %194 = load volatile <16 x i32>, ptr @vectors, align 64
+  %195 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %196 = call <64 x i1> @llvm.hexagon.V6.vgtw.or(<64 x i1> %193, <16 x i32> %194, <16 x i32> %195)
   %197 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %196, i32 -1)
-  store volatile <16 x i32> %197, <16 x i32>* @Q6VecPredResult, align 64
-  %198 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %197, ptr @Q6VecPredResult, align 64
+  %198 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %199 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %198, i32 -1)
-  %200 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %201 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %200 = load volatile <16 x i32>, ptr @vectors, align 64
+  %201 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %202 = call <64 x i1> @llvm.hexagon.V6.vgtb.xor(<64 x i1> %199, <16 x i32> %200, <16 x i32> %201)
   %203 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %202, i32 -1)
-  store volatile <16 x i32> %203, <16 x i32>* @Q6VecPredResult, align 64
-  %204 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %203, ptr @Q6VecPredResult, align 64
+  %204 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %205 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %204, i32 -1)
-  %206 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %207 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %206 = load volatile <16 x i32>, ptr @vectors, align 64
+  %207 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %208 = call <64 x i1> @llvm.hexagon.V6.vgth.xor(<64 x i1> %205, <16 x i32> %206, <16 x i32> %207)
   %209 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %208, i32 -1)
-  store volatile <16 x i32> %209, <16 x i32>* @Q6VecPredResult, align 64
-  %210 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %209, ptr @Q6VecPredResult, align 64
+  %210 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %211 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %210, i32 -1)
-  %212 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %213 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %212 = load volatile <16 x i32>, ptr @vectors, align 64
+  %213 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %214 = call <64 x i1> @llvm.hexagon.V6.vgtub.xor(<64 x i1> %211, <16 x i32> %212, <16 x i32> %213)
   %215 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %214, i32 -1)
-  store volatile <16 x i32> %215, <16 x i32>* @Q6VecPredResult, align 64
-  %216 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %215, ptr @Q6VecPredResult, align 64
+  %216 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %217 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %216, i32 -1)
-  %218 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %219 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %218 = load volatile <16 x i32>, ptr @vectors, align 64
+  %219 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %220 = call <64 x i1> @llvm.hexagon.V6.vgtuh.xor(<64 x i1> %217, <16 x i32> %218, <16 x i32> %219)
   %221 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %220, i32 -1)
-  store volatile <16 x i32> %221, <16 x i32>* @Q6VecPredResult, align 64
-  %222 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %221, ptr @Q6VecPredResult, align 64
+  %222 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %223 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %222, i32 -1)
-  %224 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %225 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %224 = load volatile <16 x i32>, ptr @vectors, align 64
+  %225 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %226 = call <64 x i1> @llvm.hexagon.V6.vgtuw.xor(<64 x i1> %223, <16 x i32> %224, <16 x i32> %225)
   %227 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %226, i32 -1)
-  store volatile <16 x i32> %227, <16 x i32>* @Q6VecPredResult, align 64
-  %228 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %227, ptr @Q6VecPredResult, align 64
+  %228 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %229 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %228, i32 -1)
-  %230 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %231 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %230 = load volatile <16 x i32>, ptr @vectors, align 64
+  %231 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %232 = call <64 x i1> @llvm.hexagon.V6.vgtw.xor(<64 x i1> %229, <16 x i32> %230, <16 x i32> %231)
   %233 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %232, i32 -1)
-  store volatile <16 x i32> %233, <16 x i32>* @Q6VecPredResult, align 64
+  store volatile <16 x i32> %233, ptr @Q6VecPredResult, align 64
   %234 = call <64 x i1> @llvm.hexagon.V6.pred.scalar2(i32 1)
   %235 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %234, i32 -1)
-  store volatile <16 x i32> %235, <16 x i32>* @Q6VecPredResult, align 64
-  %236 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %235, ptr @Q6VecPredResult, align 64
+  %236 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %237 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %236, i32 -1)
-  %238 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 1), align 64
+  %238 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vecpreds, i32 0, i32 1), align 64
   %239 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %238, i32 -1)
   %240 = call <64 x i1> @llvm.hexagon.V6.pred.xor(<64 x i1> %237, <64 x i1> %239)
   %241 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %240, i32 -1)
-  store volatile <16 x i32> %241, <16 x i32>* @Q6VecPredResult, align 64
-  %242 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %241, ptr @Q6VecPredResult, align 64
+  %242 = load volatile <16 x i32>, ptr @vectors, align 64
   %243 = call <16 x i32> @llvm.hexagon.V6.vassign(<16 x i32> %242)
-  store volatile <16 x i32> %243, <16 x i32>* @VectorResult, align 64
-  %244 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
+  store volatile <16 x i32> %243, ptr @VectorResult, align 64
+  %244 = load volatile <32 x i32>, ptr @vector_pairs, align 128
   %245 = call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %244)
-  store volatile <16 x i32> %245, <16 x i32>* @VectorResult, align 64
-  %246 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
+  store volatile <16 x i32> %245, ptr @VectorResult, align 64
+  %246 = load volatile <32 x i32>, ptr @vector_pairs, align 128
   %247 = call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %246)
-  store volatile <16 x i32> %247, <16 x i32>* @VectorResult, align 64
-  %248 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %249 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %247, ptr @VectorResult, align 64
+  %248 = load volatile <16 x i32>, ptr @vectors, align 64
+  %249 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %250 = call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %248, <16 x i32> %249, i32 1)
-  store volatile <16 x i32> %250, <16 x i32>* @VectorResult, align 64
-  %251 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %252 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %250, ptr @VectorResult, align 64
+  %251 = load volatile <16 x i32>, ptr @vectors, align 64
+  %252 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %253 = call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %251, <16 x i32> %252, i32 -1)
-  store volatile <16 x i32> %253, <16 x i32>* @VectorResult, align 64
-  %254 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %253, ptr @VectorResult, align 64
+  %254 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %255 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %254, i32 -1)
   %256 = call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %255, i32 -1)
-  store volatile <16 x i32> %256, <16 x i32>* @VectorResult, align 64
-  %257 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %258 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %256, ptr @VectorResult, align 64
+  %257 = load volatile <16 x i32>, ptr @vectors, align 64
+  %258 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %259 = call <16 x i32> @llvm.hexagon.V6.vand(<16 x i32> %257, <16 x i32> %258)
-  store volatile <16 x i32> %259, <16 x i32>* @VectorResult, align 64
-  %260 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %261 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %259, ptr @VectorResult, align 64
+  %260 = load volatile <16 x i32>, ptr @vectors, align 64
+  %261 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %262 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %261, i32 -1)
   %263 = call <16 x i32> @llvm.hexagon.V6.vandqrt.acc(<16 x i32> %260, <64 x i1> %262, i32 -1)
-  store volatile <16 x i32> %263, <16 x i32>* @VectorResult, align 64
-  %264 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %265 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %263, ptr @VectorResult, align 64
+  %264 = load volatile <16 x i32>, ptr @vectors, align 64
+  %265 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %266 = call <16 x i32> @llvm.hexagon.V6.vdelta(<16 x i32> %264, <16 x i32> %265)
-  store volatile <16 x i32> %266, <16 x i32>* @VectorResult, align 64
-  %267 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %268 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %266, ptr @VectorResult, align 64
+  %267 = load volatile <16 x i32>, ptr @vectors, align 64
+  %268 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %269 = call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %267, <16 x i32> %268, i32 1)
-  store volatile <16 x i32> %269, <16 x i32>* @VectorResult, align 64
-  %270 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %271 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %269, ptr @VectorResult, align 64
+  %270 = load volatile <16 x i32>, ptr @vectors, align 64
+  %271 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %272 = call <16 x i32> @llvm.hexagon.V6.vlalignb(<16 x i32> %270, <16 x i32> %271, i32 -1)
-  store volatile <16 x i32> %272, <16 x i32>* @VectorResult, align 64
-  %273 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %272, ptr @VectorResult, align 64
+  %273 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %274 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %273, i32 -1)
-  %275 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %276 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %275 = load volatile <16 x i32>, ptr @vectors, align 64
+  %276 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %277 = call <16 x i32> @llvm.hexagon.V6.vmux(<64 x i1> %274, <16 x i32> %275, <16 x i32> %276)
-  store volatile <16 x i32> %277, <16 x i32>* @VectorResult, align 64
-  %278 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %277, ptr @VectorResult, align 64
+  %278 = load volatile <16 x i32>, ptr @vectors, align 64
   %279 = call <16 x i32> @llvm.hexagon.V6.vnot(<16 x i32> %278)
-  store volatile <16 x i32> %279, <16 x i32>* @VectorResult, align 64
-  %280 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %281 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %279, ptr @VectorResult, align 64
+  %280 = load volatile <16 x i32>, ptr @vectors, align 64
+  %281 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %282 = call <16 x i32> @llvm.hexagon.V6.vor(<16 x i32> %280, <16 x i32> %281)
-  store volatile <16 x i32> %282, <16 x i32>* @VectorResult, align 64
-  %283 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %284 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %282, ptr @VectorResult, align 64
+  %283 = load volatile <16 x i32>, ptr @vectors, align 64
+  %284 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %285 = call <16 x i32> @llvm.hexagon.V6.vrdelta(<16 x i32> %283, <16 x i32> %284)
-  store volatile <16 x i32> %285, <16 x i32>* @VectorResult, align 64
-  %286 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %285, ptr @VectorResult, align 64
+  %286 = load volatile <16 x i32>, ptr @vectors, align 64
   %287 = call <16 x i32> @llvm.hexagon.V6.vror(<16 x i32> %286, i32 -1)
-  store volatile <16 x i32> %287, <16 x i32>* @VectorResult, align 64
+  store volatile <16 x i32> %287, ptr @VectorResult, align 64
   %288 = call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 -1)
-  store volatile <16 x i32> %288, <16 x i32>* @VectorResult, align 64
-  %289 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %290 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %288, ptr @VectorResult, align 64
+  %289 = load volatile <16 x i32>, ptr @vectors, align 64
+  %290 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %291 = call <16 x i32> @llvm.hexagon.V6.vxor(<16 x i32> %289, <16 x i32> %290)
-  store volatile <16 x i32> %291, <16 x i32>* @VectorResult, align 64
+  store volatile <16 x i32> %291, ptr @VectorResult, align 64
   %292 = call <16 x i32> @llvm.hexagon.V6.vd0()
-  store volatile <16 x i32> %292, <16 x i32>* @VectorResult, align 64
-  %293 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %292, ptr @VectorResult, align 64
+  %293 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %294 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %293, i32 -1)
-  %295 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %296 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %295 = load volatile <16 x i32>, ptr @vectors, align 64
+  %296 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %297 = call <16 x i32> @llvm.hexagon.V6.vaddbq(<64 x i1> %294, <16 x i32> %295, <16 x i32> %296)
-  store volatile <16 x i32> %297, <16 x i32>* @VectorResult, align 64
-  %298 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %297, ptr @VectorResult, align 64
+  %298 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %299 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %298, i32 -1)
-  %300 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %301 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %300 = load volatile <16 x i32>, ptr @vectors, align 64
+  %301 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %302 = call <16 x i32> @llvm.hexagon.V6.vaddbnq(<64 x i1> %299, <16 x i32> %300, <16 x i32> %301)
-  store volatile <16 x i32> %302, <16 x i32>* @VectorResult, align 64
-  %303 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %302, ptr @VectorResult, align 64
+  %303 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %304 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %303, i32 -1)
-  %305 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %306 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %305 = load volatile <16 x i32>, ptr @vectors, align 64
+  %306 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %307 = call <16 x i32> @llvm.hexagon.V6.vsubbq(<64 x i1> %304, <16 x i32> %305, <16 x i32> %306)
-  store volatile <16 x i32> %307, <16 x i32>* @VectorResult, align 64
-  %308 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %307, ptr @VectorResult, align 64
+  %308 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %309 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %308, i32 -1)
-  %310 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %311 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %310 = load volatile <16 x i32>, ptr @vectors, align 64
+  %311 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %312 = call <16 x i32> @llvm.hexagon.V6.vsubbnq(<64 x i1> %309, <16 x i32> %310, <16 x i32> %311)
-  store volatile <16 x i32> %312, <16 x i32>* @VectorResult, align 64
-  %313 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %314 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %312, ptr @VectorResult, align 64
+  %313 = load volatile <16 x i32>, ptr @vectors, align 64
+  %314 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %315 = call <16 x i32> @llvm.hexagon.V6.vaddb(<16 x i32> %313, <16 x i32> %314)
-  store volatile <16 x i32> %315, <16 x i32>* @VectorResult, align 64
-  %316 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %317 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %315, ptr @VectorResult, align 64
+  %316 = load volatile <16 x i32>, ptr @vectors, align 64
+  %317 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %318 = call <16 x i32> @llvm.hexagon.V6.vasrhbrndsat(<16 x i32> %316, <16 x i32> %317, i32 -1)
-  store volatile <16 x i32> %318, <16 x i32>* @VectorResult, align 64
-  %319 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %318, ptr @VectorResult, align 64
+  %319 = load volatile <16 x i32>, ptr @vectors, align 64
   %320 = call <16 x i32> @llvm.hexagon.V6.vdealb(<16 x i32> %319)
-  store volatile <16 x i32> %320, <16 x i32>* @VectorResult, align 64
-  %321 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %322 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %320, ptr @VectorResult, align 64
+  %321 = load volatile <16 x i32>, ptr @vectors, align 64
+  %322 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %323 = call <16 x i32> @llvm.hexagon.V6.vdealb4w(<16 x i32> %321, <16 x i32> %322)
-  store volatile <16 x i32> %323, <16 x i32>* @VectorResult, align 64
-  %324 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %325 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %323, ptr @VectorResult, align 64
+  %324 = load volatile <16 x i32>, ptr @vectors, align 64
+  %325 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %326 = call <16 x i32> @llvm.hexagon.V6.vlutvvb(<16 x i32> %324, <16 x i32> %325, i32 -1)
-  store volatile <16 x i32> %326, <16 x i32>* @VectorResult, align 64
-  %327 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %328 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
-  %329 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 2), align 64
+  store volatile <16 x i32> %326, ptr @VectorResult, align 64
+  %327 = load volatile <16 x i32>, ptr @vectors, align 64
+  %328 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
+  %329 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 2), align 64
   %330 = call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %327, <16 x i32> %328, <16 x i32> %329, i32 -1)
-  store volatile <16 x i32> %330, <16 x i32>* @VectorResult, align 64
-  %331 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %332 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %330, ptr @VectorResult, align 64
+  %331 = load volatile <16 x i32>, ptr @vectors, align 64
+  %332 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %333 = call <16 x i32> @llvm.hexagon.V6.vnavgub(<16 x i32> %331, <16 x i32> %332)
-  store volatile <16 x i32> %333, <16 x i32>* @VectorResult, align 64
-  %334 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %335 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %333, ptr @VectorResult, align 64
+  %334 = load volatile <16 x i32>, ptr @vectors, align 64
+  %335 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %336 = call <16 x i32> @llvm.hexagon.V6.vpackhb.sat(<16 x i32> %334, <16 x i32> %335)
-  store volatile <16 x i32> %336, <16 x i32>* @VectorResult, align 64
-  %337 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %338 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %336, ptr @VectorResult, align 64
+  %337 = load volatile <16 x i32>, ptr @vectors, align 64
+  %338 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %339 = call <16 x i32> @llvm.hexagon.V6.vpackeb(<16 x i32> %337, <16 x i32> %338)
-  store volatile <16 x i32> %339, <16 x i32>* @VectorResult, align 64
-  %340 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %341 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %339, ptr @VectorResult, align 64
+  %340 = load volatile <16 x i32>, ptr @vectors, align 64
+  %341 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %342 = call <16 x i32> @llvm.hexagon.V6.vpackob(<16 x i32> %340, <16 x i32> %341)
-  store volatile <16 x i32> %342, <16 x i32>* @VectorResult, align 64
-  %343 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %344 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %342, ptr @VectorResult, align 64
+  %343 = load volatile <16 x i32>, ptr @vectors, align 64
+  %344 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %345 = call <16 x i32> @llvm.hexagon.V6.vroundhb(<16 x i32> %343, <16 x i32> %344)
-  store volatile <16 x i32> %345, <16 x i32>* @VectorResult, align 64
-  %346 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %345, ptr @VectorResult, align 64
+  %346 = load volatile <16 x i32>, ptr @vectors, align 64
   %347 = call <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32> %346)
-  store volatile <16 x i32> %347, <16 x i32>* @VectorResult, align 64
-  %348 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %349 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %347, ptr @VectorResult, align 64
+  %348 = load volatile <16 x i32>, ptr @vectors, align 64
+  %349 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %350 = call <16 x i32> @llvm.hexagon.V6.vshuffeb(<16 x i32> %348, <16 x i32> %349)
-  store volatile <16 x i32> %350, <16 x i32>* @VectorResult, align 64
-  %351 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %352 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %350, ptr @VectorResult, align 64
+  %351 = load volatile <16 x i32>, ptr @vectors, align 64
+  %352 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %353 = call <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32> %351, <16 x i32> %352)
-  store volatile <16 x i32> %353, <16 x i32>* @VectorResult, align 64
-  %354 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %355 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %353, ptr @VectorResult, align 64
+  %354 = load volatile <16 x i32>, ptr @vectors, align 64
+  %355 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %356 = call <16 x i32> @llvm.hexagon.V6.vsubb(<16 x i32> %354, <16 x i32> %355)
-  store volatile <16 x i32> %356, <16 x i32>* @VectorResult, align 64
-  %357 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %356, ptr @VectorResult, align 64
+  %357 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %358 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %357, i32 -1)
-  %359 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %360 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %359 = load volatile <16 x i32>, ptr @vectors, align 64
+  %360 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %361 = call <16 x i32> @llvm.hexagon.V6.vaddhq(<64 x i1> %358, <16 x i32> %359, <16 x i32> %360)
-  store volatile <16 x i32> %361, <16 x i32>* @VectorResult, align 64
-  %362 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %361, ptr @VectorResult, align 64
+  %362 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %363 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %362, i32 -1)
-  %364 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %365 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %364 = load volatile <16 x i32>, ptr @vectors, align 64
+  %365 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %366 = call <16 x i32> @llvm.hexagon.V6.vaddhnq(<64 x i1> %363, <16 x i32> %364, <16 x i32> %365)
-  store volatile <16 x i32> %366, <16 x i32>* @VectorResult, align 64
-  %367 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %366, ptr @VectorResult, align 64
+  %367 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %368 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %367, i32 -1)
-  %369 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %370 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %369 = load volatile <16 x i32>, ptr @vectors, align 64
+  %370 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %371 = call <16 x i32> @llvm.hexagon.V6.vsubhq(<64 x i1> %368, <16 x i32> %369, <16 x i32> %370)
-  store volatile <16 x i32> %371, <16 x i32>* @VectorResult, align 64
-  %372 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %371, ptr @VectorResult, align 64
+  %372 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %373 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %372, i32 -1)
-  %374 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %375 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %374 = load volatile <16 x i32>, ptr @vectors, align 64
+  %375 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %376 = call <16 x i32> @llvm.hexagon.V6.vsubhnq(<64 x i1> %373, <16 x i32> %374, <16 x i32> %375)
-  store volatile <16 x i32> %376, <16 x i32>* @VectorResult, align 64
-  %377 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %376, ptr @VectorResult, align 64
+  %377 = load volatile <16 x i32>, ptr @vectors, align 64
   %378 = call <16 x i32> @llvm.hexagon.V6.vabsh(<16 x i32> %377)
-  store volatile <16 x i32> %378, <16 x i32>* @VectorResult, align 64
-  %379 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %378, ptr @VectorResult, align 64
+  %379 = load volatile <16 x i32>, ptr @vectors, align 64
   %380 = call <16 x i32> @llvm.hexagon.V6.vabsh.sat(<16 x i32> %379)
-  store volatile <16 x i32> %380, <16 x i32>* @VectorResult, align 64
-  %381 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %382 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %380, ptr @VectorResult, align 64
+  %381 = load volatile <16 x i32>, ptr @vectors, align 64
+  %382 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %383 = call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %381, <16 x i32> %382)
-  store volatile <16 x i32> %383, <16 x i32>* @VectorResult, align 64
-  %384 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %385 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %383, ptr @VectorResult, align 64
+  %384 = load volatile <16 x i32>, ptr @vectors, align 64
+  %385 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %386 = call <16 x i32> @llvm.hexagon.V6.vaddhsat(<16 x i32> %384, <16 x i32> %385)
-  store volatile <16 x i32> %386, <16 x i32>* @VectorResult, align 64
-  %387 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %386, ptr @VectorResult, align 64
+  %387 = load volatile <16 x i32>, ptr @vectors, align 64
   %388 = call <16 x i32> @llvm.hexagon.V6.vaslh(<16 x i32> %387, i32 -1)
-  store volatile <16 x i32> %388, <16 x i32>* @VectorResult, align 64
-  %389 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %390 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %388, ptr @VectorResult, align 64
+  %389 = load volatile <16 x i32>, ptr @vectors, align 64
+  %390 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %391 = call <16 x i32> @llvm.hexagon.V6.vaslhv(<16 x i32> %389, <16 x i32> %390)
-  store volatile <16 x i32> %391, <16 x i32>* @VectorResult, align 64
-  %392 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %391, ptr @VectorResult, align 64
+  %392 = load volatile <16 x i32>, ptr @vectors, align 64
   %393 = call <16 x i32> @llvm.hexagon.V6.vasrh(<16 x i32> %392, i32 -1)
-  store volatile <16 x i32> %393, <16 x i32>* @VectorResult, align 64
-  %394 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %395 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %393, ptr @VectorResult, align 64
+  %394 = load volatile <16 x i32>, ptr @vectors, align 64
+  %395 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %396 = call <16 x i32> @llvm.hexagon.V6.vasrhv(<16 x i32> %394, <16 x i32> %395)
-  store volatile <16 x i32> %396, <16 x i32>* @VectorResult, align 64
-  %397 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %398 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %396, ptr @VectorResult, align 64
+  %397 = load volatile <16 x i32>, ptr @vectors, align 64
+  %398 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %399 = call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %397, <16 x i32> %398, i32 -1)
-  store volatile <16 x i32> %399, <16 x i32>* @VectorResult, align 64
-  %400 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %401 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %399, ptr @VectorResult, align 64
+  %400 = load volatile <16 x i32>, ptr @vectors, align 64
+  %401 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %402 = call <16 x i32> @llvm.hexagon.V6.vasrwhrndsat(<16 x i32> %400, <16 x i32> %401, i32 -1)
-  store volatile <16 x i32> %402, <16 x i32>* @VectorResult, align 64
-  %403 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %404 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %402, ptr @VectorResult, align 64
+  %403 = load volatile <16 x i32>, ptr @vectors, align 64
+  %404 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %405 = call <16 x i32> @llvm.hexagon.V6.vasrwhsat(<16 x i32> %403, <16 x i32> %404, i32 -1)
-  store volatile <16 x i32> %405, <16 x i32>* @VectorResult, align 64
-  %406 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %407 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %405, ptr @VectorResult, align 64
+  %406 = load volatile <16 x i32>, ptr @vectors, align 64
+  %407 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %408 = call <16 x i32> @llvm.hexagon.V6.vavgh(<16 x i32> %406, <16 x i32> %407)
-  store volatile <16 x i32> %408, <16 x i32>* @VectorResult, align 64
-  %409 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %410 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %408, ptr @VectorResult, align 64
+  %409 = load volatile <16 x i32>, ptr @vectors, align 64
+  %410 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %411 = call <16 x i32> @llvm.hexagon.V6.vavghrnd(<16 x i32> %409, <16 x i32> %410)
-  store volatile <16 x i32> %411, <16 x i32>* @VectorResult, align 64
-  %412 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %411, ptr @VectorResult, align 64
+  %412 = load volatile <16 x i32>, ptr @vectors, align 64
   %413 = call <16 x i32> @llvm.hexagon.V6.vdealh(<16 x i32> %412)
-  store volatile <16 x i32> %413, <16 x i32>* @VectorResult, align 64
-  %414 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %413, ptr @VectorResult, align 64
+  %414 = load volatile <16 x i32>, ptr @vectors, align 64
   %415 = call <16 x i32> @llvm.hexagon.V6.vdmpybus(<16 x i32> %414, i32 -1)
-  store volatile <16 x i32> %415, <16 x i32>* @VectorResult, align 64
-  %416 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %417 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %415, ptr @VectorResult, align 64
+  %416 = load volatile <16 x i32>, ptr @vectors, align 64
+  %417 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %418 = call <16 x i32> @llvm.hexagon.V6.vdmpybus.acc(<16 x i32> %416, <16 x i32> %417, i32 -1)
-  store volatile <16 x i32> %418, <16 x i32>* @VectorResult, align 64
-  %419 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %420 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %418, ptr @VectorResult, align 64
+  %419 = load volatile <16 x i32>, ptr @vectors, align 64
+  %420 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %421 = call <16 x i32> @llvm.hexagon.V6.vlsrhv(<16 x i32> %419, <16 x i32> %420)
-  store volatile <16 x i32> %421, <16 x i32>* @VectorResult, align 64
-  %422 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %423 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %421, ptr @VectorResult, align 64
+  %422 = load volatile <16 x i32>, ptr @vectors, align 64
+  %423 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %424 = call <16 x i32> @llvm.hexagon.V6.vmaxh(<16 x i32> %422, <16 x i32> %423)
-  store volatile <16 x i32> %424, <16 x i32>* @VectorResult, align 64
-  %425 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %426 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %424, ptr @VectorResult, align 64
+  %425 = load volatile <16 x i32>, ptr @vectors, align 64
+  %426 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %427 = call <16 x i32> @llvm.hexagon.V6.vminh(<16 x i32> %425, <16 x i32> %426)
-  store volatile <16 x i32> %427, <16 x i32>* @VectorResult, align 64
-  %428 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %427, ptr @VectorResult, align 64
+  %428 = load volatile <16 x i32>, ptr @vectors, align 64
   %429 = call <16 x i32> @llvm.hexagon.V6.vmpyhsrs(<16 x i32> %428, i32 -1)
-  store volatile <16 x i32> %429, <16 x i32>* @VectorResult, align 64
-  %430 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %429, ptr @VectorResult, align 64
+  %430 = load volatile <16 x i32>, ptr @vectors, align 64
   %431 = call <16 x i32> @llvm.hexagon.V6.vmpyhss(<16 x i32> %430, i32 -1)
-  store volatile <16 x i32> %431, <16 x i32>* @VectorResult, align 64
-  %432 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %433 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %431, ptr @VectorResult, align 64
+  %432 = load volatile <16 x i32>, ptr @vectors, align 64
+  %433 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %434 = call <16 x i32> @llvm.hexagon.V6.vmpyhvsrs(<16 x i32> %432, <16 x i32> %433)
-  store volatile <16 x i32> %434, <16 x i32>* @VectorResult, align 64
-  %435 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %434, ptr @VectorResult, align 64
+  %435 = load volatile <16 x i32>, ptr @vectors, align 64
   %436 = call <16 x i32> @llvm.hexagon.V6.vmpyihb(<16 x i32> %435, i32 -1)
-  store volatile <16 x i32> %436, <16 x i32>* @VectorResult, align 64
-  %437 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %438 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %436, ptr @VectorResult, align 64
+  %437 = load volatile <16 x i32>, ptr @vectors, align 64
+  %438 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %439 = call <16 x i32> @llvm.hexagon.V6.vmpyih(<16 x i32> %437, <16 x i32> %438)
-  store volatile <16 x i32> %439, <16 x i32>* @VectorResult, align 64
-  %440 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %441 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %439, ptr @VectorResult, align 64
+  %440 = load volatile <16 x i32>, ptr @vectors, align 64
+  %441 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %442 = call <16 x i32> @llvm.hexagon.V6.vmpyihb.acc(<16 x i32> %440, <16 x i32> %441, i32 -1)
-  store volatile <16 x i32> %442, <16 x i32>* @VectorResult, align 64
-  %443 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %444 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
-  %445 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 2), align 64
+  store volatile <16 x i32> %442, ptr @VectorResult, align 64
+  %443 = load volatile <16 x i32>, ptr @vectors, align 64
+  %444 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
+  %445 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 2), align 64
   %446 = call <16 x i32> @llvm.hexagon.V6.vmpyih.acc(<16 x i32> %443, <16 x i32> %444, <16 x i32> %445)
-  store volatile <16 x i32> %446, <16 x i32>* @VectorResult, align 64
-  %447 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %448 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %446, ptr @VectorResult, align 64
+  %447 = load volatile <16 x i32>, ptr @vectors, align 64
+  %448 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %449 = call <16 x i32> @llvm.hexagon.V6.vnavgh(<16 x i32> %447, <16 x i32> %448)
-  store volatile <16 x i32> %449, <16 x i32>* @VectorResult, align 64
-  %450 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %449, ptr @VectorResult, align 64
+  %450 = load volatile <16 x i32>, ptr @vectors, align 64
   %451 = call <16 x i32> @llvm.hexagon.V6.vnormamth(<16 x i32> %450)
-  store volatile <16 x i32> %451, <16 x i32>* @VectorResult, align 64
-  %452 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %453 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %451, ptr @VectorResult, align 64
+  %452 = load volatile <16 x i32>, ptr @vectors, align 64
+  %453 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %454 = call <16 x i32> @llvm.hexagon.V6.vpackwh.sat(<16 x i32> %452, <16 x i32> %453)
-  store volatile <16 x i32> %454, <16 x i32>* @VectorResult, align 64
-  %455 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %456 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %454, ptr @VectorResult, align 64
+  %455 = load volatile <16 x i32>, ptr @vectors, align 64
+  %456 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %457 = call <16 x i32> @llvm.hexagon.V6.vpackeh(<16 x i32> %455, <16 x i32> %456)
-  store volatile <16 x i32> %457, <16 x i32>* @VectorResult, align 64
-  %458 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %459 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %457, ptr @VectorResult, align 64
+  %458 = load volatile <16 x i32>, ptr @vectors, align 64
+  %459 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %460 = call <16 x i32> @llvm.hexagon.V6.vpackoh(<16 x i32> %458, <16 x i32> %459)
-  store volatile <16 x i32> %460, <16 x i32>* @VectorResult, align 64
-  %461 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %460, ptr @VectorResult, align 64
+  %461 = load volatile <16 x i32>, ptr @vectors, align 64
   %462 = call <16 x i32> @llvm.hexagon.V6.vpopcounth(<16 x i32> %461)
-  store volatile <16 x i32> %462, <16 x i32>* @VectorResult, align 64
-  %463 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %464 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %462, ptr @VectorResult, align 64
+  %463 = load volatile <16 x i32>, ptr @vectors, align 64
+  %464 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %465 = call <16 x i32> @llvm.hexagon.V6.vroundwh(<16 x i32> %463, <16 x i32> %464)
-  store volatile <16 x i32> %465, <16 x i32>* @VectorResult, align 64
-  %466 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %467 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %465, ptr @VectorResult, align 64
+  %466 = load volatile <16 x i32>, ptr @vectors, align 64
+  %467 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %468 = call <16 x i32> @llvm.hexagon.V6.vsatwh(<16 x i32> %466, <16 x i32> %467)
-  store volatile <16 x i32> %468, <16 x i32>* @VectorResult, align 64
-  %469 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %468, ptr @VectorResult, align 64
+  %469 = load volatile <16 x i32>, ptr @vectors, align 64
   %470 = call <16 x i32> @llvm.hexagon.V6.vshuffh(<16 x i32> %469)
-  store volatile <16 x i32> %470, <16 x i32>* @VectorResult, align 64
-  %471 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %472 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %470, ptr @VectorResult, align 64
+  %471 = load volatile <16 x i32>, ptr @vectors, align 64
+  %472 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %473 = call <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32> %471, <16 x i32> %472)
-  store volatile <16 x i32> %473, <16 x i32>* @VectorResult, align 64
-  %474 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %475 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %473, ptr @VectorResult, align 64
+  %474 = load volatile <16 x i32>, ptr @vectors, align 64
+  %475 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %476 = call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> %474, <16 x i32> %475)
-  store volatile <16 x i32> %476, <16 x i32>* @VectorResult, align 64
-  %477 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %478 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %476, ptr @VectorResult, align 64
+  %477 = load volatile <16 x i32>, ptr @vectors, align 64
+  %478 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %479 = call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> %477, <16 x i32> %478)
-  store volatile <16 x i32> %479, <16 x i32>* @VectorResult, align 64
-  %480 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %481 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %479, ptr @VectorResult, align 64
+  %480 = load volatile <16 x i32>, ptr @vectors, align 64
+  %481 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %482 = call <16 x i32> @llvm.hexagon.V6.vsubhsat(<16 x i32> %480, <16 x i32> %481)
-  store volatile <16 x i32> %482, <16 x i32>* @VectorResult, align 64
-  %483 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %484 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %482, ptr @VectorResult, align 64
+  %483 = load volatile <16 x i32>, ptr @vectors, align 64
+  %484 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %485 = call <16 x i32> @llvm.hexagon.V6.vabs
diff ub(<16 x i32> %483, <16 x i32> %484)
-  store volatile <16 x i32> %485, <16 x i32>* @VectorResult, align 64
-  %486 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %487 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %485, ptr @VectorResult, align 64
+  %486 = load volatile <16 x i32>, ptr @vectors, align 64
+  %487 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %488 = call <16 x i32> @llvm.hexagon.V6.vaddubsat(<16 x i32> %486, <16 x i32> %487)
-  store volatile <16 x i32> %488, <16 x i32>* @VectorResult, align 64
-  %489 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %490 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %488, ptr @VectorResult, align 64
+  %489 = load volatile <16 x i32>, ptr @vectors, align 64
+  %490 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %491 = call <16 x i32> @llvm.hexagon.V6.vasrhubrndsat(<16 x i32> %489, <16 x i32> %490, i32 -1)
-  store volatile <16 x i32> %491, <16 x i32>* @VectorResult, align 64
-  %492 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %493 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %491, ptr @VectorResult, align 64
+  %492 = load volatile <16 x i32>, ptr @vectors, align 64
+  %493 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %494 = call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %492, <16 x i32> %493, i32 -1)
-  store volatile <16 x i32> %494, <16 x i32>* @VectorResult, align 64
-  %495 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %496 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %494, ptr @VectorResult, align 64
+  %495 = load volatile <16 x i32>, ptr @vectors, align 64
+  %496 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %497 = call <16 x i32> @llvm.hexagon.V6.vavgub(<16 x i32> %495, <16 x i32> %496)
-  store volatile <16 x i32> %497, <16 x i32>* @VectorResult, align 64
-  %498 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %499 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %497, ptr @VectorResult, align 64
+  %498 = load volatile <16 x i32>, ptr @vectors, align 64
+  %499 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %500 = call <16 x i32> @llvm.hexagon.V6.vavgubrnd(<16 x i32> %498, <16 x i32> %499)
-  store volatile <16 x i32> %500, <16 x i32>* @VectorResult, align 64
-  %501 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %502 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %500, ptr @VectorResult, align 64
+  %501 = load volatile <16 x i32>, ptr @vectors, align 64
+  %502 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %503 = call <16 x i32> @llvm.hexagon.V6.vmaxub(<16 x i32> %501, <16 x i32> %502)
-  store volatile <16 x i32> %503, <16 x i32>* @VectorResult, align 64
-  %504 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %505 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %503, ptr @VectorResult, align 64
+  %504 = load volatile <16 x i32>, ptr @vectors, align 64
+  %505 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %506 = call <16 x i32> @llvm.hexagon.V6.vminub(<16 x i32> %504, <16 x i32> %505)
-  store volatile <16 x i32> %506, <16 x i32>* @VectorResult, align 64
-  %507 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %508 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %506, ptr @VectorResult, align 64
+  %507 = load volatile <16 x i32>, ptr @vectors, align 64
+  %508 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %509 = call <16 x i32> @llvm.hexagon.V6.vpackhub.sat(<16 x i32> %507, <16 x i32> %508)
-  store volatile <16 x i32> %509, <16 x i32>* @VectorResult, align 64
-  %510 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %511 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %509, ptr @VectorResult, align 64
+  %510 = load volatile <16 x i32>, ptr @vectors, align 64
+  %511 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %512 = call <16 x i32> @llvm.hexagon.V6.vroundhub(<16 x i32> %510, <16 x i32> %511)
-  store volatile <16 x i32> %512, <16 x i32>* @VectorResult, align 64
-  %513 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %514 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %512, ptr @VectorResult, align 64
+  %513 = load volatile <16 x i32>, ptr @vectors, align 64
+  %514 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %515 = call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %513, <16 x i32> %514)
-  store volatile <16 x i32> %515, <16 x i32>* @VectorResult, align 64
-  %516 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %517 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %515, ptr @VectorResult, align 64
+  %516 = load volatile <16 x i32>, ptr @vectors, align 64
+  %517 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %518 = call <16 x i32> @llvm.hexagon.V6.vsububsat(<16 x i32> %516, <16 x i32> %517)
-  store volatile <16 x i32> %518, <16 x i32>* @VectorResult, align 64
-  %519 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %520 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %518, ptr @VectorResult, align 64
+  %519 = load volatile <16 x i32>, ptr @vectors, align 64
+  %520 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %521 = call <16 x i32> @llvm.hexagon.V6.vabs
diff h(<16 x i32> %519, <16 x i32> %520)
-  store volatile <16 x i32> %521, <16 x i32>* @VectorResult, align 64
-  %522 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %523 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %521, ptr @VectorResult, align 64
+  %522 = load volatile <16 x i32>, ptr @vectors, align 64
+  %523 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %524 = call <16 x i32> @llvm.hexagon.V6.vabs
diff uh(<16 x i32> %522, <16 x i32> %523)
-  store volatile <16 x i32> %524, <16 x i32>* @VectorResult, align 64
-  %525 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %526 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %524, ptr @VectorResult, align 64
+  %525 = load volatile <16 x i32>, ptr @vectors, align 64
+  %526 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %527 = call <16 x i32> @llvm.hexagon.V6.vadduhsat(<16 x i32> %525, <16 x i32> %526)
-  store volatile <16 x i32> %527, <16 x i32>* @VectorResult, align 64
-  %528 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %529 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %527, ptr @VectorResult, align 64
+  %528 = load volatile <16 x i32>, ptr @vectors, align 64
+  %529 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %530 = call <16 x i32> @llvm.hexagon.V6.vasrwuhsat(<16 x i32> %528, <16 x i32> %529, i32 -1)
-  store volatile <16 x i32> %530, <16 x i32>* @VectorResult, align 64
-  %531 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %532 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %530, ptr @VectorResult, align 64
+  %531 = load volatile <16 x i32>, ptr @vectors, align 64
+  %532 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %533 = call <16 x i32> @llvm.hexagon.V6.vavguh(<16 x i32> %531, <16 x i32> %532)
-  store volatile <16 x i32> %533, <16 x i32>* @VectorResult, align 64
-  %534 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %535 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %533, ptr @VectorResult, align 64
+  %534 = load volatile <16 x i32>, ptr @vectors, align 64
+  %535 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %536 = call <16 x i32> @llvm.hexagon.V6.vavguhrnd(<16 x i32> %534, <16 x i32> %535)
-  store volatile <16 x i32> %536, <16 x i32>* @VectorResult, align 64
-  %537 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %536, ptr @VectorResult, align 64
+  %537 = load volatile <16 x i32>, ptr @vectors, align 64
   %538 = call <16 x i32> @llvm.hexagon.V6.vcl0h(<16 x i32> %537)
-  store volatile <16 x i32> %538, <16 x i32>* @VectorResult, align 64
-  %539 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %538, ptr @VectorResult, align 64
+  %539 = load volatile <16 x i32>, ptr @vectors, align 64
   %540 = call <16 x i32> @llvm.hexagon.V6.vlsrh(<16 x i32> %539, i32 -1)
-  store volatile <16 x i32> %540, <16 x i32>* @VectorResult, align 64
-  %541 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %542 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %540, ptr @VectorResult, align 64
+  %541 = load volatile <16 x i32>, ptr @vectors, align 64
+  %542 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %543 = call <16 x i32> @llvm.hexagon.V6.vmaxuh(<16 x i32> %541, <16 x i32> %542)
-  store volatile <16 x i32> %543, <16 x i32>* @VectorResult, align 64
-  %544 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %545 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %543, ptr @VectorResult, align 64
+  %544 = load volatile <16 x i32>, ptr @vectors, align 64
+  %545 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %546 = call <16 x i32> @llvm.hexagon.V6.vminuh(<16 x i32> %544, <16 x i32> %545)
-  store volatile <16 x i32> %546, <16 x i32>* @VectorResult, align 64
-  %547 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %548 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %546, ptr @VectorResult, align 64
+  %547 = load volatile <16 x i32>, ptr @vectors, align 64
+  %548 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %549 = call <16 x i32> @llvm.hexagon.V6.vpackwuh.sat(<16 x i32> %547, <16 x i32> %548)
-  store volatile <16 x i32> %549, <16 x i32>* @VectorResult, align 64
-  %550 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %551 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %549, ptr @VectorResult, align 64
+  %550 = load volatile <16 x i32>, ptr @vectors, align 64
+  %551 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %552 = call <16 x i32> @llvm.hexagon.V6.vroundwuh(<16 x i32> %550, <16 x i32> %551)
-  store volatile <16 x i32> %552, <16 x i32>* @VectorResult, align 64
-  %553 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %554 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %552, ptr @VectorResult, align 64
+  %553 = load volatile <16 x i32>, ptr @vectors, align 64
+  %554 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %555 = call <16 x i32> @llvm.hexagon.V6.vsubuhsat(<16 x i32> %553, <16 x i32> %554)
-  store volatile <16 x i32> %555, <16 x i32>* @VectorResult, align 64
-  %556 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %557 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %555, ptr @VectorResult, align 64
+  %556 = load volatile <16 x i32>, ptr @vectors, align 64
+  %557 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %558 = call <16 x i32> @llvm.hexagon.V6.vabs
diff w(<16 x i32> %556, <16 x i32> %557)
-  store volatile <16 x i32> %558, <16 x i32>* @VectorResult, align 64
-  %559 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %558, ptr @VectorResult, align 64
+  %559 = load volatile <16 x i32>, ptr @vectors, align 64
   %560 = call <16 x i32> @llvm.hexagon.V6.vcl0w(<16 x i32> %559)
-  store volatile <16 x i32> %560, <16 x i32>* @VectorResult, align 64
-  %561 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %560, ptr @VectorResult, align 64
+  %561 = load volatile <16 x i32>, ptr @vectors, align 64
   %562 = call <16 x i32> @llvm.hexagon.V6.vlsrw(<16 x i32> %561, i32 -1)
-  store volatile <16 x i32> %562, <16 x i32>* @VectorResult, align 64
-  %563 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %562, ptr @VectorResult, align 64
+  %563 = load volatile <16 x i32>, ptr @vectors, align 64
   %564 = call <16 x i32> @llvm.hexagon.V6.vrmpyub(<16 x i32> %563, i32 -1)
-  store volatile <16 x i32> %564, <16 x i32>* @VectorResult, align 64
-  %565 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %566 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %564, ptr @VectorResult, align 64
+  %565 = load volatile <16 x i32>, ptr @vectors, align 64
+  %566 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %567 = call <16 x i32> @llvm.hexagon.V6.vrmpyubv(<16 x i32> %565, <16 x i32> %566)
-  store volatile <16 x i32> %567, <16 x i32>* @VectorResult, align 64
-  %568 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %569 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %567, ptr @VectorResult, align 64
+  %568 = load volatile <16 x i32>, ptr @vectors, align 64
+  %569 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %570 = call <16 x i32> @llvm.hexagon.V6.vrmpyub.acc(<16 x i32> %568, <16 x i32> %569, i32 -1)
-  store volatile <16 x i32> %570, <16 x i32>* @VectorResult, align 64
-  %571 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %572 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
-  %573 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 2), align 64
+  store volatile <16 x i32> %570, ptr @VectorResult, align 64
+  %571 = load volatile <16 x i32>, ptr @vectors, align 64
+  %572 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
+  %573 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 2), align 64
   %574 = call <16 x i32> @llvm.hexagon.V6.vrmpyubv.acc(<16 x i32> %571, <16 x i32> %572, <16 x i32> %573)
-  store volatile <16 x i32> %574, <16 x i32>* @VectorResult, align 64
-  %575 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %574, ptr @VectorResult, align 64
+  %575 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %576 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %575, i32 -1)
-  %577 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %578 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %577 = load volatile <16 x i32>, ptr @vectors, align 64
+  %578 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %579 = call <16 x i32> @llvm.hexagon.V6.vaddwq(<64 x i1> %576, <16 x i32> %577, <16 x i32> %578)
-  store volatile <16 x i32> %579, <16 x i32>* @VectorResult, align 64
-  %580 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %579, ptr @VectorResult, align 64
+  %580 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %581 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %580, i32 -1)
-  %582 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %583 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %582 = load volatile <16 x i32>, ptr @vectors, align 64
+  %583 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %584 = call <16 x i32> @llvm.hexagon.V6.vaddwnq(<64 x i1> %581, <16 x i32> %582, <16 x i32> %583)
-  store volatile <16 x i32> %584, <16 x i32>* @VectorResult, align 64
-  %585 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %584, ptr @VectorResult, align 64
+  %585 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %586 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %585, i32 -1)
-  %587 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %588 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %587 = load volatile <16 x i32>, ptr @vectors, align 64
+  %588 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %589 = call <16 x i32> @llvm.hexagon.V6.vsubwq(<64 x i1> %586, <16 x i32> %587, <16 x i32> %588)
-  store volatile <16 x i32> %589, <16 x i32>* @VectorResult, align 64
-  %590 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %589, ptr @VectorResult, align 64
+  %590 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %591 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %590, i32 -1)
-  %592 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %593 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %592 = load volatile <16 x i32>, ptr @vectors, align 64
+  %593 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %594 = call <16 x i32> @llvm.hexagon.V6.vsubwnq(<64 x i1> %591, <16 x i32> %592, <16 x i32> %593)
-  store volatile <16 x i32> %594, <16 x i32>* @VectorResult, align 64
-  %595 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %594, ptr @VectorResult, align 64
+  %595 = load volatile <16 x i32>, ptr @vectors, align 64
   %596 = call <16 x i32> @llvm.hexagon.V6.vabsw(<16 x i32> %595)
-  store volatile <16 x i32> %596, <16 x i32>* @VectorResult, align 64
-  %597 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %596, ptr @VectorResult, align 64
+  %597 = load volatile <16 x i32>, ptr @vectors, align 64
   %598 = call <16 x i32> @llvm.hexagon.V6.vabsw.sat(<16 x i32> %597)
-  store volatile <16 x i32> %598, <16 x i32>* @VectorResult, align 64
-  %599 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %600 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %598, ptr @VectorResult, align 64
+  %599 = load volatile <16 x i32>, ptr @vectors, align 64
+  %600 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %601 = call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %599, <16 x i32> %600)
-  store volatile <16 x i32> %601, <16 x i32>* @VectorResult, align 64
-  %602 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %603 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %601, ptr @VectorResult, align 64
+  %602 = load volatile <16 x i32>, ptr @vectors, align 64
+  %603 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %604 = call <16 x i32> @llvm.hexagon.V6.vaddwsat(<16 x i32> %602, <16 x i32> %603)
-  store volatile <16 x i32> %604, <16 x i32>* @VectorResult, align 64
-  %605 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %604, ptr @VectorResult, align 64
+  %605 = load volatile <16 x i32>, ptr @vectors, align 64
   %606 = call <16 x i32> @llvm.hexagon.V6.vaslw(<16 x i32> %605, i32 -1)
-  store volatile <16 x i32> %606, <16 x i32>* @VectorResult, align 64
-  %607 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %608 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %606, ptr @VectorResult, align 64
+  %607 = load volatile <16 x i32>, ptr @vectors, align 64
+  %608 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %609 = call <16 x i32> @llvm.hexagon.V6.vaslwv(<16 x i32> %607, <16 x i32> %608)
-  store volatile <16 x i32> %609, <16 x i32>* @VectorResult, align 64
-  %610 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %611 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %609, ptr @VectorResult, align 64
+  %610 = load volatile <16 x i32>, ptr @vectors, align 64
+  %611 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %612 = call <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32> %610, <16 x i32> %611, i32 -1)
-  store volatile <16 x i32> %612, <16 x i32>* @VectorResult, align 64
-  %613 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %612, ptr @VectorResult, align 64
+  %613 = load volatile <16 x i32>, ptr @vectors, align 64
   %614 = call <16 x i32> @llvm.hexagon.V6.vasrw(<16 x i32> %613, i32 -1)
-  store volatile <16 x i32> %614, <16 x i32>* @VectorResult, align 64
-  %615 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %616 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %614, ptr @VectorResult, align 64
+  %615 = load volatile <16 x i32>, ptr @vectors, align 64
+  %616 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %617 = call <16 x i32> @llvm.hexagon.V6.vasrwv(<16 x i32> %615, <16 x i32> %616)
-  store volatile <16 x i32> %617, <16 x i32>* @VectorResult, align 64
-  %618 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %619 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %617, ptr @VectorResult, align 64
+  %618 = load volatile <16 x i32>, ptr @vectors, align 64
+  %619 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %620 = call <16 x i32> @llvm.hexagon.V6.vasrw.acc(<16 x i32> %618, <16 x i32> %619, i32 -1)
-  store volatile <16 x i32> %620, <16 x i32>* @VectorResult, align 64
-  %621 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %622 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %620, ptr @VectorResult, align 64
+  %621 = load volatile <16 x i32>, ptr @vectors, align 64
+  %622 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %623 = call <16 x i32> @llvm.hexagon.V6.vavgw(<16 x i32> %621, <16 x i32> %622)
-  store volatile <16 x i32> %623, <16 x i32>* @VectorResult, align 64
-  %624 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %625 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %623, ptr @VectorResult, align 64
+  %624 = load volatile <16 x i32>, ptr @vectors, align 64
+  %625 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %626 = call <16 x i32> @llvm.hexagon.V6.vavgwrnd(<16 x i32> %624, <16 x i32> %625)
-  store volatile <16 x i32> %626, <16 x i32>* @VectorResult, align 64
-  %627 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %626, ptr @VectorResult, align 64
+  %627 = load volatile <16 x i32>, ptr @vectors, align 64
   %628 = call <16 x i32> @llvm.hexagon.V6.vdmpyhb(<16 x i32> %627, i32 -1)
-  store volatile <16 x i32> %628, <16 x i32>* @VectorResult, align 64
-  %629 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %628, ptr @VectorResult, align 64
+  %629 = load volatile <16 x i32>, ptr @vectors, align 64
   %630 = call <16 x i32> @llvm.hexagon.V6.vdmpyhsat(<16 x i32> %629, i32 -1)
-  store volatile <16 x i32> %630, <16 x i32>* @VectorResult, align 64
-  %631 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %630, ptr @VectorResult, align 64
+  %631 = load volatile <16 x i32>, ptr @vectors, align 64
   %632 = call <16 x i32> @llvm.hexagon.V6.vdmpyhsusat(<16 x i32> %631, i32 -1)
-  store volatile <16 x i32> %632, <16 x i32>* @VectorResult, align 64
-  %633 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %634 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %632, ptr @VectorResult, align 64
+  %633 = load volatile <16 x i32>, ptr @vectors, align 64
+  %634 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %635 = call <16 x i32> @llvm.hexagon.V6.vdmpyhvsat(<16 x i32> %633, <16 x i32> %634)
-  store volatile <16 x i32> %635, <16 x i32>* @VectorResult, align 64
-  %636 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
+  store volatile <16 x i32> %635, ptr @VectorResult, align 64
+  %636 = load volatile <32 x i32>, ptr @vector_pairs, align 128
   %637 = call <16 x i32> @llvm.hexagon.V6.vdmpyhisat(<32 x i32> %636, i32 -1)
-  store volatile <16 x i32> %637, <16 x i32>* @VectorResult, align 64
-  %638 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
+  store volatile <16 x i32> %637, ptr @VectorResult, align 64
+  %638 = load volatile <32 x i32>, ptr @vector_pairs, align 128
   %639 = call <16 x i32> @llvm.hexagon.V6.vdmpyhsuisat(<32 x i32> %638, i32 -1)
-  store volatile <16 x i32> %639, <16 x i32>* @VectorResult, align 64
-  %640 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %641 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %639, ptr @VectorResult, align 64
+  %640 = load volatile <16 x i32>, ptr @vectors, align 64
+  %641 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %642 = call <16 x i32> @llvm.hexagon.V6.vdmpyhb.acc(<16 x i32> %640, <16 x i32> %641, i32 -1)
-  store volatile <16 x i32> %642, <16 x i32>* @VectorResult, align 64
-  %643 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %644 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %642, ptr @VectorResult, align 64
+  %643 = load volatile <16 x i32>, ptr @vectors, align 64
+  %644 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %645 = call <16 x i32> @llvm.hexagon.V6.vdmpyhsat.acc(<16 x i32> %643, <16 x i32> %644, i32 -1)
-  store volatile <16 x i32> %645, <16 x i32>* @VectorResult, align 64
-  %646 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %647 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %645, ptr @VectorResult, align 64
+  %646 = load volatile <16 x i32>, ptr @vectors, align 64
+  %647 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %648 = call <16 x i32> @llvm.hexagon.V6.vdmpyhsusat.acc(<16 x i32> %646, <16 x i32> %647, i32 -1)
-  store volatile <16 x i32> %648, <16 x i32>* @VectorResult, align 64
-  %649 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %650 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
-  %651 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 2), align 64
+  store volatile <16 x i32> %648, ptr @VectorResult, align 64
+  %649 = load volatile <16 x i32>, ptr @vectors, align 64
+  %650 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
+  %651 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 2), align 64
   %652 = call <16 x i32> @llvm.hexagon.V6.vdmpyhvsat.acc(<16 x i32> %649, <16 x i32> %650, <16 x i32> %651)
-  store volatile <16 x i32> %652, <16 x i32>* @VectorResult, align 64
-  %653 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %654 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
+  store volatile <16 x i32> %652, ptr @VectorResult, align 64
+  %653 = load volatile <16 x i32>, ptr @vectors, align 64
+  %654 = load volatile <32 x i32>, ptr @vector_pairs, align 128
   %655 = call <16 x i32> @llvm.hexagon.V6.vdmpyhisat.acc(<16 x i32> %653, <32 x i32> %654, i32 -1)
-  store volatile <16 x i32> %655, <16 x i32>* @VectorResult, align 64
-  %656 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %657 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
+  store volatile <16 x i32> %655, ptr @VectorResult, align 64
+  %656 = load volatile <16 x i32>, ptr @vectors, align 64
+  %657 = load volatile <32 x i32>, ptr @vector_pairs, align 128
   %658 = call <16 x i32> @llvm.hexagon.V6.vdmpyhsuisat.acc(<16 x i32> %656, <32 x i32> %657, i32 -1)
-  store volatile <16 x i32> %658, <16 x i32>* @VectorResult, align 64
-  %659 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %658, ptr @VectorResult, align 64
+  %659 = load volatile <16 x i32>, ptr @vectors, align 64
   %660 = call <16 x i32> @llvm.hexagon.V6.vinsertwr(<16 x i32> %659, i32 -1)
-  store volatile <16 x i32> %660, <16 x i32>* @VectorResult, align 64
-  %661 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %660, ptr @VectorResult, align 64
+  %661 = load volatile <16 x i32>, ptr @vectors, align 64
   %662 = call <16 x i32> @llvm.hexagon.V6.vinsertwr(<16 x i32> %661, i32 0)
-  store volatile <16 x i32> %662, <16 x i32>* @VectorResult, align 64
-  %663 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %662, ptr @VectorResult, align 64
+  %663 = load volatile <16 x i32>, ptr @vectors, align 64
   %664 = call <16 x i32> @llvm.hexagon.V6.vinsertwr(<16 x i32> %663, i32 1)
-  store volatile <16 x i32> %664, <16 x i32>* @VectorResult, align 64
-  %665 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %666 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %664, ptr @VectorResult, align 64
+  %665 = load volatile <16 x i32>, ptr @vectors, align 64
+  %666 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %667 = call <16 x i32> @llvm.hexagon.V6.vlsrwv(<16 x i32> %665, <16 x i32> %666)
-  store volatile <16 x i32> %667, <16 x i32>* @VectorResult, align 64
-  %668 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %669 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %667, ptr @VectorResult, align 64
+  %668 = load volatile <16 x i32>, ptr @vectors, align 64
+  %669 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %670 = call <16 x i32> @llvm.hexagon.V6.vmaxw(<16 x i32> %668, <16 x i32> %669)
-  store volatile <16 x i32> %670, <16 x i32>* @VectorResult, align 64
-  %671 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %672 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %670, ptr @VectorResult, align 64
+  %671 = load volatile <16 x i32>, ptr @vectors, align 64
+  %672 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %673 = call <16 x i32> @llvm.hexagon.V6.vminw(<16 x i32> %671, <16 x i32> %672)
-  store volatile <16 x i32> %673, <16 x i32>* @VectorResult, align 64
-  %674 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %675 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %673, ptr @VectorResult, align 64
+  %674 = load volatile <16 x i32>, ptr @vectors, align 64
+  %675 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %676 = call <16 x i32> @llvm.hexagon.V6.vmpyewuh(<16 x i32> %674, <16 x i32> %675)
-  store volatile <16 x i32> %676, <16 x i32>* @VectorResult, align 64
-  %677 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %676, ptr @VectorResult, align 64
+  %677 = load volatile <16 x i32>, ptr @vectors, align 64
   %678 = call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %677, i32 -1)
-  store volatile <16 x i32> %678, <16 x i32>* @VectorResult, align 64
-  %679 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %678, ptr @VectorResult, align 64
+  %679 = load volatile <16 x i32>, ptr @vectors, align 64
   %680 = call <16 x i32> @llvm.hexagon.V6.vmpyiwh(<16 x i32> %679, i32 -1)
-  store volatile <16 x i32> %680, <16 x i32>* @VectorResult, align 64
-  %681 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %682 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %680, ptr @VectorResult, align 64
+  %681 = load volatile <16 x i32>, ptr @vectors, align 64
+  %682 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %683 = call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %681, <16 x i32> %682, i32 -1)
-  store volatile <16 x i32> %683, <16 x i32>* @VectorResult, align 64
-  %684 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %685 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %683, ptr @VectorResult, align 64
+  %684 = load volatile <16 x i32>, ptr @vectors, align 64
+  %685 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %686 = call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %684, <16 x i32> %685, i32 -1)
-  store volatile <16 x i32> %686, <16 x i32>* @VectorResult, align 64
-  %687 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %688 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %686, ptr @VectorResult, align 64
+  %687 = load volatile <16 x i32>, ptr @vectors, align 64
+  %688 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %689 = call <16 x i32> @llvm.hexagon.V6.vmpyiewuh(<16 x i32> %687, <16 x i32> %688)
-  store volatile <16 x i32> %689, <16 x i32>* @VectorResult, align 64
-  %690 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %691 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
-  %692 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 2), align 64
+  store volatile <16 x i32> %689, ptr @VectorResult, align 64
+  %690 = load volatile <16 x i32>, ptr @vectors, align 64
+  %691 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
+  %692 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 2), align 64
   %693 = call <16 x i32> @llvm.hexagon.V6.vmpyiewh.acc(<16 x i32> %690, <16 x i32> %691, <16 x i32> %692)
-  store volatile <16 x i32> %693, <16 x i32>* @VectorResult, align 64
-  %694 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %695 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
-  %696 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 2), align 64
+  store volatile <16 x i32> %693, ptr @VectorResult, align 64
+  %694 = load volatile <16 x i32>, ptr @vectors, align 64
+  %695 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
+  %696 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 2), align 64
   %697 = call <16 x i32> @llvm.hexagon.V6.vmpyiewuh.acc(<16 x i32> %694, <16 x i32> %695, <16 x i32> %696)
-  store volatile <16 x i32> %697, <16 x i32>* @VectorResult, align 64
-  %698 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %699 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %697, ptr @VectorResult, align 64
+  %698 = load volatile <16 x i32>, ptr @vectors, align 64
+  %699 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %700 = call <16 x i32> @llvm.hexagon.V6.vmpyieoh(<16 x i32> %698, <16 x i32> %699)
-  store volatile <16 x i32> %700, <16 x i32>* @VectorResult, align 64
-  %701 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %702 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %700, ptr @VectorResult, align 64
+  %701 = load volatile <16 x i32>, ptr @vectors, align 64
+  %702 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %703 = call <16 x i32> @llvm.hexagon.V6.vmpyiowh(<16 x i32> %701, <16 x i32> %702)
-  store volatile <16 x i32> %703, <16 x i32>* @VectorResult, align 64
-  %704 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %705 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %703, ptr @VectorResult, align 64
+  %704 = load volatile <16 x i32>, ptr @vectors, align 64
+  %705 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %706 = call <16 x i32> @llvm.hexagon.V6.vmpyowh.rnd(<16 x i32> %704, <16 x i32> %705)
-  store volatile <16 x i32> %706, <16 x i32>* @VectorResult, align 64
-  %707 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %708 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %706, ptr @VectorResult, align 64
+  %707 = load volatile <16 x i32>, ptr @vectors, align 64
+  %708 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %709 = call <16 x i32> @llvm.hexagon.V6.vmpyowh(<16 x i32> %707, <16 x i32> %708)
-  store volatile <16 x i32> %709, <16 x i32>* @VectorResult, align 64
-  %710 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %711 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
-  %712 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 2), align 64
+  store volatile <16 x i32> %709, ptr @VectorResult, align 64
+  %710 = load volatile <16 x i32>, ptr @vectors, align 64
+  %711 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
+  %712 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 2), align 64
   %713 = call <16 x i32> @llvm.hexagon.V6.vmpyowh.rnd.sacc(<16 x i32> %710, <16 x i32> %711, <16 x i32> %712)
-  store volatile <16 x i32> %713, <16 x i32>* @VectorResult, align 64
-  %714 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %715 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
-  %716 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 2), align 64
+  store volatile <16 x i32> %713, ptr @VectorResult, align 64
+  %714 = load volatile <16 x i32>, ptr @vectors, align 64
+  %715 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
+  %716 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 2), align 64
   %717 = call <16 x i32> @llvm.hexagon.V6.vmpyowh.sacc(<16 x i32> %714, <16 x i32> %715, <16 x i32> %716)
-  store volatile <16 x i32> %717, <16 x i32>* @VectorResult, align 64
-  %718 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %719 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %717, ptr @VectorResult, align 64
+  %718 = load volatile <16 x i32>, ptr @vectors, align 64
+  %719 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %720 = call <16 x i32> @llvm.hexagon.V6.vnavgw(<16 x i32> %718, <16 x i32> %719)
-  store volatile <16 x i32> %720, <16 x i32>* @VectorResult, align 64
-  %721 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %720, ptr @VectorResult, align 64
+  %721 = load volatile <16 x i32>, ptr @vectors, align 64
   %722 = call <16 x i32> @llvm.hexagon.V6.vnormamtw(<16 x i32> %721)
-  store volatile <16 x i32> %722, <16 x i32>* @VectorResult, align 64
-  %723 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %724 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %722, ptr @VectorResult, align 64
+  %723 = load volatile <16 x i32>, ptr @vectors, align 64
+  %724 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %725 = call <16 x i32> @llvm.hexagon.V6.vrmpybv(<16 x i32> %723, <16 x i32> %724)
-  store volatile <16 x i32> %725, <16 x i32>* @VectorResult, align 64
-  %726 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <16 x i32> %725, ptr @VectorResult, align 64
+  %726 = load volatile <16 x i32>, ptr @vectors, align 64
   %727 = call <16 x i32> @llvm.hexagon.V6.vrmpybus(<16 x i32> %726, i32 -1)
-  store volatile <16 x i32> %727, <16 x i32>* @VectorResult, align 64
-  %728 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %729 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %727, ptr @VectorResult, align 64
+  %728 = load volatile <16 x i32>, ptr @vectors, align 64
+  %729 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %730 = call <16 x i32> @llvm.hexagon.V6.vrmpybusv(<16 x i32> %728, <16 x i32> %729)
-  store volatile <16 x i32> %730, <16 x i32>* @VectorResult, align 64
-  %731 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %732 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
-  %733 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 2), align 64
+  store volatile <16 x i32> %730, ptr @VectorResult, align 64
+  %731 = load volatile <16 x i32>, ptr @vectors, align 64
+  %732 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
+  %733 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 2), align 64
   %734 = call <16 x i32> @llvm.hexagon.V6.vrmpybv.acc(<16 x i32> %731, <16 x i32> %732, <16 x i32> %733)
-  store volatile <16 x i32> %734, <16 x i32>* @VectorResult, align 64
-  %735 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %736 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %734, ptr @VectorResult, align 64
+  %735 = load volatile <16 x i32>, ptr @vectors, align 64
+  %736 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %737 = call <16 x i32> @llvm.hexagon.V6.vrmpybus.acc(<16 x i32> %735, <16 x i32> %736, i32 -1)
-  store volatile <16 x i32> %737, <16 x i32>* @VectorResult, align 64
-  %738 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %739 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
-  %740 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 2), align 64
+  store volatile <16 x i32> %737, ptr @VectorResult, align 64
+  %738 = load volatile <16 x i32>, ptr @vectors, align 64
+  %739 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
+  %740 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 2), align 64
   %741 = call <16 x i32> @llvm.hexagon.V6.vrmpybusv.acc(<16 x i32> %738, <16 x i32> %739, <16 x i32> %740)
-  store volatile <16 x i32> %741, <16 x i32>* @VectorResult, align 64
-  %742 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %743 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %741, ptr @VectorResult, align 64
+  %742 = load volatile <16 x i32>, ptr @vectors, align 64
+  %743 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %744 = call <16 x i32> @llvm.hexagon.V6.vsubw(<16 x i32> %742, <16 x i32> %743)
-  store volatile <16 x i32> %744, <16 x i32>* @VectorResult, align 64
-  %745 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %746 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <16 x i32> %744, ptr @VectorResult, align 64
+  %745 = load volatile <16 x i32>, ptr @vectors, align 64
+  %746 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %747 = call <16 x i32> @llvm.hexagon.V6.vsubwsat(<16 x i32> %745, <16 x i32> %746)
-  store volatile <16 x i32> %747, <16 x i32>* @VectorResult, align 64
-  %748 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
+  store volatile <16 x i32> %747, ptr @VectorResult, align 64
+  %748 = load volatile <32 x i32>, ptr @vector_pairs, align 128
   %749 = call <32 x i32> @llvm.hexagon.V6.vassignp(<32 x i32> %748)
-  store volatile <32 x i32> %749, <32 x i32>* @VectorPairResult, align 128
-  %750 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %751 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %749, ptr @VectorPairResult, align 128
+  %750 = load volatile <16 x i32>, ptr @vectors, align 64
+  %751 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %752 = call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %750, <16 x i32> %751)
-  store volatile <32 x i32> %752, <32 x i32>* @VectorPairResult, align 128
-  %753 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %754 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %752, ptr @VectorPairResult, align 128
+  %753 = load volatile <16 x i32>, ptr @vectors, align 64
+  %754 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %755 = call <32 x i32> @llvm.hexagon.V6.vdealvdd(<16 x i32> %753, <16 x i32> %754, i32 -1)
-  store volatile <32 x i32> %755, <32 x i32>* @VectorPairResult, align 128
-  %756 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %757 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %755, ptr @VectorPairResult, align 128
+  %756 = load volatile <16 x i32>, ptr @vectors, align 64
+  %757 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %758 = call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> %756, <16 x i32> %757, i32 -1)
-  store volatile <32 x i32> %758, <32 x i32>* @VectorPairResult, align 128
-  %759 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %760 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %758, ptr @VectorPairResult, align 128
+  %759 = load volatile <16 x i32>, ptr @vectors, align 64
+  %760 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %761 = call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> %759, <16 x i32> %760, i32 0)
-  store volatile <32 x i32> %761, <32 x i32>* @VectorPairResult, align 128
-  %762 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %763 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %761, ptr @VectorPairResult, align 128
+  %762 = load volatile <16 x i32>, ptr @vectors, align 64
+  %763 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %764 = call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> %762, <16 x i32> %763, i32 1)
-  store volatile <32 x i32> %764, <32 x i32>* @VectorPairResult, align 128
-  %765 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <32 x i32> %764, ptr @VectorPairResult, align 128
+  %765 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %766 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %765, i32 -1)
-  %767 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %768 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  %767 = load volatile <16 x i32>, ptr @vectors, align 64
+  %768 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %769 = call <32 x i32> @llvm.hexagon.V6.vswap(<64 x i1> %766, <16 x i32> %767, <16 x i32> %768)
-  store volatile <32 x i32> %769, <32 x i32>* @VectorPairResult, align 128
-  %770 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %771 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %769, ptr @VectorPairResult, align 128
+  %770 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %771 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %772 = call <32 x i32> @llvm.hexagon.V6.vaddb.dv(<32 x i32> %770, <32 x i32> %771)
-  store volatile <32 x i32> %772, <32 x i32>* @VectorPairResult, align 128
-  %773 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %774 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %772, ptr @VectorPairResult, align 128
+  %773 = load volatile <16 x i32>, ptr @vectors, align 64
+  %774 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %775 = call <32 x i32> @llvm.hexagon.V6.vshufoeb(<16 x i32> %773, <16 x i32> %774)
-  store volatile <32 x i32> %775, <32 x i32>* @VectorPairResult, align 128
-  %776 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %777 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %775, ptr @VectorPairResult, align 128
+  %776 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %777 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %778 = call <32 x i32> @llvm.hexagon.V6.vsubb.dv(<32 x i32> %776, <32 x i32> %777)
-  store volatile <32 x i32> %778, <32 x i32>* @VectorPairResult, align 128
-  %779 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %780 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %778, ptr @VectorPairResult, align 128
+  %779 = load volatile <16 x i32>, ptr @vectors, align 64
+  %780 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %781 = call <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32> %779, <16 x i32> %780)
-  store volatile <32 x i32> %781, <32 x i32>* @VectorPairResult, align 128
-  %782 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %783 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %781, ptr @VectorPairResult, align 128
+  %782 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %783 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %784 = call <32 x i32> @llvm.hexagon.V6.vaddh.dv(<32 x i32> %782, <32 x i32> %783)
-  store volatile <32 x i32> %784, <32 x i32>* @VectorPairResult, align 128
-  %785 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %786 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %784, ptr @VectorPairResult, align 128
+  %785 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %786 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %787 = call <32 x i32> @llvm.hexagon.V6.vaddhsat.dv(<32 x i32> %785, <32 x i32> %786)
-  store volatile <32 x i32> %787, <32 x i32>* @VectorPairResult, align 128
-  %788 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
+  store volatile <32 x i32> %787, ptr @VectorPairResult, align 128
+  %788 = load volatile <32 x i32>, ptr @vector_pairs, align 128
   %789 = call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv(<32 x i32> %788, i32 -1)
-  store volatile <32 x i32> %789, <32 x i32>* @VectorPairResult, align 128
-  %790 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %791 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %789, ptr @VectorPairResult, align 128
+  %790 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %791 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %792 = call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %790, <32 x i32> %791, i32 -1)
-  store volatile <32 x i32> %792, <32 x i32>* @VectorPairResult, align 128
-  %793 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %794 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %792, ptr @VectorPairResult, align 128
+  %793 = load volatile <16 x i32>, ptr @vectors, align 64
+  %794 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %795 = call <32 x i32> @llvm.hexagon.V6.vlutvwh(<16 x i32> %793, <16 x i32> %794, i32 -1)
-  store volatile <32 x i32> %795, <32 x i32>* @VectorPairResult, align 128
-  %796 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %797 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %798 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %795, ptr @VectorPairResult, align 128
+  %796 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %797 = load volatile <16 x i32>, ptr @vectors, align 64
+  %798 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %799 = call <32 x i32> @llvm.hexagon.V6.vlutvwh.oracc(<32 x i32> %796, <16 x i32> %797, <16 x i32> %798, i32 -1)
-  store volatile <32 x i32> %799, <32 x i32>* @VectorPairResult, align 128
-  %800 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
+  store volatile <32 x i32> %799, ptr @VectorPairResult, align 128
+  %800 = load volatile <32 x i32>, ptr @vector_pairs, align 128
   %801 = call <32 x i32> @llvm.hexagon.V6.vmpabus(<32 x i32> %800, i32 -1)
-  store volatile <32 x i32> %801, <32 x i32>* @VectorPairResult, align 128
-  %802 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %803 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %801, ptr @VectorPairResult, align 128
+  %802 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %803 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %804 = call <32 x i32> @llvm.hexagon.V6.vmpabusv(<32 x i32> %802, <32 x i32> %803)
-  store volatile <32 x i32> %804, <32 x i32>* @VectorPairResult, align 128
-  %805 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %806 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %804, ptr @VectorPairResult, align 128
+  %805 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %806 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %807 = call <32 x i32> @llvm.hexagon.V6.vmpabuuv(<32 x i32> %805, <32 x i32> %806)
-  store volatile <32 x i32> %807, <32 x i32>* @VectorPairResult, align 128
-  %808 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %809 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %807, ptr @VectorPairResult, align 128
+  %808 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %809 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %810 = call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %808, <32 x i32> %809, i32 -1)
-  store volatile <32 x i32> %810, <32 x i32>* @VectorPairResult, align 128
-  %811 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %812 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %810, ptr @VectorPairResult, align 128
+  %811 = load volatile <16 x i32>, ptr @vectors, align 64
+  %812 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %813 = call <32 x i32> @llvm.hexagon.V6.vmpybv(<16 x i32> %811, <16 x i32> %812)
-  store volatile <32 x i32> %813, <32 x i32>* @VectorPairResult, align 128
-  %814 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <32 x i32> %813, ptr @VectorPairResult, align 128
+  %814 = load volatile <16 x i32>, ptr @vectors, align 64
   %815 = call <32 x i32> @llvm.hexagon.V6.vmpybus(<16 x i32> %814, i32 -1)
-  store volatile <32 x i32> %815, <32 x i32>* @VectorPairResult, align 128
-  %816 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %817 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %815, ptr @VectorPairResult, align 128
+  %816 = load volatile <16 x i32>, ptr @vectors, align 64
+  %817 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %818 = call <32 x i32> @llvm.hexagon.V6.vmpybusv(<16 x i32> %816, <16 x i32> %817)
-  store volatile <32 x i32> %818, <32 x i32>* @VectorPairResult, align 128
-  %819 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %820 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %821 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %818, ptr @VectorPairResult, align 128
+  %819 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %820 = load volatile <16 x i32>, ptr @vectors, align 64
+  %821 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %822 = call <32 x i32> @llvm.hexagon.V6.vmpybv.acc(<32 x i32> %819, <16 x i32> %820, <16 x i32> %821)
-  store volatile <32 x i32> %822, <32 x i32>* @VectorPairResult, align 128
-  %823 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %824 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <32 x i32> %822, ptr @VectorPairResult, align 128
+  %823 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %824 = load volatile <16 x i32>, ptr @vectors, align 64
   %825 = call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %823, <16 x i32> %824, i32 -1)
-  store volatile <32 x i32> %825, <32 x i32>* @VectorPairResult, align 128
-  %826 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %827 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %828 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %825, ptr @VectorPairResult, align 128
+  %826 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %827 = load volatile <16 x i32>, ptr @vectors, align 64
+  %828 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %829 = call <32 x i32> @llvm.hexagon.V6.vmpybusv.acc(<32 x i32> %826, <16 x i32> %827, <16 x i32> %828)
-  store volatile <32 x i32> %829, <32 x i32>* @VectorPairResult, align 128
-  %830 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %831 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %829, ptr @VectorPairResult, align 128
+  %830 = load volatile <16 x i32>, ptr @vectors, align 64
+  %831 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %832 = call <32 x i32> @llvm.hexagon.V6.vshufoeh(<16 x i32> %830, <16 x i32> %831)
-  store volatile <32 x i32> %832, <32 x i32>* @VectorPairResult, align 128
-  %833 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %834 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %832, ptr @VectorPairResult, align 128
+  %833 = load volatile <16 x i32>, ptr @vectors, align 64
+  %834 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %835 = call <32 x i32> @llvm.hexagon.V6.vsububh(<16 x i32> %833, <16 x i32> %834)
-  store volatile <32 x i32> %835, <32 x i32>* @VectorPairResult, align 128
-  %836 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %837 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %835, ptr @VectorPairResult, align 128
+  %836 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %837 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %838 = call <32 x i32> @llvm.hexagon.V6.vsubh.dv(<32 x i32> %836, <32 x i32> %837)
-  store volatile <32 x i32> %838, <32 x i32>* @VectorPairResult, align 128
-  %839 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %840 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %838, ptr @VectorPairResult, align 128
+  %839 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %840 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %841 = call <32 x i32> @llvm.hexagon.V6.vsubhsat.dv(<32 x i32> %839, <32 x i32> %840)
-  store volatile <32 x i32> %841, <32 x i32>* @VectorPairResult, align 128
-  %842 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <32 x i32> %841, ptr @VectorPairResult, align 128
+  %842 = load volatile <16 x i32>, ptr @vectors, align 64
   %843 = call <32 x i32> @llvm.hexagon.V6.vsb(<16 x i32> %842)
-  store volatile <32 x i32> %843, <32 x i32>* @VectorPairResult, align 128
-  %844 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
+  store volatile <32 x i32> %843, ptr @VectorPairResult, align 128
+  %844 = load volatile <32 x i32>, ptr @vector_pairs, align 128
   %845 = call <32 x i32> @llvm.hexagon.V6.vtmpyb(<32 x i32> %844, i32 -1)
-  store volatile <32 x i32> %845, <32 x i32>* @VectorPairResult, align 128
-  %846 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
+  store volatile <32 x i32> %845, ptr @VectorPairResult, align 128
+  %846 = load volatile <32 x i32>, ptr @vector_pairs, align 128
   %847 = call <32 x i32> @llvm.hexagon.V6.vtmpybus(<32 x i32> %846, i32 -1)
-  store volatile <32 x i32> %847, <32 x i32>* @VectorPairResult, align 128
-  %848 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %849 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %847, ptr @VectorPairResult, align 128
+  %848 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %849 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %850 = call <32 x i32> @llvm.hexagon.V6.vtmpyb.acc(<32 x i32> %848, <32 x i32> %849, i32 -1)
-  store volatile <32 x i32> %850, <32 x i32>* @VectorPairResult, align 128
-  %851 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %852 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %850, ptr @VectorPairResult, align 128
+  %851 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %852 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %853 = call <32 x i32> @llvm.hexagon.V6.vtmpybus.acc(<32 x i32> %851, <32 x i32> %852, i32 -1)
-  store volatile <32 x i32> %853, <32 x i32>* @VectorPairResult, align 128
-  %854 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <32 x i32> %853, ptr @VectorPairResult, align 128
+  %854 = load volatile <16 x i32>, ptr @vectors, align 64
   %855 = call <32 x i32> @llvm.hexagon.V6.vunpackb(<16 x i32> %854)
-  store volatile <32 x i32> %855, <32 x i32>* @VectorPairResult, align 128
-  %856 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %857 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <32 x i32> %855, ptr @VectorPairResult, align 128
+  %856 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %857 = load volatile <16 x i32>, ptr @vectors, align 64
   %858 = call <32 x i32> @llvm.hexagon.V6.vunpackob(<32 x i32> %856, <16 x i32> %857)
-  store volatile <32 x i32> %858, <32 x i32>* @VectorPairResult, align 128
-  %859 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %860 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %858, ptr @VectorPairResult, align 128
+  %859 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %860 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %861 = call <32 x i32> @llvm.hexagon.V6.vaddubsat.dv(<32 x i32> %859, <32 x i32> %860)
-  store volatile <32 x i32> %861, <32 x i32>* @VectorPairResult, align 128
-  %862 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %863 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %861, ptr @VectorPairResult, align 128
+  %862 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %863 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %864 = call <32 x i32> @llvm.hexagon.V6.vsububsat.dv(<32 x i32> %862, <32 x i32> %863)
-  store volatile <32 x i32> %864, <32 x i32>* @VectorPairResult, align 128
-  %865 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %866 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %864, ptr @VectorPairResult, align 128
+  %865 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %866 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %867 = call <32 x i32> @llvm.hexagon.V6.vadduhsat.dv(<32 x i32> %865, <32 x i32> %866)
-  store volatile <32 x i32> %867, <32 x i32>* @VectorPairResult, align 128
-  %868 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <32 x i32> %867, ptr @VectorPairResult, align 128
+  %868 = load volatile <16 x i32>, ptr @vectors, align 64
   %869 = call <32 x i32> @llvm.hexagon.V6.vmpyub(<16 x i32> %868, i32 -1)
-  store volatile <32 x i32> %869, <32 x i32>* @VectorPairResult, align 128
-  %870 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %871 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %869, ptr @VectorPairResult, align 128
+  %870 = load volatile <16 x i32>, ptr @vectors, align 64
+  %871 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %872 = call <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32> %870, <16 x i32> %871)
-  store volatile <32 x i32> %872, <32 x i32>* @VectorPairResult, align 128
-  %873 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %874 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <32 x i32> %872, ptr @VectorPairResult, align 128
+  %873 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %874 = load volatile <16 x i32>, ptr @vectors, align 64
   %875 = call <32 x i32> @llvm.hexagon.V6.vmpyub.acc(<32 x i32> %873, <16 x i32> %874, i32 -1)
-  store volatile <32 x i32> %875, <32 x i32>* @VectorPairResult, align 128
-  %876 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %877 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %878 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %875, ptr @VectorPairResult, align 128
+  %876 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %877 = load volatile <16 x i32>, ptr @vectors, align 64
+  %878 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %879 = call <32 x i32> @llvm.hexagon.V6.vmpyubv.acc(<32 x i32> %876, <16 x i32> %877, <16 x i32> %878)
-  store volatile <32 x i32> %879, <32 x i32>* @VectorPairResult, align 128
-  %880 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %881 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %879, ptr @VectorPairResult, align 128
+  %880 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %881 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %882 = call <32 x i32> @llvm.hexagon.V6.vsubuhsat.dv(<32 x i32> %880, <32 x i32> %881)
-  store volatile <32 x i32> %882, <32 x i32>* @VectorPairResult, align 128
-  %883 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <32 x i32> %882, ptr @VectorPairResult, align 128
+  %883 = load volatile <16 x i32>, ptr @vectors, align 64
   %884 = call <32 x i32> @llvm.hexagon.V6.vunpackub(<16 x i32> %883)
-  store volatile <32 x i32> %884, <32 x i32>* @VectorPairResult, align 128
-  %885 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <32 x i32> %884, ptr @VectorPairResult, align 128
+  %885 = load volatile <16 x i32>, ptr @vectors, align 64
   %886 = call <32 x i32> @llvm.hexagon.V6.vzb(<16 x i32> %885)
-  store volatile <32 x i32> %886, <32 x i32>* @VectorPairResult, align 128
-  %887 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
+  store volatile <32 x i32> %886, ptr @VectorPairResult, align 128
+  %887 = load volatile <32 x i32>, ptr @vector_pairs, align 128
   %888 = call <32 x i32> @llvm.hexagon.V6.vdsaduh(<32 x i32> %887, i32 -1)
-  store volatile <32 x i32> %888, <32 x i32>* @VectorPairResult, align 128
-  %889 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %890 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %888, ptr @VectorPairResult, align 128
+  %889 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %890 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %891 = call <32 x i32> @llvm.hexagon.V6.vdsaduh.acc(<32 x i32> %889, <32 x i32> %890, i32 -1)
-  store volatile <32 x i32> %891, <32 x i32>* @VectorPairResult, align 128
-  %892 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <32 x i32> %891, ptr @VectorPairResult, align 128
+  %892 = load volatile <16 x i32>, ptr @vectors, align 64
   %893 = call <32 x i32> @llvm.hexagon.V6.vmpyuh(<16 x i32> %892, i32 -1)
-  store volatile <32 x i32> %893, <32 x i32>* @VectorPairResult, align 128
-  %894 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %895 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %893, ptr @VectorPairResult, align 128
+  %894 = load volatile <16 x i32>, ptr @vectors, align 64
+  %895 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %896 = call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %894, <16 x i32> %895)
-  store volatile <32 x i32> %896, <32 x i32>* @VectorPairResult, align 128
-  %897 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %898 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <32 x i32> %896, ptr @VectorPairResult, align 128
+  %897 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %898 = load volatile <16 x i32>, ptr @vectors, align 64
   %899 = call <32 x i32> @llvm.hexagon.V6.vmpyuh.acc(<32 x i32> %897, <16 x i32> %898, i32 -1)
-  store volatile <32 x i32> %899, <32 x i32>* @VectorPairResult, align 128
-  %900 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %901 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %902 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %899, ptr @VectorPairResult, align 128
+  %900 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %901 = load volatile <16 x i32>, ptr @vectors, align 64
+  %902 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %903 = call <32 x i32> @llvm.hexagon.V6.vmpyuhv.acc(<32 x i32> %900, <16 x i32> %901, <16 x i32> %902)
-  store volatile <32 x i32> %903, <32 x i32>* @VectorPairResult, align 128
-  %904 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
+  store volatile <32 x i32> %903, ptr @VectorPairResult, align 128
+  %904 = load volatile <32 x i32>, ptr @vector_pairs, align 128
   %905 = call <32 x i32> @llvm.hexagon.V6.vrmpyubi(<32 x i32> %904, i32 -1, i32 0)
-  store volatile <32 x i32> %905, <32 x i32>* @VectorPairResult, align 128
-  %906 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %907 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %905, ptr @VectorPairResult, align 128
+  %906 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %907 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %908 = call <32 x i32> @llvm.hexagon.V6.vrmpyubi.acc(<32 x i32> %906, <32 x i32> %907, i32 -1, i32 0)
-  store volatile <32 x i32> %908, <32 x i32>* @VectorPairResult, align 128
-  %909 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
+  store volatile <32 x i32> %908, ptr @VectorPairResult, align 128
+  %909 = load volatile <32 x i32>, ptr @vector_pairs, align 128
   %910 = call <32 x i32> @llvm.hexagon.V6.vrsadubi(<32 x i32> %909, i32 -1, i32 0)
-  store volatile <32 x i32> %910, <32 x i32>* @VectorPairResult, align 128
-  %911 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %912 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %910, ptr @VectorPairResult, align 128
+  %911 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %912 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %913 = call <32 x i32> @llvm.hexagon.V6.vrsadubi.acc(<32 x i32> %911, <32 x i32> %912, i32 -1, i32 0)
-  store volatile <32 x i32> %913, <32 x i32>* @VectorPairResult, align 128
-  %914 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <32 x i32> %913, ptr @VectorPairResult, align 128
+  %914 = load volatile <16 x i32>, ptr @vectors, align 64
   %915 = call <32 x i32> @llvm.hexagon.V6.vunpackuh(<16 x i32> %914)
-  store volatile <32 x i32> %915, <32 x i32>* @VectorPairResult, align 128
-  %916 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <32 x i32> %915, ptr @VectorPairResult, align 128
+  %916 = load volatile <16 x i32>, ptr @vectors, align 64
   %917 = call <32 x i32> @llvm.hexagon.V6.vzh(<16 x i32> %916)
-  store volatile <32 x i32> %917, <32 x i32>* @VectorPairResult, align 128
-  %918 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %919 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %917, ptr @VectorPairResult, align 128
+  %918 = load volatile <16 x i32>, ptr @vectors, align 64
+  %919 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %920 = call <32 x i32> @llvm.hexagon.V6.vaddhw(<16 x i32> %918, <16 x i32> %919)
-  store volatile <32 x i32> %920, <32 x i32>* @VectorPairResult, align 128
-  %921 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %922 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %920, ptr @VectorPairResult, align 128
+  %921 = load volatile <16 x i32>, ptr @vectors, align 64
+  %922 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %923 = call <32 x i32> @llvm.hexagon.V6.vadduhw(<16 x i32> %921, <16 x i32> %922)
-  store volatile <32 x i32> %923, <32 x i32>* @VectorPairResult, align 128
-  %924 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %925 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %923, ptr @VectorPairResult, align 128
+  %924 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %925 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %926 = call <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32> %924, <32 x i32> %925)
-  store volatile <32 x i32> %926, <32 x i32>* @VectorPairResult, align 128
-  %927 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %928 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %926, ptr @VectorPairResult, align 128
+  %927 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %928 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %929 = call <32 x i32> @llvm.hexagon.V6.vaddwsat.dv(<32 x i32> %927, <32 x i32> %928)
-  store volatile <32 x i32> %929, <32 x i32>* @VectorPairResult, align 128
-  %930 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
+  store volatile <32 x i32> %929, ptr @VectorPairResult, align 128
+  %930 = load volatile <32 x i32>, ptr @vector_pairs, align 128
   %931 = call <32 x i32> @llvm.hexagon.V6.vdmpyhb.dv(<32 x i32> %930, i32 -1)
-  store volatile <32 x i32> %931, <32 x i32>* @VectorPairResult, align 128
-  %932 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %933 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %931, ptr @VectorPairResult, align 128
+  %932 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %933 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %934 = call <32 x i32> @llvm.hexagon.V6.vdmpyhb.dv.acc(<32 x i32> %932, <32 x i32> %933, i32 -1)
-  store volatile <32 x i32> %934, <32 x i32>* @VectorPairResult, align 128
-  %935 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
+  store volatile <32 x i32> %934, ptr @VectorPairResult, align 128
+  %935 = load volatile <32 x i32>, ptr @vector_pairs, align 128
   %936 = call <32 x i32> @llvm.hexagon.V6.vmpahb(<32 x i32> %935, i32 -1)
-  store volatile <32 x i32> %936, <32 x i32>* @VectorPairResult, align 128
-  %937 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %938 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %936, ptr @VectorPairResult, align 128
+  %937 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %938 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %939 = call <32 x i32> @llvm.hexagon.V6.vmpahb.acc(<32 x i32> %937, <32 x i32> %938, i32 -1)
-  store volatile <32 x i32> %939, <32 x i32>* @VectorPairResult, align 128
-  %940 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <32 x i32> %939, ptr @VectorPairResult, align 128
+  %940 = load volatile <16 x i32>, ptr @vectors, align 64
   %941 = call <32 x i32> @llvm.hexagon.V6.vmpyh(<16 x i32> %940, i32 -1)
-  store volatile <32 x i32> %941, <32 x i32>* @VectorPairResult, align 128
-  %942 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %943 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %941, ptr @VectorPairResult, align 128
+  %942 = load volatile <16 x i32>, ptr @vectors, align 64
+  %943 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %944 = call <32 x i32> @llvm.hexagon.V6.vmpyhv(<16 x i32> %942, <16 x i32> %943)
-  store volatile <32 x i32> %944, <32 x i32>* @VectorPairResult, align 128
-  %945 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %946 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %944, ptr @VectorPairResult, align 128
+  %945 = load volatile <16 x i32>, ptr @vectors, align 64
+  %946 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %947 = call <32 x i32> @llvm.hexagon.V6.vmpyhus(<16 x i32> %945, <16 x i32> %946)
-  store volatile <32 x i32> %947, <32 x i32>* @VectorPairResult, align 128
-  %948 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %949 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <32 x i32> %947, ptr @VectorPairResult, align 128
+  %948 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %949 = load volatile <16 x i32>, ptr @vectors, align 64
   %950 = call <32 x i32> @llvm.hexagon.V6.vmpyhsat.acc(<32 x i32> %948, <16 x i32> %949, i32 -1)
-  store volatile <32 x i32> %950, <32 x i32>* @VectorPairResult, align 128
-  %951 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %952 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %953 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %950, ptr @VectorPairResult, align 128
+  %951 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %952 = load volatile <16 x i32>, ptr @vectors, align 64
+  %953 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %954 = call <32 x i32> @llvm.hexagon.V6.vmpyhv.acc(<32 x i32> %951, <16 x i32> %952, <16 x i32> %953)
-  store volatile <32 x i32> %954, <32 x i32>* @VectorPairResult, align 128
-  %955 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %956 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %957 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %954, ptr @VectorPairResult, align 128
+  %955 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %956 = load volatile <16 x i32>, ptr @vectors, align 64
+  %957 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %958 = call <32 x i32> @llvm.hexagon.V6.vmpyhus.acc(<32 x i32> %955, <16 x i32> %956, <16 x i32> %957)
-  store volatile <32 x i32> %958, <32 x i32>* @VectorPairResult, align 128
-  %959 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
+  store volatile <32 x i32> %958, ptr @VectorPairResult, align 128
+  %959 = load volatile <32 x i32>, ptr @vector_pairs, align 128
   %960 = call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %959, i32 -1, i32 0)
-  store volatile <32 x i32> %960, <32 x i32>* @VectorPairResult, align 128
-  %961 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %962 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %960, ptr @VectorPairResult, align 128
+  %961 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %962 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %963 = call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %961, <32 x i32> %962, i32 -1, i32 0)
-  store volatile <32 x i32> %963, <32 x i32>* @VectorPairResult, align 128
-  %964 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %965 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %963, ptr @VectorPairResult, align 128
+  %964 = load volatile <16 x i32>, ptr @vectors, align 64
+  %965 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %966 = call <32 x i32> @llvm.hexagon.V6.vsubhw(<16 x i32> %964, <16 x i32> %965)
-  store volatile <32 x i32> %966, <32 x i32>* @VectorPairResult, align 128
-  %967 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
-  %968 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 1), align 64
+  store volatile <32 x i32> %966, ptr @VectorPairResult, align 128
+  %967 = load volatile <16 x i32>, ptr @vectors, align 64
+  %968 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vectors, i32 0, i32 1), align 64
   %969 = call <32 x i32> @llvm.hexagon.V6.vsubuhw(<16 x i32> %967, <16 x i32> %968)
-  store volatile <32 x i32> %969, <32 x i32>* @VectorPairResult, align 128
-  %970 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %971 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %969, ptr @VectorPairResult, align 128
+  %970 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %971 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %972 = call <32 x i32> @llvm.hexagon.V6.vsubw.dv(<32 x i32> %970, <32 x i32> %971)
-  store volatile <32 x i32> %972, <32 x i32>* @VectorPairResult, align 128
-  %973 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %974 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %972, ptr @VectorPairResult, align 128
+  %973 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %974 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %975 = call <32 x i32> @llvm.hexagon.V6.vsubwsat.dv(<32 x i32> %973, <32 x i32> %974)
-  store volatile <32 x i32> %975, <32 x i32>* @VectorPairResult, align 128
-  %976 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <32 x i32> %975, ptr @VectorPairResult, align 128
+  %976 = load volatile <16 x i32>, ptr @vectors, align 64
   %977 = call <32 x i32> @llvm.hexagon.V6.vsh(<16 x i32> %976)
-  store volatile <32 x i32> %977, <32 x i32>* @VectorPairResult, align 128
-  %978 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
+  store volatile <32 x i32> %977, ptr @VectorPairResult, align 128
+  %978 = load volatile <32 x i32>, ptr @vector_pairs, align 128
   %979 = call <32 x i32> @llvm.hexagon.V6.vtmpyhb(<32 x i32> %978, i32 -1)
-  store volatile <32 x i32> %979, <32 x i32>* @VectorPairResult, align 128
-  %980 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %981 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 1), align 128
+  store volatile <32 x i32> %979, ptr @VectorPairResult, align 128
+  %980 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %981 = load volatile <32 x i32>, ptr getelementptr inbounds ([15 x <32 x i32>], ptr @vector_pairs, i32 0, i32 1), align 128
   %982 = call <32 x i32> @llvm.hexagon.V6.vtmpyhb.acc(<32 x i32> %980, <32 x i32> %981, i32 -1)
-  store volatile <32 x i32> %982, <32 x i32>* @VectorPairResult, align 128
-  %983 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <32 x i32> %982, ptr @VectorPairResult, align 128
+  %983 = load volatile <16 x i32>, ptr @vectors, align 64
   %984 = call <32 x i32> @llvm.hexagon.V6.vunpackh(<16 x i32> %983)
-  store volatile <32 x i32> %984, <32 x i32>* @VectorPairResult, align 128
-  %985 = load volatile <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @vector_pairs, i32 0, i32 0), align 128
-  %986 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vectors, i32 0, i32 0), align 64
+  store volatile <32 x i32> %984, ptr @VectorPairResult, align 128
+  %985 = load volatile <32 x i32>, ptr @vector_pairs, align 128
+  %986 = load volatile <16 x i32>, ptr @vectors, align 64
   %987 = call <32 x i32> @llvm.hexagon.V6.vunpackoh(<32 x i32> %985, <16 x i32> %986)
-  store volatile <32 x i32> %987, <32 x i32>* @VectorPairResult, align 128
+  store volatile <32 x i32> %987, ptr @VectorPairResult, align 128
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/v60_Q6_P_rol_PI.ll b/llvm/test/CodeGen/Hexagon/v60_Q6_P_rol_PI.ll
index 5fabc6b288b92..b1495a3dc210e 100644
--- a/llvm/test/CodeGen/Hexagon/v60_Q6_P_rol_PI.ll
+++ b/llvm/test/CodeGen/Hexagon/v60_Q6_P_rol_PI.ll
@@ -6,17 +6,17 @@ target triple = "hexagon"
 @g0 = private unnamed_addr constant [33 x i8] c"%llx :  Q6_P_rol_PI(LONG_MIN,0)\0A\00", align 1
 
 ; Function Attrs: nounwind
-declare i32 @f0(i8*, ...) #0
+declare i32 @f0(ptr, ...) #0
 
 ; Function Attrs: nounwind
 define i32 @f1() #0 {
 b0:
   %v0 = alloca i32, align 4
   %v1 = alloca i32, align 4
-  store i32 0, i32* %v0
-  store i32 0, i32* %v1, align 4
+  store i32 0, ptr %v0
+  store i32 0, ptr %v1, align 4
   %v2 = call i64 @llvm.hexagon.S6.rol.i.p(i64 483648, i32 4)
-  %v3 = call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([33 x i8], [33 x i8]* @g0, i32 0, i32 0), i64 %v2) #2
+  %v3 = call i32 (ptr, ...) @f0(ptr @g0, i64 %v2) #2
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/v60_sort16.ll b/llvm/test/CodeGen/Hexagon/v60_sort16.ll
index f54768ed3f201..1463bc258384b 100644
--- a/llvm/test/CodeGen/Hexagon/v60_sort16.ll
+++ b/llvm/test/CodeGen/Hexagon/v60_sort16.ll
@@ -6,19 +6,19 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @f0(i16* %a0, i32 %a1, i8* %a2, i16* %a3) #0 {
+define void @f0(ptr %a0, i32 %a1, ptr %a2, ptr %a3) #0 {
 b0:
-  %v0 = alloca i16*, align 4
+  %v0 = alloca ptr, align 4
   %v1 = alloca i32, align 4
-  %v2 = alloca i8*, align 4
-  %v3 = alloca i16*, align 4
+  %v2 = alloca ptr, align 4
+  %v3 = alloca ptr, align 4
   %v4 = alloca i32, align 4
   %v5 = alloca i32, align 4
   %v6 = alloca i32, align 4
   %v7 = alloca i32, align 4
   %v8 = alloca i32, align 4
-  %v9 = alloca i16*, align 4
-  %v10 = alloca i16*, align 4
+  %v9 = alloca ptr, align 4
+  %v10 = alloca ptr, align 4
   %v11 = alloca <16 x i32>, align 64
   %v12 = alloca <16 x i32>, align 64
   %v13 = alloca <32 x i32>, align 128
@@ -27,61 +27,60 @@ b0:
   %v16 = alloca <32 x i32>, align 128
   %v17 = alloca <16 x i32>, align 64
   %v18 = alloca <16 x i32>, align 64
-  store i16* %a0, i16** %v0, align 4
-  store i32 %a1, i32* %v1, align 4
-  store i8* %a2, i8** %v2, align 4
-  store i16* %a3, i16** %v3, align 4
-  %v19 = load i8*, i8** %v2, align 4
-  %v20 = getelementptr inbounds i8, i8* %v19, i32 192
-  %v21 = bitcast i8* %v20 to <16 x i32>*
-  %v22 = load <16 x i32>, <16 x i32>* %v21, align 64
-  store <16 x i32> %v22, <16 x i32>* %v12, align 64
-  store i32 16843009, i32* %v4, align 4
-  %v23 = load i32, i32* %v4, align 4
-  %v24 = load i32, i32* %v4, align 4
+  store ptr %a0, ptr %v0, align 4
+  store i32 %a1, ptr %v1, align 4
+  store ptr %a2, ptr %v2, align 4
+  store ptr %a3, ptr %v3, align 4
+  %v19 = load ptr, ptr %v2, align 4
+  %v20 = getelementptr inbounds i8, ptr %v19, i32 192
+  %v22 = load <16 x i32>, ptr %v20, align 64
+  store <16 x i32> %v22, ptr %v12, align 64
+  store i32 16843009, ptr %v4, align 4
+  %v23 = load i32, ptr %v4, align 4
+  %v24 = load i32, ptr %v4, align 4
   %v25 = add nsw i32 %v23, %v24
-  store i32 %v25, i32* %v5, align 4
-  %v26 = load i32, i32* %v5, align 4
-  %v27 = load i32, i32* %v5, align 4
+  store i32 %v25, ptr %v5, align 4
+  %v26 = load i32, ptr %v5, align 4
+  %v27 = load i32, ptr %v5, align 4
   %v28 = add nsw i32 %v26, %v27
-  store i32 %v28, i32* %v6, align 4
-  %v29 = load i16*, i16** %v0, align 4
-  store i16* %v29, i16** %v9, align 4
-  %v30 = load i16*, i16** %v3, align 4
-  store i16* %v30, i16** %v10, align 4
-  store i32 0, i32* %v8, align 4
+  store i32 %v28, ptr %v6, align 4
+  %v29 = load ptr, ptr %v0, align 4
+  store ptr %v29, ptr %v9, align 4
+  %v30 = load ptr, ptr %v3, align 4
+  store ptr %v30, ptr %v10, align 4
+  store i32 0, ptr %v8, align 4
   br label %b1
 
 b1:                                               ; preds = %b3, %b0
-  %v31 = load i32, i32* %v8, align 4
-  %v32 = load i32, i32* %v1, align 4
+  %v31 = load i32, ptr %v8, align 4
+  %v32 = load i32, ptr %v1, align 4
   %v33 = icmp slt i32 %v31, %v32
   br i1 %v33, label %b2, label %b4
 
 b2:                                               ; preds = %b1
-  %v34 = load <16 x i32>, <16 x i32>* %v11, align 64
+  %v34 = load <16 x i32>, ptr %v11, align 64
   %v35 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v34, i32 -1)
-  %v36 = load <16 x i32>, <16 x i32>* %v14, align 64
-  %v37 = load <16 x i32>, <16 x i32>* %v15, align 64
+  %v36 = load <16 x i32>, ptr %v14, align 64
+  %v37 = load <16 x i32>, ptr %v15, align 64
   %v38 = call <32 x i32> @llvm.hexagon.V6.vswap(<64 x i1> %v35, <16 x i32> %v36, <16 x i32> %v37)
-  store <32 x i32> %v38, <32 x i32>* %v13, align 128
-  %v39 = load <32 x i32>, <32 x i32>* %v13, align 128
+  store <32 x i32> %v38, ptr %v13, align 128
+  %v39 = load <32 x i32>, ptr %v13, align 128
   %v40 = call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v39)
-  store <16 x i32> %v40, <16 x i32>* %v14, align 64
-  %v41 = load <32 x i32>, <32 x i32>* %v13, align 128
+  store <16 x i32> %v40, ptr %v14, align 64
+  %v41 = load <32 x i32>, ptr %v13, align 128
   %v42 = call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v41)
-  store <16 x i32> %v42, <16 x i32>* %v15, align 64
-  %v43 = load <16 x i32>, <16 x i32>* %v17, align 64
-  %v44 = load <16 x i32>, <16 x i32>* %v18, align 64
-  %v45 = load i32, i32* %v7, align 4
+  store <16 x i32> %v42, ptr %v15, align 64
+  %v43 = load <16 x i32>, ptr %v17, align 64
+  %v44 = load <16 x i32>, ptr %v18, align 64
+  %v45 = load i32, ptr %v7, align 4
   %v46 = call <32 x i32> @llvm.hexagon.V6.vdealvdd(<16 x i32> %v43, <16 x i32> %v44, i32 %v45)
-  store <32 x i32> %v46, <32 x i32>* %v16, align 128
+  store <32 x i32> %v46, ptr %v16, align 128
   br label %b3
 
 b3:                                               ; preds = %b2
-  %v47 = load i32, i32* %v8, align 4
+  %v47 = load i32, ptr %v8, align 4
   %v48 = add nsw i32 %v47, 1
-  store i32 %v48, i32* %v8, align 4
+  store i32 %v48, ptr %v8, align 4
   br label %b1
 
 b4:                                               ; preds = %b1

diff  --git a/llvm/test/CodeGen/Hexagon/v60rol-instrs.ll b/llvm/test/CodeGen/Hexagon/v60rol-instrs.ll
index 508c9b49312ba..e174e0b7bbde4 100644
--- a/llvm/test/CodeGen/Hexagon/v60rol-instrs.ll
+++ b/llvm/test/CodeGen/Hexagon/v60rol-instrs.ll
@@ -19,18 +19,18 @@ define i32 @f0() #0 {
 b0:
   %v0 = alloca i32, align 4
   %v1 = alloca i32, align 4
-  store i32 0, i32* %v0
-  store i32 0, i32* %v1, align 4
+  store i32 0, ptr %v0
+  store i32 0, ptr %v1, align 4
   %v2 = call i32 @llvm.hexagon.S6.rol.i.r.acc(i32 0, i32 1, i32 31)
-  store i32 %v2, i32* @g0, align 4
+  store i32 %v2, ptr @g0, align 4
   %v3 = call i32 @llvm.hexagon.S6.rol.i.r.and(i32 0, i32 1, i32 31)
-  store i32 %v3, i32* @g1, align 4
+  store i32 %v3, ptr @g1, align 4
   %v4 = call i32 @llvm.hexagon.S6.rol.i.r.nac(i32 0, i32 1, i32 31)
-  store i32 %v4, i32* @g2, align 4
+  store i32 %v4, ptr @g2, align 4
   %v5 = call i32 @llvm.hexagon.S6.rol.i.r.or(i32 0, i32 1, i32 31)
-  store i32 %v5, i32* @g3, align 4
+  store i32 %v5, ptr @g3, align 4
   %v6 = call i32 @llvm.hexagon.S6.rol.i.r.xacc(i32 0, i32 1, i32 31)
-  store i32 %v6, i32* @g4, align 4
+  store i32 %v6, ptr @g4, align 4
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/v60small.ll b/llvm/test/CodeGen/Hexagon/v60small.ll
index 7800173e0bd50..8c83b80854dbf 100644
--- a/llvm/test/CodeGen/Hexagon/v60small.ll
+++ b/llvm/test/CodeGen/Hexagon/v60small.ll
@@ -13,8 +13,8 @@ target triple = "hexagon"
 @vector_pairs = common global [15 x <32 x i32>] zeroinitializer, align 128
 @VectorPairResult = common global <32 x i32> zeroinitializer, align 128
 @dst_addresses = common global [15 x i8] zeroinitializer, align 8
-@ptr_addresses = common global [15 x i8*] zeroinitializer, align 8
-@src_addresses = common global [15 x i8*] zeroinitializer, align 8
+@ptr_addresses = common global [15 x ptr] zeroinitializer, align 8
+@src_addresses = common global [15 x ptr] zeroinitializer, align 8
 @dst = common global i8 0, align 1
 @ptr = common global [32768 x i8] zeroinitializer, align 8
 
@@ -22,21 +22,21 @@ target triple = "hexagon"
 define i32 @main() #0 {
 entry:
   %retval = alloca i32, align 4
-  store i32 0, i32* %retval, align 4
-  %0 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store i32 0, ptr %retval, align 4
+  %0 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %0, i32 -1)
-  %2 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 1), align 64
+  %2 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vecpreds, i32 0, i32 1), align 64
   %3 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %2, i32 -1)
   %4 = call <64 x i1> @llvm.hexagon.V6.pred.and(<64 x i1> %1, <64 x i1> %3)
   %5 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %4, i32 -1)
-  store volatile <16 x i32> %5, <16 x i32>* @Q6VecPredResult, align 64
-  %6 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 0), align 64
+  store volatile <16 x i32> %5, ptr @Q6VecPredResult, align 64
+  %6 = load volatile <16 x i32>, ptr @vecpreds, align 64
   %7 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %6, i32 -1)
-  %8 = load volatile <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @vecpreds, i32 0, i32 1), align 64
+  %8 = load volatile <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @vecpreds, i32 0, i32 1), align 64
   %9 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %8, i32 -1)
   %10 = call <64 x i1> @llvm.hexagon.V6.pred.and.n(<64 x i1> %7, <64 x i1> %9)
   %11 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %10, i32 -1)
-  store volatile <16 x i32> %11, <16 x i32>* @Q6VecPredResult, align 64
+  store volatile <16 x i32> %11, ptr @Q6VecPredResult, align 64
   ret i32 0
 
 }

diff  --git a/llvm/test/CodeGen/Hexagon/v62-CJAllSlots.ll b/llvm/test/CodeGen/Hexagon/v62-CJAllSlots.ll
index 2a13b04a5f61f..52376b3614b9f 100644
--- a/llvm/test/CodeGen/Hexagon/v62-CJAllSlots.ll
+++ b/llvm/test/CodeGen/Hexagon/v62-CJAllSlots.ll
@@ -10,49 +10,46 @@ target triple = "hexagon"
 @g0 = global <16 x i32> zeroinitializer, align 64
 
 ; Function Attrs: nounwind
-define void @f0(i16* nocapture readonly %a0, i32 %a1, i32 %a2, i16* nocapture %a3) #0 {
+define void @f0(ptr nocapture readonly %a0, i32 %a1, i32 %a2, ptr nocapture %a3) #0 {
 b0:
   %v0 = mul i32 %a2, -2
   %v1 = add i32 %v0, 64
-  %v2 = bitcast i16* %a3 to <16 x i32>*
-  %v3 = load <16 x i32>, <16 x i32>* @g0, align 64
+  %v3 = load <16 x i32>, ptr @g0, align 64
   %v4 = sdiv i32 %a1, 32
   %v5 = icmp sgt i32 %a1, 31
   br i1 %v5, label %b1, label %b4
 
 b1:                                               ; preds = %b0
-  %v6 = bitcast i16* %a0 to <16 x i32>*
   %v7 = icmp sgt i32 %a1, 63
   %v8 = mul i32 %v4, 32
   %v9 = select i1 %v7, i32 %v8, i32 32
-  %v10 = getelementptr i16, i16* %a3, i32 %v9
+  %v10 = getelementptr i16, ptr %a3, i32 %v9
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
   %v11 = phi i32 [ 0, %b1 ], [ %v19, %b2 ]
   %v12 = phi <16 x i32> [ %v3, %b1 ], [ %v16, %b2 ]
-  %v13 = phi <16 x i32>* [ %v2, %b1 ], [ %v18, %b2 ]
-  %v14 = phi <16 x i32>* [ %v6, %b1 ], [ %v15, %b2 ]
-  %v15 = getelementptr inbounds <16 x i32>, <16 x i32>* %v14, i32 1
-  %v16 = load <16 x i32>, <16 x i32>* %v14, align 64
+  %v13 = phi ptr [ %a3, %b1 ], [ %v18, %b2 ]
+  %v14 = phi ptr [ %a0, %b1 ], [ %v15, %b2 ]
+  %v15 = getelementptr inbounds <16 x i32>, ptr %v14, i32 1
+  %v16 = load <16 x i32>, ptr %v14, align 64
   %v17 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v16, <16 x i32> %v12, i32 %v1)
-  %v18 = getelementptr inbounds <16 x i32>, <16 x i32>* %v13, i32 1
-  store <16 x i32> %v17, <16 x i32>* %v13, align 64
+  %v18 = getelementptr inbounds <16 x i32>, ptr %v13, i32 1
+  store <16 x i32> %v17, ptr %v13, align 64
   %v19 = add nsw i32 %v11, 1
   %v20 = icmp slt i32 %v19, %v4
   br i1 %v20, label %b2, label %b3
 
 b3:                                               ; preds = %b2
-  %v21 = bitcast i16* %v10 to <16 x i32>*
-  %v22 = load <16 x i32>, <16 x i32>* @g0, align 64
+  %v22 = load <16 x i32>, ptr @g0, align 64
   br label %b4
 
 b4:                                               ; preds = %b3, %b0
   %v23 = phi <16 x i32> [ %v22, %b3 ], [ %v3, %b0 ]
   %v24 = phi <16 x i32> [ %v16, %b3 ], [ %v3, %b0 ]
-  %v25 = phi <16 x i32>* [ %v21, %b3 ], [ %v2, %b0 ]
+  %v25 = phi ptr [ %v10, %b3 ], [ %a3, %b0 ]
   %v26 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v23, <16 x i32> %v24, i32 %v1)
-  store <16 x i32> %v26, <16 x i32>* %v25, align 64
+  store <16 x i32> %v26, ptr %v25, align 64
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/v62-inlasm4.ll b/llvm/test/CodeGen/Hexagon/v62-inlasm4.ll
index 1ba41011f1249..777f7711540d4 100644
--- a/llvm/test/CodeGen/Hexagon/v62-inlasm4.ll
+++ b/llvm/test/CodeGen/Hexagon/v62-inlasm4.ll
@@ -9,12 +9,12 @@ b0:
   %v0 = alloca i32, align 4
   %v1 = alloca <16 x i32>, align 64
   %v2 = alloca <16 x i32>, align 64
-  store i32 %a0, i32* %v0, align 4
-  store <16 x i32> %a1, <16 x i32>* %v1, align 64
-  %v3 = load i32, i32* %v0, align 4
+  store i32 %a0, ptr %v0, align 4
+  store <16 x i32> %a1, ptr %v1, align 64
+  %v3 = load i32, ptr %v0, align 4
   %v4 = tail call <64 x i1> asm sideeffect "  $0 = vsetq2($1);\0A", "=q,r"(i32 %v3) #1
   %v5 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v4, i32 -1)
-  store <16 x i32> %v5, <16 x i32>* %v2, align 64
+  store <16 x i32> %v5, ptr %v2, align 64
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/v6vassignp.ll b/llvm/test/CodeGen/Hexagon/v6vassignp.ll
index 619cd25debfbb..be2d103a20131 100644
--- a/llvm/test/CodeGen/Hexagon/v6vassignp.ll
+++ b/llvm/test/CodeGen/Hexagon/v6vassignp.ll
@@ -15,11 +15,11 @@ define i32 @f0() #0 {
 b0:
   %v0 = alloca i32, align 4
   %v1 = alloca i32, align 4
-  store i32 0, i32* %v0
-  store i32 0, i32* %v1, align 4
-  %v2 = load <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @g0, i32 0, i32 0), align 64
+  store i32 0, ptr %v0
+  store i32 0, ptr %v1, align 4
+  %v2 = load <32 x i32>, ptr @g0, align 64
   %v3 = call <32 x i32> @llvm.hexagon.V6.vassignp(<32 x i32> %v2)
-  store <32 x i32> %v3, <32 x i32>* @g1, align 64
+  store <32 x i32> %v3, ptr @g1, align 64
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/v6vec-vmemu1.ll b/llvm/test/CodeGen/Hexagon/v6vec-vmemu1.ll
index a2cbf5b8a88eb..83690f1578722 100644
--- a/llvm/test/CodeGen/Hexagon/v6vec-vmemu1.ll
+++ b/llvm/test/CodeGen/Hexagon/v6vec-vmemu1.ll
@@ -9,47 +9,44 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @f0(i16* nocapture readonly %a0, i32 %a1, i32 %a2, i16* nocapture %a3) #0 {
+define void @f0(ptr nocapture readonly %a0, i32 %a1, i32 %a2, ptr nocapture %a3) #0 {
 b0:
   %v0 = mul i32 %a2, -2
   %v1 = add i32 %v0, 64
   %v2 = tail call <16 x i32> @llvm.hexagon.V6.vsubw(<16 x i32> undef, <16 x i32> undef)
-  %v3 = bitcast i16* %a3 to <16 x i32>*
   %v4 = sdiv i32 %a1, 32
   %v5 = icmp sgt i32 %a1, 31
   br i1 %v5, label %b1, label %b4
 
 b1:                                               ; preds = %b0
-  %v6 = bitcast i16* %a0 to <16 x i32>*
   %v7 = icmp sgt i32 %a1, 63
   %v8 = mul i32 %v4, 32
   %v9 = select i1 %v7, i32 %v8, i32 32
-  %v10 = getelementptr i16, i16* %a3, i32 %v9
+  %v10 = getelementptr i16, ptr %a3, i32 %v9
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
   %v11 = phi i32 [ 0, %b1 ], [ %v19, %b2 ]
   %v12 = phi <16 x i32> [ %v2, %b1 ], [ %v16, %b2 ]
-  %v13 = phi <16 x i32>* [ %v3, %b1 ], [ %v18, %b2 ]
-  %v14 = phi <16 x i32>* [ %v6, %b1 ], [ %v15, %b2 ]
-  %v15 = getelementptr inbounds <16 x i32>, <16 x i32>* %v14, i32 1
-  %v16 = load <16 x i32>, <16 x i32>* %v14, align 4, !tbaa !0
+  %v13 = phi ptr [ %a3, %b1 ], [ %v18, %b2 ]
+  %v14 = phi ptr [ %a0, %b1 ], [ %v15, %b2 ]
+  %v15 = getelementptr inbounds <16 x i32>, ptr %v14, i32 1
+  %v16 = load <16 x i32>, ptr %v14, align 4, !tbaa !0
   %v17 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v16, <16 x i32> %v12, i32 %v1)
-  %v18 = getelementptr inbounds <16 x i32>, <16 x i32>* %v13, i32 1
-  store <16 x i32> %v17, <16 x i32>* %v13, align 4, !tbaa !0
+  %v18 = getelementptr inbounds <16 x i32>, ptr %v13, i32 1
+  store <16 x i32> %v17, ptr %v13, align 4, !tbaa !0
   %v19 = add nsw i32 %v11, 1
   %v20 = icmp slt i32 %v19, %v4
   br i1 %v20, label %b2, label %b3
 
 b3:                                               ; preds = %b2
-  %v21 = bitcast i16* %v10 to <16 x i32>*
   br label %b4
 
 b4:                                               ; preds = %b3, %b0
   %v22 = phi <16 x i32> [ %v16, %b3 ], [ %v2, %b0 ]
-  %v23 = phi <16 x i32>* [ %v21, %b3 ], [ %v3, %b0 ]
+  %v23 = phi ptr [ %v10, %b3 ], [ %a3, %b0 ]
   %v24 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v2, <16 x i32> %v22, i32 %v1)
-  store <16 x i32> %v24, <16 x i32>* %v23, align 4, !tbaa !0
+  store <16 x i32> %v24, ptr %v23, align 4, !tbaa !0
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/v6vec-vmemu2.ll b/llvm/test/CodeGen/Hexagon/v6vec-vmemu2.ll
index 4f3ea1e87705b..b79bedc10133b 100644
--- a/llvm/test/CodeGen/Hexagon/v6vec-vmemu2.ll
+++ b/llvm/test/CodeGen/Hexagon/v6vec-vmemu2.ll
@@ -9,15 +9,12 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @f0(i8* nocapture readonly %a0, i8* nocapture readonly %a1, i8* nocapture %a2) #0 {
+define void @f0(ptr nocapture readonly %a0, ptr nocapture readonly %a1, ptr nocapture %a2) #0 {
 b0:
-  %v0 = bitcast i8* %a0 to <16 x i32>*
-  %v1 = load <16 x i32>, <16 x i32>* %v0, align 4, !tbaa !0
-  %v2 = bitcast i8* %a1 to <16 x i32>*
-  %v3 = load <16 x i32>, <16 x i32>* %v2, align 4, !tbaa !0
+  %v1 = load <16 x i32>, ptr %a0, align 4, !tbaa !0
+  %v3 = load <16 x i32>, ptr %a1, align 4, !tbaa !0
   %v4 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v1, <16 x i32> %v3)
-  %v5 = bitcast i8* %a2 to <16 x i32>*
-  store <16 x i32> %v4, <16 x i32>* %v5, align 4, !tbaa !0
+  store <16 x i32> %v4, ptr %a2, align 4, !tbaa !0
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/v6vec-vprint.ll b/llvm/test/CodeGen/Hexagon/v6vec-vprint.ll
index 0f232e4cb41e6..45a101e4ad3ef 100644
--- a/llvm/test/CodeGen/Hexagon/v6vec-vprint.ll
+++ b/llvm/test/CodeGen/Hexagon/v6vec-vprint.ll
@@ -10,15 +10,12 @@ target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @do_vecs(i8* nocapture readonly %a, i8* nocapture readonly %b, i8* nocapture %c) #0 {
+define void @do_vecs(ptr nocapture readonly %a, ptr nocapture readonly %b, ptr nocapture %c) #0 {
 entry:
-  %0 = bitcast i8* %a to <16 x i32>*
-  %1 = load <16 x i32>, <16 x i32>* %0, align 4, !tbaa !1
-  %2 = bitcast i8* %b to <16 x i32>*
-  %3 = load <16 x i32>, <16 x i32>* %2, align 4, !tbaa !1
-  %4 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %1, <16 x i32> %3)
-  %5 = bitcast i8* %c to <16 x i32>*
-  store <16 x i32> %4, <16 x i32>* %5, align 4, !tbaa !1
+  %0 = load <16 x i32>, ptr %a, align 4, !tbaa !1
+  %1 = load <16 x i32>, ptr %b, align 4, !tbaa !1
+  %2 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %0, <16 x i32> %1)
+  store <16 x i32> %2, ptr %c, align 4, !tbaa !1
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/v6vec-vshuff.ll b/llvm/test/CodeGen/Hexagon/v6vec-vshuff.ll
index 2eb71a173143d..1dc9d6a98a289 100644
--- a/llvm/test/CodeGen/Hexagon/v6vec-vshuff.ll
+++ b/llvm/test/CodeGen/Hexagon/v6vec-vshuff.ll
@@ -11,11 +11,11 @@ target triple = "hexagon"
 ; Function Attrs: nounwind
 define i32 @f0() #0 {
 b0:
-  %v0 = load <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @g0, i32 0, i32 0), align 64, !tbaa !0
-  %v1 = load <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @g0, i32 0, i32 1), align 64, !tbaa !0
+  %v0 = load <16 x i32>, ptr @g0, align 64, !tbaa !0
+  %v1 = load <16 x i32>, ptr getelementptr inbounds ([15 x <16 x i32>], ptr @g0, i32 0, i32 1), align 64, !tbaa !0
   %v2 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> %v0, <16 x i32> %v1, i32 -2147483648)
-  store <32 x i32> %v2, <32 x i32>* @g1, align 128, !tbaa !0
-  %v3 = tail call i32 bitcast (i32 (...)* @f1 to i32 (i32, <32 x i32>*)*)(i32 1024, <32 x i32>* @g1) #0
+  store <32 x i32> %v2, ptr @g1, align 128, !tbaa !0
+  %v3 = tail call i32 @f1(i32 1024, ptr @g1) #0
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/v6vec_inc1.ll b/llvm/test/CodeGen/Hexagon/v6vec_inc1.ll
index 73e930ebb85f4..323dbd4560fbe 100644
--- a/llvm/test/CodeGen/Hexagon/v6vec_inc1.ll
+++ b/llvm/test/CodeGen/Hexagon/v6vec_inc1.ll
@@ -16,47 +16,44 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @f0(i16* nocapture readonly %a0, i32 %a1, i32 %a2, i16* nocapture %a3) #0 {
+define void @f0(ptr nocapture readonly %a0, i32 %a1, i32 %a2, ptr nocapture %a3) #0 {
 b0:
   %v0 = mul i32 %a2, -2
   %v1 = add i32 %v0, 64
   %v2 = tail call <16 x i32> @llvm.hexagon.V6.vsubw(<16 x i32> undef, <16 x i32> undef)
-  %v3 = bitcast i16* %a3 to <16 x i32>*
   %v4 = sdiv i32 %a1, 32
   %v5 = icmp sgt i32 %a1, 31
   br i1 %v5, label %b1, label %b4
 
 b1:                                               ; preds = %b0
-  %v6 = bitcast i16* %a0 to <16 x i32>*
   %v7 = icmp sgt i32 %a1, 63
   %v8 = mul i32 %v4, 32
   %v9 = select i1 %v7, i32 %v8, i32 32
-  %v10 = getelementptr i16, i16* %a3, i32 %v9
+  %v10 = getelementptr i16, ptr %a3, i32 %v9
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
   %v11 = phi i32 [ 0, %b1 ], [ %v19, %b2 ]
   %v12 = phi <16 x i32> [ %v2, %b1 ], [ %v16, %b2 ]
-  %v13 = phi <16 x i32>* [ %v3, %b1 ], [ %v18, %b2 ]
-  %v14 = phi <16 x i32>* [ %v6, %b1 ], [ %v15, %b2 ]
-  %v15 = getelementptr inbounds <16 x i32>, <16 x i32>* %v14, i32 1
-  %v16 = load <16 x i32>, <16 x i32>* %v14, align 64, !tbaa !0
+  %v13 = phi ptr [ %a3, %b1 ], [ %v18, %b2 ]
+  %v14 = phi ptr [ %a0, %b1 ], [ %v15, %b2 ]
+  %v15 = getelementptr inbounds <16 x i32>, ptr %v14, i32 1
+  %v16 = load <16 x i32>, ptr %v14, align 64, !tbaa !0
   %v17 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v16, <16 x i32> %v12, i32 %v1)
-  %v18 = getelementptr inbounds <16 x i32>, <16 x i32>* %v13, i32 1
-  store <16 x i32> %v17, <16 x i32>* %v13, align 64, !tbaa !0
+  %v18 = getelementptr inbounds <16 x i32>, ptr %v13, i32 1
+  store <16 x i32> %v17, ptr %v13, align 64, !tbaa !0
   %v19 = add nsw i32 %v11, 1
   %v20 = icmp slt i32 %v19, %v4
   br i1 %v20, label %b2, label %b3
 
 b3:                                               ; preds = %b2
-  %v21 = bitcast i16* %v10 to <16 x i32>*
   br label %b4
 
 b4:                                               ; preds = %b3, %b0
   %v22 = phi <16 x i32> [ %v16, %b3 ], [ %v2, %b0 ]
-  %v23 = phi <16 x i32>* [ %v21, %b3 ], [ %v3, %b0 ]
+  %v23 = phi ptr [ %v10, %b3 ], [ %a3, %b0 ]
   %v24 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v2, <16 x i32> %v22, i32 %v1)
-  store <16 x i32> %v24, <16 x i32>* %v23, align 64, !tbaa !0
+  store <16 x i32> %v24, ptr %v23, align 64, !tbaa !0
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/v6vec_zero.ll b/llvm/test/CodeGen/Hexagon/v6vec_zero.ll
index e9e69975cb9cc..ee2da0a0bbd94 100644
--- a/llvm/test/CodeGen/Hexagon/v6vec_zero.ll
+++ b/llvm/test/CodeGen/Hexagon/v6vec_zero.ll
@@ -11,7 +11,7 @@ b0:
 
 b1:                                               ; preds = %b1, %b0
   %v0 = phi i32 [ 0, %b1 ], [ 0, %b0 ]
-  store <16 x i32> zeroinitializer, <16 x i32>* null, align 64
+  store <16 x i32> zeroinitializer, ptr null, align 64
   br i1 false, label %b1, label %b2
 
 b2:                                               ; preds = %b1, %b0

diff  --git a/llvm/test/CodeGen/Hexagon/v6vect-dbl-fail1.ll b/llvm/test/CodeGen/Hexagon/v6vect-dbl-fail1.ll
index c44c10b370b43..512d16a4eb1f9 100644
--- a/llvm/test/CodeGen/Hexagon/v6vect-dbl-fail1.ll
+++ b/llvm/test/CodeGen/Hexagon/v6vect-dbl-fail1.ll
@@ -8,33 +8,30 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @f0(i8* %a0, i8* %a1, i32 %a2, i8* %a3, i32 %a4) #0 {
+define void @f0(ptr %a0, ptr %a1, i32 %a2, ptr %a3, i32 %a4) #0 {
 b0:
-  %v0 = alloca i8*, align 4
-  %v1 = alloca i8*, align 4
+  %v0 = alloca ptr, align 4
+  %v1 = alloca ptr, align 4
   %v2 = alloca i32, align 4
-  %v3 = alloca i8*, align 4
+  %v3 = alloca ptr, align 4
   %v4 = alloca i32, align 4
   %v5 = alloca <16 x i32>, align 64
   %v6 = alloca <32 x i32>, align 128
-  store i8* %a0, i8** %v0, align 4
-  store i8* %a1, i8** %v1, align 4
-  store i32 %a2, i32* %v2, align 4
-  store i8* %a3, i8** %v3, align 4
-  store i32 %a4, i32* %v4, align 4
-  %v7 = load i8*, i8** %v0, align 4
-  %v8 = bitcast i8* %v7 to <16 x i32>*
-  %v9 = load <16 x i32>, <16 x i32>* %v8, align 64
-  %v10 = load i8*, i8** %v0, align 4
-  %v11 = getelementptr inbounds i8, i8* %v10, i32 64
-  %v12 = bitcast i8* %v11 to <16 x i32>*
-  %v13 = load <16 x i32>, <16 x i32>* %v12, align 64
+  store ptr %a0, ptr %v0, align 4
+  store ptr %a1, ptr %v1, align 4
+  store i32 %a2, ptr %v2, align 4
+  store ptr %a3, ptr %v3, align 4
+  store i32 %a4, ptr %v4, align 4
+  %v7 = load ptr, ptr %v0, align 4
+  %v9 = load <16 x i32>, ptr %v7, align 64
+  %v10 = load ptr, ptr %v0, align 4
+  %v11 = getelementptr inbounds i8, ptr %v10, i32 64
+  %v13 = load <16 x i32>, ptr %v11, align 64
   %v14 = call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v9, <16 x i32> %v13)
-  store <32 x i32> %v14, <32 x i32>* %v6, align 128
-  %v15 = load i8*, i8** %v3, align 4
-  %v16 = bitcast i8* %v15 to <16 x i32>*
-  %v17 = load <16 x i32>, <16 x i32>* %v16, align 64
-  store <16 x i32> %v17, <16 x i32>* %v5, align 64
+  store <32 x i32> %v14, ptr %v6, align 128
+  %v15 = load ptr, ptr %v3, align 4
+  %v17 = load <16 x i32>, ptr %v15, align 64
+  store <16 x i32> %v17, ptr %v5, align 64
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/v6vect-dbl-spill.ll b/llvm/test/CodeGen/Hexagon/v6vect-dbl-spill.ll
index c5f989a88f535..259e0df23520e 100644
--- a/llvm/test/CodeGen/Hexagon/v6vect-dbl-spill.ll
+++ b/llvm/test/CodeGen/Hexagon/v6vect-dbl-spill.ll
@@ -15,7 +15,7 @@ b0:
 
 b1:                                               ; preds = %b1, %b0
   %v5 = phi i32 [ %v77, %b1 ], [ 0, %b0 ]
-  %v6 = phi <32 x i32>* [ undef, %b1 ], [ undef, %b0 ]
+  %v6 = phi ptr [ undef, %b1 ], [ undef, %b0 ]
   %v7 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> undef, <32 x i32> undef)
   %v8 = tail call <128 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v7, <32 x i32> zeroinitializer)
   %v9 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> %v8, <32 x i32> undef, <32 x i32> %v0)
@@ -48,7 +48,7 @@ b1:                                               ; preds = %b1, %b0
   %v36 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> undef, <32 x i32> %v35, <32 x i32> %v0)
   %v37 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> undef, <32 x i32> %v36, <32 x i32> %v0)
   %v38 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> %v30, <32 x i32> %v37, <32 x i32> %v0)
-  %v39 = load <32 x i32>, <32 x i32>* null, align 128, !tbaa !0
+  %v39 = load <32 x i32>, ptr null, align 128, !tbaa !0
   %v40 = tail call <128 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> undef, <32 x i32> zeroinitializer)
   %v41 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %v40, <32 x i32> undef, <32 x i32> %v39)
   %v42 = tail call <64 x i32> @llvm.hexagon.V6.vmpybus.acc.128B(<64 x i32> %v34, <32 x i32> %v41, i32 16843009)
@@ -86,7 +86,7 @@ b1:                                               ; preds = %b1, %b0
   %v74 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v73)
   %v75 = tail call <32 x i32> @llvm.hexagon.V6.vasrwh.128B(<32 x i32> %v74, <32 x i32> undef, i32 14)
   %v76 = tail call <32 x i32> @llvm.hexagon.V6.vshuffeb.128B(<32 x i32> %v75, <32 x i32> undef)
-  store <32 x i32> %v76, <32 x i32>* %v6, align 128, !tbaa !0
+  store <32 x i32> %v76, ptr %v6, align 128, !tbaa !0
   %v77 = add nsw i32 %v5, 1
   %v78 = icmp slt i32 %v77, %v3
   br i1 %v78, label %b1, label %b2

diff  --git a/llvm/test/CodeGen/Hexagon/v6vect-dbl.ll b/llvm/test/CodeGen/Hexagon/v6vect-dbl.ll
index 1ea7e14ddff59..234fdd3386a93 100644
--- a/llvm/test/CodeGen/Hexagon/v6vect-dbl.ll
+++ b/llvm/test/CodeGen/Hexagon/v6vect-dbl.ll
@@ -32,41 +32,41 @@ target triple = "hexagon"
 @g3 = common global [10 x <16 x i32>] zeroinitializer, align 64
 @g4 = common global [10 x <32 x i32>] zeroinitializer, align 64
 
-declare i32 @f0(i8*, ...)
+declare i32 @f0(ptr, ...)
 
 ; Function Attrs: nounwind
 define void @f1(i32 %a0) #0 {
 b0:
   %v0 = alloca i32, align 4
-  %v1 = alloca i32*, align 4
+  %v1 = alloca ptr, align 4
   %v2 = alloca i32, align 4
-  store i32 %a0, i32* %v0, align 4
-  store i32* getelementptr inbounds ([10 x <32 x i32>], [10 x <32 x i32>]* @g0, i32 0, i32 0, i32 0), i32** %v1, align 4
-  %v3 = load i32, i32* %v0, align 4
-  %v4 = load i32*, i32** %v1, align 4
-  %v5 = getelementptr inbounds i32, i32* %v4, i32 %v3
-  store i32* %v5, i32** %v1, align 4
-  store i32 0, i32* %v2, align 4
+  store i32 %a0, ptr %v0, align 4
+  store ptr @g0, ptr %v1, align 4
+  %v3 = load i32, ptr %v0, align 4
+  %v4 = load ptr, ptr %v1, align 4
+  %v5 = getelementptr inbounds i32, ptr %v4, i32 %v3
+  store ptr %v5, ptr %v1, align 4
+  store i32 0, ptr %v2, align 4
   br label %b1
 
 b1:                                               ; preds = %b3, %b0
-  %v6 = load i32, i32* %v2, align 4
+  %v6 = load i32, ptr %v2, align 4
   %v7 = icmp slt i32 %v6, 16
   br i1 %v7, label %b2, label %b4
 
 b2:                                               ; preds = %b1
-  %v8 = load i32, i32* %v2, align 4
-  %v9 = load i32*, i32** %v1, align 4
-  %v10 = getelementptr inbounds i32, i32* %v9, i32 1
-  store i32* %v10, i32** %v1, align 4
-  %v11 = load i32, i32* %v9, align 4
-  %v12 = call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @g1, i32 0, i32 0), i32 %v8, i32 %v11)
+  %v8 = load i32, ptr %v2, align 4
+  %v9 = load ptr, ptr %v1, align 4
+  %v10 = getelementptr inbounds i32, ptr %v9, i32 1
+  store ptr %v10, ptr %v1, align 4
+  %v11 = load i32, ptr %v9, align 4
+  %v12 = call i32 (ptr, ...) @f0(ptr @g1, i32 %v8, i32 %v11)
   br label %b3
 
 b3:                                               ; preds = %b2
-  %v13 = load i32, i32* %v2, align 4
+  %v13 = load i32, ptr %v2, align 4
   %v14 = add nsw i32 %v13, 1
-  store i32 %v14, i32* %v2, align 4
+  store i32 %v14, ptr %v2, align 4
   br label %b1
 
 b4:                                               ; preds = %b1
@@ -78,90 +78,90 @@ define i32 @f2() #0 {
 b0:
   %v0 = alloca i32, align 4
   %v1 = alloca i32, align 4
-  store i32 0, i32* %v0
-  store i32 0, i32* %v1, align 4
+  store i32 0, ptr %v0
+  store i32 0, ptr %v1, align 4
   br label %b1
 
 b1:                                               ; preds = %b3, %b0
-  %v2 = load i32, i32* %v1, align 4
+  %v2 = load i32, ptr %v1, align 4
   %v3 = icmp slt i32 %v2, 3
   br i1 %v3, label %b2, label %b4
 
 b2:                                               ; preds = %b1
-  %v4 = load i32, i32* %v1, align 4
+  %v4 = load i32, ptr %v1, align 4
   %v5 = add nsw i32 %v4, 1
   %v6 = call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 %v5)
-  %v7 = load i32, i32* %v1, align 4
-  %v8 = getelementptr inbounds [10 x <16 x i32>], [10 x <16 x i32>]* @g2, i32 0, i32 %v7
-  store <16 x i32> %v6, <16 x i32>* %v8, align 64
-  %v9 = load i32, i32* %v1, align 4
+  %v7 = load i32, ptr %v1, align 4
+  %v8 = getelementptr inbounds [10 x <16 x i32>], ptr @g2, i32 0, i32 %v7
+  store <16 x i32> %v6, ptr %v8, align 64
+  %v9 = load i32, ptr %v1, align 4
   %v10 = mul nsw i32 %v9, 10
   %v11 = add nsw i32 %v10, 1
   %v12 = call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 %v11)
-  %v13 = load i32, i32* %v1, align 4
-  %v14 = getelementptr inbounds [10 x <16 x i32>], [10 x <16 x i32>]* @g3, i32 0, i32 %v13
-  store <16 x i32> %v12, <16 x i32>* %v14, align 64
-  %v15 = load i32, i32* %v1, align 4
-  %v16 = getelementptr inbounds [10 x <16 x i32>], [10 x <16 x i32>]* @g2, i32 0, i32 %v15
-  %v17 = load <16 x i32>, <16 x i32>* %v16, align 64
-  %v18 = load i32, i32* %v1, align 4
-  %v19 = getelementptr inbounds [10 x <16 x i32>], [10 x <16 x i32>]* @g3, i32 0, i32 %v18
-  %v20 = load <16 x i32>, <16 x i32>* %v19, align 64
+  %v13 = load i32, ptr %v1, align 4
+  %v14 = getelementptr inbounds [10 x <16 x i32>], ptr @g3, i32 0, i32 %v13
+  store <16 x i32> %v12, ptr %v14, align 64
+  %v15 = load i32, ptr %v1, align 4
+  %v16 = getelementptr inbounds [10 x <16 x i32>], ptr @g2, i32 0, i32 %v15
+  %v17 = load <16 x i32>, ptr %v16, align 64
+  %v18 = load i32, ptr %v1, align 4
+  %v19 = getelementptr inbounds [10 x <16 x i32>], ptr @g3, i32 0, i32 %v18
+  %v20 = load <16 x i32>, ptr %v19, align 64
   %v21 = call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v17, <16 x i32> %v20)
-  %v22 = load i32, i32* %v1, align 4
-  %v23 = getelementptr inbounds [10 x <32 x i32>], [10 x <32 x i32>]* @g4, i32 0, i32 %v22
-  store <32 x i32> %v21, <32 x i32>* %v23, align 64
+  %v22 = load i32, ptr %v1, align 4
+  %v23 = getelementptr inbounds [10 x <32 x i32>], ptr @g4, i32 0, i32 %v22
+  store <32 x i32> %v21, ptr %v23, align 64
   br label %b3
 
 b3:                                               ; preds = %b2
-  %v24 = load i32, i32* %v1, align 4
+  %v24 = load i32, ptr %v1, align 4
   %v25 = add nsw i32 %v24, 1
-  store i32 %v25, i32* %v1, align 4
+  store i32 %v25, ptr %v1, align 4
   br label %b1
 
 b4:                                               ; preds = %b1
-  store i32 0, i32* %v1, align 4
+  store i32 0, ptr %v1, align 4
   br label %b5
 
 b5:                                               ; preds = %b7, %b4
-  %v26 = load i32, i32* %v1, align 4
+  %v26 = load i32, ptr %v1, align 4
   %v27 = icmp slt i32 %v26, 3
   br i1 %v27, label %b6, label %b8
 
 b6:                                               ; preds = %b5
-  %v28 = load i32, i32* %v1, align 4
-  %v29 = getelementptr inbounds [10 x <32 x i32>], [10 x <32 x i32>]* @g4, i32 0, i32 %v28
-  %v30 = load <32 x i32>, <32 x i32>* %v29, align 64
-  %v31 = load i32, i32* %v1, align 4
-  %v32 = getelementptr inbounds [10 x <32 x i32>], [10 x <32 x i32>]* @g0, i32 0, i32 %v31
-  store <32 x i32> %v30, <32 x i32>* %v32, align 64
+  %v28 = load i32, ptr %v1, align 4
+  %v29 = getelementptr inbounds [10 x <32 x i32>], ptr @g4, i32 0, i32 %v28
+  %v30 = load <32 x i32>, ptr %v29, align 64
+  %v31 = load i32, ptr %v1, align 4
+  %v32 = getelementptr inbounds [10 x <32 x i32>], ptr @g0, i32 0, i32 %v31
+  store <32 x i32> %v30, ptr %v32, align 64
   br label %b7
 
 b7:                                               ; preds = %b6
-  %v33 = load i32, i32* %v1, align 4
+  %v33 = load i32, ptr %v1, align 4
   %v34 = add nsw i32 %v33, 1
-  store i32 %v34, i32* %v1, align 4
+  store i32 %v34, ptr %v1, align 4
   br label %b5
 
 b8:                                               ; preds = %b5
-  store i32 0, i32* %v1, align 4
+  store i32 0, ptr %v1, align 4
   br label %b9
 
 b9:                                               ; preds = %b11, %b8
-  %v35 = load i32, i32* %v1, align 4
+  %v35 = load i32, ptr %v1, align 4
   %v36 = icmp slt i32 %v35, 3
   br i1 %v36, label %b10, label %b12
 
 b10:                                              ; preds = %b9
-  %v37 = load i32, i32* %v1, align 4
+  %v37 = load i32, ptr %v1, align 4
   %v38 = mul nsw i32 %v37, 16
   call void @f1(i32 %v38)
   br label %b11
 
 b11:                                              ; preds = %b10
-  %v39 = load i32, i32* %v1, align 4
+  %v39 = load i32, ptr %v1, align 4
   %v40 = add nsw i32 %v39, 1
-  store i32 %v40, i32* %v1, align 4
+  store i32 %v40, ptr %v1, align 4
   br label %b9
 
 b12:                                              ; preds = %b9

diff  --git a/llvm/test/CodeGen/Hexagon/v6vect-dh1.ll b/llvm/test/CodeGen/Hexagon/v6vect-dh1.ll
index 6c02e247c1ca6..2af218d8ecbc6 100644
--- a/llvm/test/CodeGen/Hexagon/v6vect-dh1.ll
+++ b/llvm/test/CodeGen/Hexagon/v6vect-dh1.ll
@@ -18,66 +18,55 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define void @f0(i8* nocapture readonly %a0, i8* nocapture readonly %a1, i32 %a2, i8* nocapture %a3, i32 %a4) #0 {
+define void @f0(ptr nocapture readonly %a0, ptr nocapture readonly %a1, i32 %a2, ptr nocapture %a3, i32 %a4) #0 {
 b0:
-  %v0 = bitcast i8* %a1 to i32*
-  %v1 = load i32, i32* %v0, align 4, !tbaa !0
-  %v2 = getelementptr inbounds i8, i8* %a1, i32 4
-  %v3 = bitcast i8* %v2 to i32*
-  %v4 = load i32, i32* %v3, align 4, !tbaa !0
-  %v5 = getelementptr inbounds i8, i8* %a1, i32 8
-  %v6 = bitcast i8* %v5 to i32*
-  %v7 = load i32, i32* %v6, align 4, !tbaa !0
+  %v1 = load i32, ptr %a1, align 4, !tbaa !0
+  %v2 = getelementptr inbounds i8, ptr %a1, i32 4
+  %v4 = load i32, ptr %v2, align 4, !tbaa !0
+  %v5 = getelementptr inbounds i8, ptr %a1, i32 8
+  %v7 = load i32, ptr %v5, align 4, !tbaa !0
   %v8 = mul i32 %a4, 2
   %v9 = add i32 %v8, %a4
   %v10 = icmp sgt i32 %a4, 0
   br i1 %v10, label %b1, label %b4
 
 b1:                                               ; preds = %b0
-  %v11 = getelementptr inbounds i8, i8* %a0, i32 %v9
-  %v12 = getelementptr inbounds i8, i8* %a0, i32 %v8
-  %v13 = getelementptr inbounds i8, i8* %a0, i32 %a4
+  %v11 = getelementptr inbounds i8, ptr %a0, i32 %v9
+  %v12 = getelementptr inbounds i8, ptr %a0, i32 %v8
+  %v13 = getelementptr inbounds i8, ptr %a0, i32 %a4
   %v14 = add i32 %v9, 64
-  %v15 = bitcast i8* %v11 to <16 x i32>*
   %v16 = add i32 %v8, 64
-  %v17 = bitcast i8* %v12 to <16 x i32>*
   %v18 = add i32 %a4, 64
-  %v19 = bitcast i8* %v13 to <16 x i32>*
-  %v20 = bitcast i8* %a0 to <16 x i32>*
-  %v21 = getelementptr inbounds i8, i8* %a0, i32 %v14
-  %v22 = load <16 x i32>, <16 x i32>* %v15, align 64, !tbaa !4
-  %v23 = getelementptr inbounds i8, i8* %a0, i32 %v16
-  %v24 = load <16 x i32>, <16 x i32>* %v17, align 64, !tbaa !4
-  %v25 = getelementptr inbounds i8, i8* %a0, i32 %v18
-  %v26 = load <16 x i32>, <16 x i32>* %v19, align 64, !tbaa !4
-  %v27 = load <16 x i32>, <16 x i32>* %v20, align 64, !tbaa !4
-  %v28 = getelementptr inbounds i8, i8* %a3, i32 %a4
+  %v21 = getelementptr inbounds i8, ptr %a0, i32 %v14
+  %v22 = load <16 x i32>, ptr %v11, align 64, !tbaa !4
+  %v23 = getelementptr inbounds i8, ptr %a0, i32 %v16
+  %v24 = load <16 x i32>, ptr %v12, align 64, !tbaa !4
+  %v25 = getelementptr inbounds i8, ptr %a0, i32 %v18
+  %v26 = load <16 x i32>, ptr %v13, align 64, !tbaa !4
+  %v27 = load <16 x i32>, ptr %a0, align 64, !tbaa !4
+  %v28 = getelementptr inbounds i8, ptr %a3, i32 %a4
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
-  %v29 = phi i8* [ %a0, %b1 ], [ %v40, %b2 ]
-  %v30 = phi i8* [ %a3, %b1 ], [ %v74, %b2 ]
-  %v31 = phi i8* [ %v25, %b1 ], [ %v45, %b2 ]
-  %v32 = phi i8* [ %v23, %b1 ], [ %v48, %b2 ]
-  %v33 = phi i8* [ %v21, %b1 ], [ %v51, %b2 ]
-  %v34 = phi i8* [ %v28, %b1 ], [ %v89, %b2 ]
+  %v29 = phi ptr [ %a0, %b1 ], [ %v40, %b2 ]
+  %v30 = phi ptr [ %a3, %b1 ], [ %v74, %b2 ]
+  %v31 = phi ptr [ %v25, %b1 ], [ %v45, %b2 ]
+  %v32 = phi ptr [ %v23, %b1 ], [ %v48, %b2 ]
+  %v33 = phi ptr [ %v21, %b1 ], [ %v51, %b2 ]
+  %v34 = phi ptr [ %v28, %b1 ], [ %v89, %b2 ]
   %v35 = phi i32 [ 0, %b1 ], [ %v90, %b2 ]
   %v36 = phi <16 x i32> [ %v27, %b1 ], [ %v42, %b2 ]
   %v37 = phi <16 x i32> [ %v26, %b1 ], [ %v44, %b2 ]
   %v38 = phi <16 x i32> [ %v24, %b1 ], [ %v47, %b2 ]
   %v39 = phi <16 x i32> [ %v22, %b1 ], [ %v50, %b2 ]
-  %v40 = getelementptr inbounds i8, i8* %v29, i32 64
-  %v41 = bitcast i8* %v40 to <16 x i32>*
-  %v42 = load <16 x i32>, <16 x i32>* %v41, align 64, !tbaa !4
-  %v43 = bitcast i8* %v31 to <16 x i32>*
-  %v44 = load <16 x i32>, <16 x i32>* %v43, align 64, !tbaa !4
-  %v45 = getelementptr inbounds i8, i8* %v31, i32 64
-  %v46 = bitcast i8* %v32 to <16 x i32>*
-  %v47 = load <16 x i32>, <16 x i32>* %v46, align 64, !tbaa !4
-  %v48 = getelementptr inbounds i8, i8* %v32, i32 64
-  %v49 = bitcast i8* %v33 to <16 x i32>*
-  %v50 = load <16 x i32>, <16 x i32>* %v49, align 64, !tbaa !4
-  %v51 = getelementptr inbounds i8, i8* %v33, i32 64
+  %v40 = getelementptr inbounds i8, ptr %v29, i32 64
+  %v42 = load <16 x i32>, ptr %v40, align 64, !tbaa !4
+  %v44 = load <16 x i32>, ptr %v31, align 64, !tbaa !4
+  %v45 = getelementptr inbounds i8, ptr %v31, i32 64
+  %v47 = load <16 x i32>, ptr %v32, align 64, !tbaa !4
+  %v48 = getelementptr inbounds i8, ptr %v32, i32 64
+  %v50 = load <16 x i32>, ptr %v33, align 64, !tbaa !4
+  %v51 = getelementptr inbounds i8, ptr %v33, i32 64
   %v52 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v42, <16 x i32> %v36, i32 4)
   %v53 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v44, <16 x i32> %v37, i32 4)
   %v54 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v47, <16 x i32> %v38, i32 4)
@@ -99,9 +88,8 @@ b2:                                               ; preds = %b2, %b1
   %v70 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v64)
   %v71 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %v69, <16 x i32> %v70, i32 %a2)
   %v72 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v68, <16 x i32> %v71)
-  %v73 = bitcast i8* %v30 to <16 x i32>*
-  store <16 x i32> %v72, <16 x i32>* %v73, align 64, !tbaa !4
-  %v74 = getelementptr inbounds i8, i8* %v30, i32 64
+  store <16 x i32> %v72, ptr %v30, align 64, !tbaa !4
+  %v74 = getelementptr inbounds i8, ptr %v30, i32 64
   %v75 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %v57, i32 %v1, i32 0)
   %v76 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %v57, i32 %v1, i32 1)
   %v77 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %v75, <32 x i32> %v58, i32 %v4, i32 0)
@@ -115,9 +103,8 @@ b2:                                               ; preds = %b2, %b1
   %v85 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v79)
   %v86 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %v84, <16 x i32> %v85, i32 %a2)
   %v87 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v83, <16 x i32> %v86)
-  %v88 = bitcast i8* %v34 to <16 x i32>*
-  store <16 x i32> %v87, <16 x i32>* %v88, align 64, !tbaa !4
-  %v89 = getelementptr inbounds i8, i8* %v34, i32 64
+  store <16 x i32> %v87, ptr %v34, align 64, !tbaa !4
+  %v89 = getelementptr inbounds i8, ptr %v34, i32 64
   %v90 = add nsw i32 %v35, 64
   %v91 = icmp slt i32 %v90, %a4
   br i1 %v91, label %b2, label %b3

diff  --git a/llvm/test/CodeGen/Hexagon/v6vect-locals1.ll b/llvm/test/CodeGen/Hexagon/v6vect-locals1.ll
index 8ae0c7b66ee40..50ea80ceb3800 100644
--- a/llvm/test/CodeGen/Hexagon/v6vect-locals1.ll
+++ b/llvm/test/CodeGen/Hexagon/v6vect-locals1.ll
@@ -13,24 +13,24 @@ target triple = "hexagon"
 @g2 = global <16 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>, align 64
 
 ; Function Attrs: nounwind
-declare i32 @f0(i8* nocapture, ...) #0
+declare i32 @f0(ptr nocapture, ...) #0
 
 ; Function Attrs: nounwind
-define void @f1(%s.0* byval(%s.0) %a0, <16 x i32> %a1) #0 {
+define void @f1(ptr byval(%s.0) %a0, <16 x i32> %a1) #0 {
 b0:
   %v0 = alloca <16 x i32>, align 64
-  store <16 x i32> %a1, <16 x i32>* %v0, align 64, !tbaa !0
-  %v1 = ptrtoint %s.0* %a0 to i32
-  %v2 = ptrtoint <16 x i32>* %v0 to i32
-  %v3 = call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @g0, i32 0, i32 0), i32 %v1, i32 %v2) #0
+  store <16 x i32> %a1, ptr %v0, align 64, !tbaa !0
+  %v1 = ptrtoint ptr %a0 to i32
+  %v2 = ptrtoint ptr %v0 to i32
+  %v3 = call i32 (ptr, ...) @f0(ptr @g0, i32 %v1, i32 %v2) #0
   ret void
 }
 
 ; Function Attrs: nounwind
 define i32 @f2() #0 {
 b0:
-  %v0 = load <16 x i32>, <16 x i32>* @g2, align 64, !tbaa !0
-  tail call void @f1(%s.0* byval(%s.0) @g1, <16 x i32> %v0)
+  %v0 = load <16 x i32>, ptr @g2, align 64, !tbaa !0
+  tail call void @f1(ptr byval(%s.0) @g1, <16 x i32> %v0)
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/v6vect-no-sideeffects.ll b/llvm/test/CodeGen/Hexagon/v6vect-no-sideeffects.ll
index cb9c878d1a7c9..1e50b5c7940b3 100644
--- a/llvm/test/CodeGen/Hexagon/v6vect-no-sideeffects.ll
+++ b/llvm/test/CodeGen/Hexagon/v6vect-no-sideeffects.ll
@@ -16,19 +16,17 @@
 @g0 = global [256 x i8] c"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^00226644,,..**8888::66,,,,&&^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^22000022..4444>>::8888**..^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^<<66220000226644<<>>::^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^>><<446622000022>>", align 64
 
 ; Function Attrs: nounwind
-define void @f0(i16** noalias nocapture readonly %a0, i16* noalias nocapture readonly %a1, i32* noalias nocapture %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6) #0 {
+define void @f0(ptr noalias nocapture readonly %a0, ptr noalias nocapture readonly %a1, ptr noalias nocapture %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6) #0 {
 b0:
-  %v0 = load <16 x i32>, <16 x i32>* bitcast ([256 x i8]* @g0 to <16 x i32>*), align 64, !tbaa !0
-  %v1 = load <16 x i32>, <16 x i32>* bitcast (i8* getelementptr inbounds ([256 x i8], [256 x i8]* @g0, i32 0, i32 64) to <16 x i32>*), align 64, !tbaa !0
-  %v2 = load <16 x i32>, <16 x i32>* bitcast (i8* getelementptr inbounds ([256 x i8], [256 x i8]* @g0, i32 0, i32 128) to <16 x i32>*), align 64, !tbaa !0
-  %v3 = load <16 x i32>, <16 x i32>* bitcast (i8* getelementptr inbounds ([256 x i8], [256 x i8]* @g0, i32 0, i32 192) to <16 x i32>*), align 64, !tbaa !0
+  %v0 = load <16 x i32>, ptr @g0, align 64, !tbaa !0
+  %v1 = load <16 x i32>, ptr getelementptr inbounds ([256 x i8], ptr @g0, i32 0, i32 64), align 64, !tbaa !0
+  %v2 = load <16 x i32>, ptr getelementptr inbounds ([256 x i8], ptr @g0, i32 0, i32 128), align 64, !tbaa !0
+  %v3 = load <16 x i32>, ptr getelementptr inbounds ([256 x i8], ptr @g0, i32 0, i32 192), align 64, !tbaa !0
   %v4 = icmp sgt i32 %a5, 0
   br i1 %v4, label %b1, label %b5
 
 b1:                                               ; preds = %b0
-  %v5 = bitcast i32* %a2 to <16 x i32>*
   %v6 = tail call <16 x i32> @llvm.hexagon.V6.vd0()
-  %v7 = bitcast i16* %a1 to i64*
   %v8 = mul nsw i32 %a3, 4
   %v9 = add i32 %v8, %a6
   %v10 = add i32 %v9, 32
@@ -37,21 +35,21 @@ b1:                                               ; preds = %b0
 
 b2:                                               ; preds = %b4, %b1
   %v12 = phi i32 [ 0, %b1 ], [ %v59, %b4 ]
-  %v13 = phi <16 x i32>* [ %v5, %b1 ], [ %v58, %b4 ]
-  %v14 = getelementptr i16*, i16** %a0, i32 %v12
+  %v13 = phi ptr [ %a2, %b1 ], [ %v58, %b4 ]
+  %v14 = getelementptr ptr, ptr %a0, i32 %v12
   br label %b3
 
 b3:                                               ; preds = %b3, %b2
-  %v15 = phi i16** [ %v14, %b2 ], [ %v57, %b3 ]
+  %v15 = phi ptr [ %v14, %b2 ], [ %v57, %b3 ]
   %v16 = phi i32 [ 0, %b2 ], [ %v55, %b3 ]
-  %v17 = phi i64* [ %v7, %b2 ], [ %v23, %b3 ]
+  %v17 = phi ptr [ %a1, %b2 ], [ %v23, %b3 ]
   %v18 = phi <16 x i32> [ %v6, %b2 ], [ %v54, %b3 ]
-  %v19 = load i16*, i16** %v15, align 4, !tbaa !3
-  %v20 = getelementptr inbounds i16, i16* %v19, i32 %v9
-  %v21 = getelementptr inbounds i64, i64* %v17, i32 1
-  %v22 = load i64, i64* %v17, align 8, !tbaa !0
-  %v23 = getelementptr inbounds i64, i64* %v17, i32 2
-  %v24 = load i64, i64* %v21, align 8, !tbaa !0
+  %v19 = load ptr, ptr %v15, align 4, !tbaa !3
+  %v20 = getelementptr inbounds i16, ptr %v19, i32 %v9
+  %v21 = getelementptr inbounds i64, ptr %v17, i32 1
+  %v22 = load i64, ptr %v17, align 8, !tbaa !0
+  %v23 = getelementptr inbounds i64, ptr %v17, i32 2
+  %v24 = load i64, ptr %v21, align 8, !tbaa !0
   %v25 = trunc i64 %v22 to i32
   %v26 = lshr i64 %v22, 32
   %v27 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 %v25)
@@ -62,11 +60,9 @@ b3:                                               ; preds = %b3, %b2
   %v32 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 %v30)
   %v33 = trunc i64 %v31 to i32
   %v34 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 %v33)
-  %v35 = bitcast i16* %v20 to <16 x i32>*
-  %v36 = load <16 x i32>, <16 x i32>* %v35, align 4, !tbaa !0
-  %v37 = getelementptr inbounds i16, i16* %v19, i32 %v10
-  %v38 = bitcast i16* %v37 to <16 x i32>*
-  %v39 = load <16 x i32>, <16 x i32>* %v38, align 4, !tbaa !0
+  %v36 = load <16 x i32>, ptr %v20, align 4, !tbaa !0
+  %v37 = getelementptr inbounds i16, ptr %v19, i32 %v10
+  %v39 = load <16 x i32>, ptr %v37, align 4, !tbaa !0
   %v40 = tail call <16 x i32> @llvm.hexagon.V6.vpackeh(<16 x i32> %v39, <16 x i32> %v36)
   %v41 = tail call <16 x i32> @llvm.hexagon.V6.vpackeh(<16 x i32> %v40, <16 x i32> %v40)
   %v42 = tail call <16 x i32> @llvm.hexagon.V6.vdelta(<16 x i32> %v41, <16 x i32> %v0)
@@ -84,12 +80,12 @@ b3:                                               ; preds = %b3, %b2
   %v54 = tail call <16 x i32> @llvm.hexagon.V6.vasrw.acc(<16 x i32> %v18, <16 x i32> %v53, i32 6)
   %v55 = add nsw i32 %v16, 1
   %v56 = icmp eq i32 %v16, 7
-  %v57 = getelementptr i16*, i16** %v15, i32 1
+  %v57 = getelementptr ptr, ptr %v15, i32 1
   br i1 %v56, label %b4, label %b3
 
 b4:                                               ; preds = %b3
-  %v58 = getelementptr inbounds <16 x i32>, <16 x i32>* %v13, i32 1
-  store <16 x i32> %v54, <16 x i32>* %v13, align 64, !tbaa !0
+  %v58 = getelementptr inbounds <16 x i32>, ptr %v13, i32 1
+  store <16 x i32> %v54, ptr %v13, align 64, !tbaa !0
   %v59 = add nsw i32 %v12, 1
   %v60 = icmp eq i32 %v12, %v11
   br i1 %v60, label %b5, label %b2

diff  --git a/llvm/test/CodeGen/Hexagon/v6vect-pred2.ll b/llvm/test/CodeGen/Hexagon/v6vect-pred2.ll
index 8be372a56c8f0..7bf385be1d4fe 100644
--- a/llvm/test/CodeGen/Hexagon/v6vect-pred2.ll
+++ b/llvm/test/CodeGen/Hexagon/v6vect-pred2.ll
@@ -15,13 +15,13 @@ target triple = "hexagon"
 define i32 @f0() #0 {
 b0:
   %v0 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 11)
-  store <16 x i32> %v0, <16 x i32>* @g1, align 64, !tbaa !0
+  store <16 x i32> %v0, ptr @g1, align 64, !tbaa !0
   %v1 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 12)
-  store <16 x i32> %v1, <16 x i32>* @g2, align 64, !tbaa !0
-  %v2 = load <16 x i32>, <16 x i32>* @g0, align 64, !tbaa !0
+  store <16 x i32> %v1, ptr @g2, align 64, !tbaa !0
+  %v2 = load <16 x i32>, ptr @g0, align 64, !tbaa !0
   %v3 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v2, i32 -1)
   %v4 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<64 x i1> %v3, <16 x i32> %v0, <16 x i32> %v1)
-  store <16 x i32> %v4, <16 x i32>* @g3, align 64, !tbaa !0
+  store <16 x i32> %v4, ptr @g3, align 64, !tbaa !0
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/v6vect-spill-kill.ll b/llvm/test/CodeGen/Hexagon/v6vect-spill-kill.ll
index d724075a4ace1..d22296acc9431 100644
--- a/llvm/test/CodeGen/Hexagon/v6vect-spill-kill.ll
+++ b/llvm/test/CodeGen/Hexagon/v6vect-spill-kill.ll
@@ -5,7 +5,7 @@
 ; slots. This happens because the kill flag wasn't added to the appropriate
 ; operands for the spill code.
 
-define void @f0(i32 %a0, i8* noalias nocapture %a1) #0 {
+define void @f0(i32 %a0, ptr noalias nocapture %a1) #0 {
 b0:
   %v0 = tail call <32 x i32> @llvm.hexagon.V6.vshuffh.128B(<32 x i32> undef)
   %v1 = sdiv i32 %a0, 128
@@ -13,11 +13,10 @@ b0:
   br i1 %v2, label %b1, label %b3
 
 b1:                                               ; preds = %b0
-  %v3 = bitcast i8* %a1 to <32 x i32>*
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
-  %v4 = phi <32 x i32>* [ %v3, %b1 ], [ undef, %b2 ]
+  %v4 = phi ptr [ %a1, %b1 ], [ undef, %b2 ]
   %v5 = tail call <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32> undef, <32 x i32> zeroinitializer, i32 2)
  %v6 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v5, <32 x i32> zeroinitializer)
   %v7 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer)
@@ -34,9 +33,8 @@ b2:                                               ; preds = %b2, %b1
   %v18 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> %v14, <32 x i32> %v17, <32 x i32> zeroinitializer)
   %v19 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> %v15, <32 x i32> %v18, <32 x i32> zeroinitializer)
   %v20 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> %v16, <32 x i32> %v19, <32 x i32> zeroinitializer)
-  %v21 = getelementptr inbounds i8, i8* null, i32 undef
-  %v22 = bitcast i8* %v21 to <32 x i32>*
-  %v23 = load <32 x i32>, <32 x i32>* %v22, align 128, !tbaa !0
+  %v21 = getelementptr inbounds i8, ptr null, i32 undef
+  %v23 = load <32 x i32>, ptr %v21, align 128, !tbaa !0
  %v24 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v23, <32 x i32> zeroinitializer)
   %v25 = tail call <128 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v24, <32 x i32> undef)
   %v26 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<128 x i1> %v25, <32 x i32> %v20, <32 x i32> zeroinitializer)
@@ -85,7 +83,7 @@ b2:                                               ; preds = %b2, %b1
   %v69 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v65)
   %v70 = tail call <32 x i32> @llvm.hexagon.V6.vasrwh.128B(<32 x i32> %v69, <32 x i32> undef, i32 14)
   %v71 = tail call <32 x i32> @llvm.hexagon.V6.vshuffeb.128B(<32 x i32> %v70, <32 x i32> %v68)
-  store <32 x i32> %v71, <32 x i32>* %v4, align 128, !tbaa !0
+  store <32 x i32> %v71, ptr %v4, align 128, !tbaa !0
   %v72 = icmp slt i32 0, %v1
   br i1 %v72, label %b2, label %b3
 

diff  --git a/llvm/test/CodeGen/Hexagon/v6vect-vmem1.ll b/llvm/test/CodeGen/Hexagon/v6vect-vmem1.ll
index dc02cc4aaf544..9a7930b9134b9 100644
--- a/llvm/test/CodeGen/Hexagon/v6vect-vmem1.ll
+++ b/llvm/test/CodeGen/Hexagon/v6vect-vmem1.ll
@@ -4,13 +4,13 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind
-define i32 @f0(<16 x i32>* %a0, <32 x i32>* %a1) #0 {
+define i32 @f0(ptr %a0, ptr %a1) #0 {
 b0:
   %v0 = call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1)
-  store <16 x i32> %v0, <16 x i32>* %a0, align 64
-  %v1 = load <16 x i32>, <16 x i32>* %a0, align 64
+  store <16 x i32> %v0, ptr %a0, align 64
+  %v1 = load <16 x i32>, ptr %a0, align 64
   %v2 = call <32 x i32> @llvm.hexagon.V6.vunpackh(<16 x i32> %v1)
-  store <32 x i32> %v2, <32 x i32>* %a1, align 64
+  store <32 x i32> %v2, ptr %a1, align 64
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/v6vect-vsplat.ll b/llvm/test/CodeGen/Hexagon/v6vect-vsplat.ll
index e5a4e4d9e504e..53b45a6a87371 100644
--- a/llvm/test/CodeGen/Hexagon/v6vect-vsplat.ll
+++ b/llvm/test/CodeGen/Hexagon/v6vect-vsplat.ll
@@ -18,14 +18,14 @@ b0:
   tail call void @f1() #2
   %v0 = tail call i32 @f2(i8 zeroext 0) #2
   %v1 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1) #2
-  store <16 x i32> %v1, <16 x i32>* getelementptr inbounds ([2 x <16 x i32>], [2 x <16 x i32>]* @g2, i32 0, i32 0), align 64, !tbaa !0
+  store <16 x i32> %v1, ptr @g2, align 64, !tbaa !0
   %v2 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 2) #2
-  store <16 x i32> %v2, <16 x i32>* getelementptr inbounds ([2 x <16 x i32>], [2 x <16 x i32>]* @g2, i32 0, i32 1), align 64, !tbaa !0
+  store <16 x i32> %v2, ptr getelementptr inbounds ([2 x <16 x i32>], ptr @g2, i32 0, i32 1), align 64, !tbaa !0
   %v3 = tail call <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32> %v1, <16 x i32> %v2) #2
-  store <32 x i32> %v3, <32 x i32>* getelementptr inbounds ([2 x <32 x i32>], [2 x <32 x i32>]* @g0, i32 0, i32 0), align 128, !tbaa !0
-  store <32 x i32> %v3, <32 x i32>* getelementptr inbounds ([2 x <32 x i32>], [2 x <32 x i32>]* @g0, i32 0, i32 1), align 128, !tbaa !0
+  store <32 x i32> %v3, ptr @g0, align 128, !tbaa !0
+  store <32 x i32> %v3, ptr getelementptr inbounds ([2 x <32 x i32>], ptr @g0, i32 0, i32 1), align 128, !tbaa !0
   %v4 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v3, <32 x i32> %v3, i32 -2147483648)
-  store <32 x i32> %v4, <32 x i32>* @g1, align 128, !tbaa !0
+  store <32 x i32> %v4, ptr @g1, align 128, !tbaa !0
   ret i32 0
 }
 
@@ -40,12 +40,12 @@ declare <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32>, <32 x i32>, i32)
 define void @f3() #0 {
 b0:
   %v0 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1)
-  store <16 x i32> %v0, <16 x i32>* getelementptr inbounds ([2 x <16 x i32>], [2 x <16 x i32>]* @g2, i32 0, i32 0), align 64, !tbaa !0
+  store <16 x i32> %v0, ptr @g2, align 64, !tbaa !0
   %v1 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 2)
-  store <16 x i32> %v1, <16 x i32>* getelementptr inbounds ([2 x <16 x i32>], [2 x <16 x i32>]* @g2, i32 0, i32 1), align 64, !tbaa !0
+  store <16 x i32> %v1, ptr getelementptr inbounds ([2 x <16 x i32>], ptr @g2, i32 0, i32 1), align 64, !tbaa !0
   %v2 = tail call <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32> %v0, <16 x i32> %v1)
-  store <32 x i32> %v2, <32 x i32>* getelementptr inbounds ([2 x <32 x i32>], [2 x <32 x i32>]* @g0, i32 0, i32 0), align 128, !tbaa !0
-  store <32 x i32> %v2, <32 x i32>* getelementptr inbounds ([2 x <32 x i32>], [2 x <32 x i32>]* @g0, i32 0, i32 1), align 128, !tbaa !0
+  store <32 x i32> %v2, ptr @g0, align 128, !tbaa !0
+  store <32 x i32> %v2, ptr getelementptr inbounds ([2 x <32 x i32>], ptr @g0, i32 0, i32 1), align 128, !tbaa !0
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/vacopy.ll b/llvm/test/CodeGen/Hexagon/vacopy.ll
index 7b9dcd3ab45ac..999f151a5bee6 100644
--- a/llvm/test/CodeGen/Hexagon/vacopy.ll
+++ b/llvm/test/CodeGen/Hexagon/vacopy.ll
@@ -5,25 +5,23 @@
 ; CHECK-DAG: r{{[0-9]+}}:{{[0-9]+}} = memd{{.*}}
 ; CHECK-DAG: memd{{.*}} = r{{[0-9]+}}:{{[0-9]+}}
 
-%struct.__va_list_tag = type { i8*, i8*, i8* }
+%struct.__va_list_tag = type { ptr, ptr, ptr }
 
 ; Function Attrs: nounwind
 define void @PrintInts(i32 %first, ...) #0 {
 entry:
   %vl = alloca [1 x %struct.__va_list_tag], align 8
   %vl_count = alloca [1 x %struct.__va_list_tag], align 8
-  %arraydecay1 = bitcast [1 x %struct.__va_list_tag]* %vl to i8*
-  call void @llvm.va_start(i8* %arraydecay1)
-  %0 = bitcast [1 x %struct.__va_list_tag]* %vl_count to i8*
-  call void @llvm.va_copy(i8* %0, i8* %arraydecay1)
+  call void @llvm.va_start(ptr %vl)
+  call void @llvm.va_copy(ptr %vl_count, ptr %vl)
   ret void
 }
 
 ; Function Attrs: nounwind
-declare void @llvm.va_start(i8*) #1
+declare void @llvm.va_start(ptr) #1
 
 ; Function Attrs: nounwind
-declare void @llvm.va_copy(i8*, i8*) #1
+declare void @llvm.va_copy(ptr, ptr) #1
 
 ; Function Attrs: nounwind
 define i32 @main() #0 {

diff  --git a/llvm/test/CodeGen/Hexagon/vadd1.ll b/llvm/test/CodeGen/Hexagon/vadd1.ll
index e716e9b5b1a43..edb335288076d 100644
--- a/llvm/test/CodeGen/Hexagon/vadd1.ll
+++ b/llvm/test/CodeGen/Hexagon/vadd1.ll
@@ -10,10 +10,10 @@ target triple = "hexagon"
 ; Function Attrs: nounwind
 define void @f0() #0 {
 b0:
-  %v0 = load <16 x i32>, <16 x i32>* @g0, align 32, !tbaa !0
-  %v1 = load <16 x i32>, <16 x i32>* @g1, align 32, !tbaa !0
+  %v0 = load <16 x i32>, ptr @g0, align 32, !tbaa !0
+  %v1 = load <16 x i32>, ptr @g1, align 32, !tbaa !0
   %v2 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v0, <16 x i32> %v1)
-  store <16 x i32> %v2, <16 x i32>* @g2, align 64, !tbaa !0
+  store <16 x i32> %v2, ptr @g2, align 64, !tbaa !0
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/vaddh.ll b/llvm/test/CodeGen/Hexagon/vaddh.ll
index f139c288bb5b9..eb7e74a3486f9 100644
--- a/llvm/test/CodeGen/Hexagon/vaddh.ll
+++ b/llvm/test/CodeGen/Hexagon/vaddh.ll
@@ -6,10 +6,10 @@
 
 define void @f0() #0 {
 b0:
-  %v0 = load i32, i32* @g0, align 4
-  %v1 = load i32, i32* @g1, align 4
+  %v0 = load i32, ptr @g0, align 4
+  %v1 = load i32, ptr @g1, align 4
   %v2 = call i32 @llvm.hexagon.A2.svaddh(i32 %v0, i32 %v1)
-  store i32 %v2, i32* @g1, align 4
+  store i32 %v2, ptr @g1, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/validate-offset.ll b/llvm/test/CodeGen/Hexagon/validate-offset.ll
index ed98f281e4b28..d6ca532770e5a 100644
--- a/llvm/test/CodeGen/Hexagon/validate-offset.ll
+++ b/llvm/test/CodeGen/Hexagon/validate-offset.ll
@@ -9,29 +9,29 @@ b0:
   %v0 = alloca i32, align 4
   %v1 = alloca i32, align 4
   %v2 = alloca i32, align 4
-  store i32 %a0, i32* %v1, align 4
-  store i32 %a1, i32* %v2, align 4
-  %v3 = load i32, i32* %v1, align 4
-  %v4 = load i32, i32* %v2, align 4
+  store i32 %a0, ptr %v1, align 4
+  store i32 %a1, ptr %v2, align 4
+  %v3 = load i32, ptr %v1, align 4
+  %v4 = load i32, ptr %v2, align 4
   %v5 = icmp sgt i32 %v3, %v4
   br i1 %v5, label %b1, label %b2
 
 b1:                                               ; preds = %b0
-  %v6 = load i32, i32* %v1, align 4
-  %v7 = load i32, i32* %v2, align 4
+  %v6 = load i32, ptr %v1, align 4
+  %v7 = load i32, ptr %v2, align 4
   %v8 = add nsw i32 %v6, %v7
-  store i32 %v8, i32* %v0
+  store i32 %v8, ptr %v0
   br label %b3
 
 b2:                                               ; preds = %b0
-  %v9 = load i32, i32* %v1, align 4
-  %v10 = load i32, i32* %v2, align 4
+  %v9 = load i32, ptr %v1, align 4
+  %v10 = load i32, ptr %v2, align 4
   %v11 = sub nsw i32 %v9, %v10
-  store i32 %v11, i32* %v0
+  store i32 %v11, ptr %v0
   br label %b3
 
 b3:                                               ; preds = %b2, %b1
-  %v12 = load i32, i32* %v0
+  %v12 = load i32, ptr %v0
   ret i32 %v12
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/vararg-linux-abi.ll b/llvm/test/CodeGen/Hexagon/vararg-linux-abi.ll
index a4523313893f6..f270afb2859b4 100644
--- a/llvm/test/CodeGen/Hexagon/vararg-linux-abi.ll
+++ b/llvm/test/CodeGen/Hexagon/vararg-linux-abi.ll
@@ -7,84 +7,80 @@
 
 target triple = "hexagon-unknown-linux"
 
-%s.0 = type { i8*, i8*, i8* }
+%s.0 = type { ptr, ptr, ptr }
 
 define dso_local i32 @f0(i32 %a0, ...) local_unnamed_addr #0 {
 b0:
   %v0 = alloca [1 x %s.0], align 8
-  %v1 = bitcast [1 x %s.0]* %v0 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 12, i8* nonnull %v1) #2
-  call void @llvm.va_start(i8* nonnull %v1)
-  %v2 = getelementptr inbounds [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 0
-  %v3 = load i8*, i8** %v2, align 8
-  %v4 = getelementptr inbounds [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 1
-  %v5 = load i8*, i8** %v4, align 4
-  %v6 = getelementptr i8, i8* %v3, i32 4
-  %v7 = icmp sgt i8* %v6, %v5
+  call void @llvm.lifetime.start.p0(i64 12, ptr nonnull %v0) #2
+  call void @llvm.va_start(ptr nonnull %v0)
+  %v3 = load ptr, ptr %v0, align 8
+  %v4 = getelementptr inbounds [1 x %s.0], ptr %v0, i32 0, i32 0, i32 1
+  %v5 = load ptr, ptr %v4, align 4
+  %v6 = getelementptr i8, ptr %v3, i32 4
+  %v7 = icmp sgt ptr %v6, %v5
   br i1 %v7, label %b1, label %b2
 
 b1:                                               ; preds = %b0
-  %v8 = getelementptr inbounds [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 2
-  %v9 = load i8*, i8** %v8, align 8
-  %v10 = getelementptr i8, i8* %v9, i32 4
-  store i8* %v10, i8** %v8, align 8
+  %v8 = getelementptr inbounds [1 x %s.0], ptr %v0, i32 0, i32 0, i32 2
+  %v9 = load ptr, ptr %v8, align 8
+  %v10 = getelementptr i8, ptr %v9, i32 4
+  store ptr %v10, ptr %v8, align 8
   br label %b2
 
 b2:                                               ; preds = %b1, %b0
-  %v11 = phi i8* [ %v10, %b1 ], [ %v6, %b0 ]
-  %v12 = phi i8* [ %v9, %b1 ], [ %v3, %b0 ]
-  %v13 = bitcast i8* %v12 to i32*
-  store i8* %v11, i8** %v2, align 8
-  %v14 = load i32, i32* %v13, align 4
+  %v11 = phi ptr [ %v10, %b1 ], [ %v6, %b0 ]
+  %v12 = phi ptr [ %v9, %b1 ], [ %v3, %b0 ]
+  store ptr %v11, ptr %v0, align 8
+  %v14 = load i32, ptr %v12, align 4
   %v15 = icmp eq i32 %v14, 0
   br i1 %v15, label %b7, label %b3
 
 b3:                                               ; preds = %b2
-  %v16 = getelementptr inbounds [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 2
+  %v16 = getelementptr inbounds [1 x %s.0], ptr %v0, i32 0, i32 0, i32 2
   br label %b4
 
 b4:                                               ; preds = %b6, %b3
   %v17 = phi i32 [ %v14, %b3 ], [ %v28, %b6 ]
   %v18 = phi i32 [ %a0, %b3 ], [ %v20, %b6 ]
-  %v19 = phi i8* [ %v11, %b3 ], [ %v25, %b6 ]
+  %v19 = phi ptr [ %v11, %b3 ], [ %v25, %b6 ]
   %v20 = add nsw i32 %v17, %v18
-  %v21 = getelementptr i8, i8* %v19, i32 4
-  %v22 = icmp sgt i8* %v21, %v5
+  %v21 = getelementptr i8, ptr %v19, i32 4
+  %v22 = icmp sgt ptr %v21, %v5
   br i1 %v22, label %b5, label %b6
 
 b5:                                               ; preds = %b4
-  %v23 = load i8*, i8** %v16, align 8
-  %v24 = getelementptr i8, i8* %v23, i32 4
-  store i8* %v24, i8** %v16, align 8
+  %v23 = load ptr, ptr %v16, align 8
+  %v24 = getelementptr i8, ptr %v23, i32 4
+  store ptr %v24, ptr %v16, align 8
   br label %b6
 
 b6:                                               ; preds = %b5, %b4
-  %v25 = phi i8* [ %v24, %b5 ], [ %v21, %b4 ]
-  %v26 = phi i8* [ %v23, %b5 ], [ %v19, %b4 ]
-  %v27 = bitcast i8* %v26 to i32*
-  store i8* %v25, i8** %v2, align 8
-  %v28 = load i32, i32* %v27, align 4
+  %v25 = phi ptr [ %v24, %b5 ], [ %v21, %b4 ]
+  %v26 = phi ptr [ %v23, %b5 ], [ %v19, %b4 ]
+  store ptr %v25, ptr %v0, align 8
+  %v28 = load i32, ptr %v26, align 4
   %v29 = icmp eq i32 %v28, 0
   br i1 %v29, label %b7, label %b4
 
 b7:                                               ; preds = %b6, %b2
   %v30 = phi i32 [ %a0, %b2 ], [ %v20, %b6 ]
-  call void @llvm.va_end(i8* nonnull %v1)
-  call void @llvm.lifetime.end.p0i8(i64 12, i8* nonnull %v1) #2
+  call void @llvm.va_end(ptr nonnull %v0)
+  call void @llvm.lifetime.end.p0(i64 12, ptr nonnull %v0) #2
   ret i32 %v30
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
 
 ; Function Attrs: nounwind
-declare void @llvm.va_start(i8*) #2
+declare void @llvm.va_start(ptr) #2
 
 ; Function Attrs: nounwind
-declare void @llvm.va_end(i8*) #2
+declare void @llvm.va_end(ptr) #2
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
 
 attributes #0 = { argmemonly nounwind "frame-pointer"="all" }
 

diff  --git a/llvm/test/CodeGen/Hexagon/vararg.ll b/llvm/test/CodeGen/Hexagon/vararg.ll
index c22da1a1163eb..49fef2e4b1db1 100644
--- a/llvm/test/CodeGen/Hexagon/vararg.ll
+++ b/llvm/test/CodeGen/Hexagon/vararg.ll
@@ -13,7 +13,7 @@
 
 
 %struct.AAA = type { i32, i32, i32, i32 }
-%struct.__va_list_tag = type { i8*, i8*, i8* }
+%struct.__va_list_tag = type { ptr, ptr, ptr }
 
 @aaa = global %struct.AAA { i32 100, i32 200, i32 300, i32 400 }, align 4
 @.str = private unnamed_addr constant [13 x i8] c"result = %d\0A\00", align 1
@@ -22,73 +22,68 @@
 define i32 @foo(i32 %xx, ...) #0 {
 entry:
   %ap = alloca [1 x %struct.__va_list_tag], align 8
-  %arraydecay1 = bitcast [1 x %struct.__va_list_tag]* %ap to i8*
-  call void @llvm.va_start(i8* %arraydecay1)
-  %__current_saved_reg_area_pointer_p = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0, i32 0
-  %__current_saved_reg_area_pointer = load i8*, i8** %__current_saved_reg_area_pointer_p, align 8
-  %__saved_reg_area_end_pointer_p = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0, i32 1
-  %__saved_reg_area_end_pointer = load i8*, i8** %__saved_reg_area_end_pointer_p, align 4
-  %__new_saved_reg_area_pointer = getelementptr i8, i8* %__current_saved_reg_area_pointer, i32 4
-  %0 = icmp sgt i8* %__new_saved_reg_area_pointer, %__saved_reg_area_end_pointer
-  %__overflow_area_pointer_p = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0, i32 2
-  %__overflow_area_pointer = load i8*, i8** %__overflow_area_pointer_p, align 8
+  call void @llvm.va_start(ptr %ap)
+  %__current_saved_reg_area_pointer = load ptr, ptr %ap, align 8
+  %__saved_reg_area_end_pointer_p = getelementptr inbounds [1 x %struct.__va_list_tag], ptr %ap, i32 0, i32 0, i32 1
+  %__saved_reg_area_end_pointer = load ptr, ptr %__saved_reg_area_end_pointer_p, align 4
+  %__new_saved_reg_area_pointer = getelementptr i8, ptr %__current_saved_reg_area_pointer, i32 4
+  %0 = icmp sgt ptr %__new_saved_reg_area_pointer, %__saved_reg_area_end_pointer
+  %__overflow_area_pointer_p = getelementptr inbounds [1 x %struct.__va_list_tag], ptr %ap, i32 0, i32 0, i32 2
+  %__overflow_area_pointer = load ptr, ptr %__overflow_area_pointer_p, align 8
   br i1 %0, label %vaarg.on_stack, label %vaarg.end
 
 vaarg.on_stack:                                   ; preds = %entry
-  %__overflow_area_pointer.next = getelementptr i8, i8* %__overflow_area_pointer, i32 4
-  store i8* %__overflow_area_pointer.next, i8** %__overflow_area_pointer_p, align 8
+  %__overflow_area_pointer.next = getelementptr i8, ptr %__overflow_area_pointer, i32 4
+  store ptr %__overflow_area_pointer.next, ptr %__overflow_area_pointer_p, align 8
   br label %vaarg.end
 
 vaarg.end:                                        ; preds = %entry, %vaarg.on_stack
-  %__overflow_area_pointer5 = phi i8* [ %__overflow_area_pointer.next, %vaarg.on_stack ], [ %__overflow_area_pointer, %entry ]
-  %storemerge32 = phi i8* [ %__overflow_area_pointer.next, %vaarg.on_stack ], [ %__new_saved_reg_area_pointer, %entry ]
-  %vaarg.addr.in = phi i8* [ %__overflow_area_pointer, %vaarg.on_stack ], [ %__current_saved_reg_area_pointer, %entry ]
-  store i8* %storemerge32, i8** %__current_saved_reg_area_pointer_p, align 8
-  %vaarg.addr = bitcast i8* %vaarg.addr.in to i32*
-  %1 = load i32, i32* %vaarg.addr, align 4
-  %__overflow_area_pointer_p4 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0, i32 2
-  %__overflow_area_pointer.next6 = getelementptr i8, i8* %__overflow_area_pointer5, i32 16
-  store i8* %__overflow_area_pointer.next6, i8** %__overflow_area_pointer_p4, align 8
-  %bbb.sroa.1.0.idx27 = getelementptr inbounds i8, i8* %__overflow_area_pointer5, i32 12
-  %2 = bitcast i8* %bbb.sroa.1.0.idx27 to i32*
-  %bbb.sroa.1.0.copyload = load i32, i32* %2, align 4
+  %__overflow_area_pointer5 = phi ptr [ %__overflow_area_pointer.next, %vaarg.on_stack ], [ %__overflow_area_pointer, %entry ]
+  %storemerge32 = phi ptr [ %__overflow_area_pointer.next, %vaarg.on_stack ], [ %__new_saved_reg_area_pointer, %entry ]
+  %vaarg.addr.in = phi ptr [ %__overflow_area_pointer, %vaarg.on_stack ], [ %__current_saved_reg_area_pointer, %entry ]
+  store ptr %storemerge32, ptr %ap, align 8
+  %1 = load i32, ptr %vaarg.addr.in, align 4
+  %__overflow_area_pointer_p4 = getelementptr inbounds [1 x %struct.__va_list_tag], ptr %ap, i32 0, i32 0, i32 2
+  %__overflow_area_pointer.next6 = getelementptr i8, ptr %__overflow_area_pointer5, i32 16
+  store ptr %__overflow_area_pointer.next6, ptr %__overflow_area_pointer_p4, align 8
+  %bbb.sroa.1.0.idx27 = getelementptr inbounds i8, ptr %__overflow_area_pointer5, i32 12
+  %bbb.sroa.1.0.copyload = load i32, ptr %bbb.sroa.1.0.idx27, align 4
   %add8 = add nsw i32 %bbb.sroa.1.0.copyload, %1
-  %__new_saved_reg_area_pointer15 = getelementptr i8, i8* %storemerge32, i32 4
-  %3 = icmp sgt i8* %__new_saved_reg_area_pointer15, %__saved_reg_area_end_pointer
-  br i1 %3, label %vaarg.on_stack17, label %vaarg.end21
+  %__new_saved_reg_area_pointer15 = getelementptr i8, ptr %storemerge32, i32 4
+  %2 = icmp sgt ptr %__new_saved_reg_area_pointer15, %__saved_reg_area_end_pointer
+  br i1 %2, label %vaarg.on_stack17, label %vaarg.end21
 
 vaarg.on_stack17:                                 ; preds = %vaarg.end
-  %__overflow_area_pointer.next20 = getelementptr i8, i8* %__overflow_area_pointer5, i32 20
-  store i8* %__overflow_area_pointer.next20, i8** %__overflow_area_pointer_p4, align 8
+  %__overflow_area_pointer.next20 = getelementptr i8, ptr %__overflow_area_pointer5, i32 20
+  store ptr %__overflow_area_pointer.next20, ptr %__overflow_area_pointer_p4, align 8
   br label %vaarg.end21
 
 vaarg.end21:                                      ; preds = %vaarg.end, %vaarg.on_stack17
-  %storemerge = phi i8* [ %__overflow_area_pointer.next20, %vaarg.on_stack17 ], [ %__new_saved_reg_area_pointer15, %vaarg.end ]
-  %vaarg.addr22.in = phi i8* [ %__overflow_area_pointer.next6, %vaarg.on_stack17 ], [ %storemerge32, %vaarg.end ]
-  store i8* %storemerge, i8** %__current_saved_reg_area_pointer_p, align 8
-  %vaarg.addr22 = bitcast i8* %vaarg.addr22.in to i32*
-  %4 = load i32, i32* %vaarg.addr22, align 4
-  %add23 = add nsw i32 %add8, %4
-  call void @llvm.va_end(i8* %arraydecay1)
+  %storemerge = phi ptr [ %__overflow_area_pointer.next20, %vaarg.on_stack17 ], [ %__new_saved_reg_area_pointer15, %vaarg.end ]
+  %vaarg.addr22.in = phi ptr [ %__overflow_area_pointer.next6, %vaarg.on_stack17 ], [ %storemerge32, %vaarg.end ]
+  store ptr %storemerge, ptr %ap, align 8
+  %3 = load i32, ptr %vaarg.addr22.in, align 4
+  %add23 = add nsw i32 %add8, %3
+  call void @llvm.va_end(ptr %ap)
   ret i32 %add23
 }
 
 ; Function Attrs: nounwind
-declare void @llvm.va_start(i8*) #1
+declare void @llvm.va_start(ptr) #1
 
 ; Function Attrs: nounwind
-declare void @llvm.va_end(i8*) #1
+declare void @llvm.va_end(ptr) #1
 
 ; Function Attrs: nounwind
 define i32 @main() #0 {
 entry:
-  %call = tail call i32 (i32, ...) @foo(i32 undef, i32 2, %struct.AAA* byval(%struct.AAA) align 4 @aaa, i32 4)
-  %call1 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str, i32 0, i32 0), i32 %call) #1
+  %call = tail call i32 (i32, ...) @foo(i32 undef, i32 2, ptr byval(%struct.AAA) align 4 @aaa, i32 4)
+  %call1 = tail call i32 (ptr, ...) @printf(ptr @.str, i32 %call) #1
   ret i32 %call
 }
 
 ; Function Attrs: nounwind
-declare i32 @printf(i8* nocapture readonly, ...) #0
+declare i32 @printf(ptr nocapture readonly, ...) #0
 
 attributes #0 = { nounwind }
 

diff  --git a/llvm/test/CodeGen/Hexagon/vararg_align_check.ll b/llvm/test/CodeGen/Hexagon/vararg_align_check.ll
index 647625f4ed1f7..53782ac93af09 100644
--- a/llvm/test/CodeGen/Hexagon/vararg_align_check.ll
+++ b/llvm/test/CodeGen/Hexagon/vararg_align_check.ll
@@ -22,14 +22,14 @@
 
 %struct.AAA = type { i32, i32, i32, i32 }
 %struct.BBB = type { i8, i64, i32 }
-%struct.__va_list_tag = type { i8*, i8*, i8* }
+%struct.__va_list_tag = type { ptr, ptr, ptr }
 
 @aaa = global %struct.AAA { i32 100, i32 200, i32 300, i32 400 }, align 4
 @ddd = global { i8, i64, i32, [4 x i8] } { i8 1, i64 1000000, i32 5, [4 x i8] undef }, align 8
 @.str = private unnamed_addr constant [13 x i8] c"result = %d\0A\00", align 1
 
 ; Function Attrs: nounwind
-define i32 @foo(i32 %xx, %struct.BBB* byval(%struct.BBB) align 8 %eee, ...) #0 {
+define i32 @foo(i32 %xx, ptr byval(%struct.BBB) align 8 %eee, ...) #0 {
 entry:
   %xx.addr = alloca i32, align 4
   %ap = alloca [1 x %struct.__va_list_tag], align 8
@@ -37,128 +37,111 @@ entry:
   %k = alloca i64, align 8
   %ret = alloca i32, align 4
   %bbb = alloca %struct.AAA, align 4
-  store i32 %xx, i32* %xx.addr, align 4
-  store i32 0, i32* %ret, align 4
-  %x = getelementptr inbounds %struct.BBB, %struct.BBB* %eee, i32 0, i32 0
-  %0 = load i8, i8* %x, align 1
+  store i32 %xx, ptr %xx.addr, align 4
+  store i32 0, ptr %ret, align 4
+  %0 = load i8, ptr %eee, align 1
   %tobool = trunc i8 %0 to i1
   br i1 %tobool, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  store i32 1, i32* %ret, align 4
+  store i32 1, ptr %ret, align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry
-  %arraydecay = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0
-  %arraydecay1 = bitcast %struct.__va_list_tag* %arraydecay to i8*
-  call void @llvm.va_start(i8* %arraydecay1)
-  %arraydecay2 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0
+  call void @llvm.va_start(ptr %ap)
   br label %vaarg.maybe_reg
 
 vaarg.maybe_reg:                                  ; preds = %if.end
-  %__current_saved_reg_area_pointer_p = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay2, i32 0, i32 0
-  %__current_saved_reg_area_pointer = load i8*, i8** %__current_saved_reg_area_pointer_p
-  %__saved_reg_area_end_pointer_p = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay2, i32 0, i32 1
-  %__saved_reg_area_end_pointer = load i8*, i8** %__saved_reg_area_end_pointer_p
-  %1 = ptrtoint i8* %__current_saved_reg_area_pointer to i32
+  %__current_saved_reg_area_pointer = load ptr, ptr %ap
+  %__saved_reg_area_end_pointer_p = getelementptr inbounds %struct.__va_list_tag, ptr %ap, i32 0, i32 1
+  %__saved_reg_area_end_pointer = load ptr, ptr %__saved_reg_area_end_pointer_p
+  %1 = ptrtoint ptr %__current_saved_reg_area_pointer to i32
   %align_current_saved_reg_area_pointer = add i32 %1, 7
   %align_current_saved_reg_area_pointer3 = and i32 %align_current_saved_reg_area_pointer, -8
-  %align_current_saved_reg_area_pointer4 = inttoptr i32 %align_current_saved_reg_area_pointer3 to i8*
-  %__new_saved_reg_area_pointer = getelementptr i8, i8* %align_current_saved_reg_area_pointer4, i32 8
-  %2 = icmp sgt i8* %__new_saved_reg_area_pointer, %__saved_reg_area_end_pointer
+  %align_current_saved_reg_area_pointer4 = inttoptr i32 %align_current_saved_reg_area_pointer3 to ptr
+  %__new_saved_reg_area_pointer = getelementptr i8, ptr %align_current_saved_reg_area_pointer4, i32 8
+  %2 = icmp sgt ptr %__new_saved_reg_area_pointer, %__saved_reg_area_end_pointer
   br i1 %2, label %vaarg.on_stack, label %vaarg.in_reg
 
 vaarg.in_reg:                                     ; preds = %vaarg.maybe_reg
-  %3 = bitcast i8* %align_current_saved_reg_area_pointer4 to i64*
-  store i8* %__new_saved_reg_area_pointer, i8** %__current_saved_reg_area_pointer_p
+  store ptr %__new_saved_reg_area_pointer, ptr %ap
   br label %vaarg.end
 
 vaarg.on_stack:                                   ; preds = %vaarg.maybe_reg
-  %__overflow_area_pointer_p = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay2, i32 0, i32 2
-  %__overflow_area_pointer = load i8*, i8** %__overflow_area_pointer_p
-  %4 = ptrtoint i8* %__overflow_area_pointer to i32
-  %align_overflow_area_pointer = add i32 %4, 7
+  %__overflow_area_pointer_p = getelementptr inbounds %struct.__va_list_tag, ptr %ap, i32 0, i32 2
+  %__overflow_area_pointer = load ptr, ptr %__overflow_area_pointer_p
+  %3 = ptrtoint ptr %__overflow_area_pointer to i32
+  %align_overflow_area_pointer = add i32 %3, 7
   %align_overflow_area_pointer5 = and i32 %align_overflow_area_pointer, -8
-  %align_overflow_area_pointer6 = inttoptr i32 %align_overflow_area_pointer5 to i8*
-  %__overflow_area_pointer.next = getelementptr i8, i8* %align_overflow_area_pointer6, i32 8
-  store i8* %__overflow_area_pointer.next, i8** %__overflow_area_pointer_p
-  store i8* %__overflow_area_pointer.next, i8** %__current_saved_reg_area_pointer_p
-  %5 = bitcast i8* %align_overflow_area_pointer6 to i64*
+  %align_overflow_area_pointer6 = inttoptr i32 %align_overflow_area_pointer5 to ptr
+  %__overflow_area_pointer.next = getelementptr i8, ptr %align_overflow_area_pointer6, i32 8
+  store ptr %__overflow_area_pointer.next, ptr %__overflow_area_pointer_p
+  store ptr %__overflow_area_pointer.next, ptr %ap
   br label %vaarg.end
 
 vaarg.end:                                        ; preds = %vaarg.on_stack, %vaarg.in_reg
-  %vaarg.addr = phi i64* [ %3, %vaarg.in_reg ], [ %5, %vaarg.on_stack ]
-  %6 = load i64, i64* %vaarg.addr
-  store i64 %6, i64* %k, align 8
-  %7 = load i64, i64* %k, align 8
-  %conv = trunc i64 %7 to i32
+  %vaarg.addr = phi ptr [ %align_current_saved_reg_area_pointer4, %vaarg.in_reg ], [ %align_overflow_area_pointer6, %vaarg.on_stack ]
+  %4 = load i64, ptr %vaarg.addr
+  store i64 %4, ptr %k, align 8
+  %5 = load i64, ptr %k, align 8
+  %conv = trunc i64 %5 to i32
   %div = sdiv i32 %conv, 1000
-  %8 = load i32, i32* %ret, align 4
-  %add = add nsw i32 %8, %div
-  store i32 %add, i32* %ret, align 4
-  %arraydecay7 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0
-  %__overflow_area_pointer_p8 = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay7, i32 0, i32 2
-  %__overflow_area_pointer9 = load i8*, i8** %__overflow_area_pointer_p8
-  %9 = bitcast i8* %__overflow_area_pointer9 to %struct.AAA*
-  %__overflow_area_pointer.next10 = getelementptr i8, i8* %__overflow_area_pointer9, i32 16
-  store i8* %__overflow_area_pointer.next10, i8** %__overflow_area_pointer_p8
-  %10 = bitcast %struct.AAA* %bbb to i8*
-  %11 = bitcast %struct.AAA* %9 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %10, i8* %11, i32 16, i32 4, i1 false)
-  %d11 = getelementptr inbounds %struct.AAA, %struct.AAA* %bbb, i32 0, i32 3
-  %12 = load i32, i32* %d11, align 4
-  %13 = load i32, i32* %ret, align 4
-  %add12 = add nsw i32 %13, %12
-  store i32 %add12, i32* %ret, align 4
-  %arraydecay13 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0
+  %6 = load i32, ptr %ret, align 4
+  %add = add nsw i32 %6, %div
+  store i32 %add, ptr %ret, align 4
+  %__overflow_area_pointer_p8 = getelementptr inbounds %struct.__va_list_tag, ptr %ap, i32 0, i32 2
+  %__overflow_area_pointer9 = load ptr, ptr %__overflow_area_pointer_p8
+  %__overflow_area_pointer.next10 = getelementptr i8, ptr %__overflow_area_pointer9, i32 16
+  store ptr %__overflow_area_pointer.next10, ptr %__overflow_area_pointer_p8
+  call void @llvm.memcpy.p0.p0.i32(ptr %bbb, ptr %__overflow_area_pointer9, i32 16, i32 4, i1 false)
+  %d11 = getelementptr inbounds %struct.AAA, ptr %bbb, i32 0, i32 3
+  %7 = load i32, ptr %d11, align 4
+  %8 = load i32, ptr %ret, align 4
+  %add12 = add nsw i32 %8, %7
+  store i32 %add12, ptr %ret, align 4
   br label %vaarg.maybe_reg14
 
 vaarg.maybe_reg14:                                ; preds = %vaarg.end
-  %__current_saved_reg_area_pointer_p15 = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay13, i32 0, i32 0
-  %__current_saved_reg_area_pointer16 = load i8*, i8** %__current_saved_reg_area_pointer_p15
-  %__saved_reg_area_end_pointer_p17 = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay13, i32 0, i32 1
-  %__saved_reg_area_end_pointer18 = load i8*, i8** %__saved_reg_area_end_pointer_p17
-  %__new_saved_reg_area_pointer19 = getelementptr i8, i8* %__current_saved_reg_area_pointer16, i32 4
-  %14 = icmp sgt i8* %__new_saved_reg_area_pointer19, %__saved_reg_area_end_pointer18
-  br i1 %14, label %vaarg.on_stack21, label %vaarg.in_reg20
+  %__current_saved_reg_area_pointer16 = load ptr, ptr %ap
+  %__saved_reg_area_end_pointer_p17 = getelementptr inbounds %struct.__va_list_tag, ptr %ap, i32 0, i32 1
+  %__saved_reg_area_end_pointer18 = load ptr, ptr %__saved_reg_area_end_pointer_p17
+  %__new_saved_reg_area_pointer19 = getelementptr i8, ptr %__current_saved_reg_area_pointer16, i32 4
+  %9 = icmp sgt ptr %__new_saved_reg_area_pointer19, %__saved_reg_area_end_pointer18
+  br i1 %9, label %vaarg.on_stack21, label %vaarg.in_reg20
 
 vaarg.in_reg20:                                   ; preds = %vaarg.maybe_reg14
-  %15 = bitcast i8* %__current_saved_reg_area_pointer16 to i32*
-  store i8* %__new_saved_reg_area_pointer19, i8** %__current_saved_reg_area_pointer_p15
+  store ptr %__new_saved_reg_area_pointer19, ptr %ap
   br label %vaarg.end25
 
 vaarg.on_stack21:                                 ; preds = %vaarg.maybe_reg14
-  %__overflow_area_pointer_p22 = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay13, i32 0, i32 2
-  %__overflow_area_pointer23 = load i8*, i8** %__overflow_area_pointer_p22
-  %__overflow_area_pointer.next24 = getelementptr i8, i8* %__overflow_area_pointer23, i32 4
-  store i8* %__overflow_area_pointer.next24, i8** %__overflow_area_pointer_p22
-  store i8* %__overflow_area_pointer.next24, i8** %__current_saved_reg_area_pointer_p15
-  %16 = bitcast i8* %__overflow_area_pointer23 to i32*
+  %__overflow_area_pointer_p22 = getelementptr inbounds %struct.__va_list_tag, ptr %ap, i32 0, i32 2
+  %__overflow_area_pointer23 = load ptr, ptr %__overflow_area_pointer_p22
+  %__overflow_area_pointer.next24 = getelementptr i8, ptr %__overflow_area_pointer23, i32 4
+  store ptr %__overflow_area_pointer.next24, ptr %__overflow_area_pointer_p22
+  store ptr %__overflow_area_pointer.next24, ptr %ap
   br label %vaarg.end25
 
 vaarg.end25:                                      ; preds = %vaarg.on_stack21, %vaarg.in_reg20
-  %vaarg.addr26 = phi i32* [ %15, %vaarg.in_reg20 ], [ %16, %vaarg.on_stack21 ]
-  %17 = load i32, i32* %vaarg.addr26
-  store i32 %17, i32* %d, align 4
-  %18 = load i32, i32* %d, align 4
-  %19 = load i32, i32* %ret, align 4
-  %add27 = add nsw i32 %19, %18
-  store i32 %add27, i32* %ret, align 4
-  %arraydecay28 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0
-  %arraydecay2829 = bitcast %struct.__va_list_tag* %arraydecay28 to i8*
-  call void @llvm.va_end(i8* %arraydecay2829)
-  %20 = load i32, i32* %ret, align 4
-  ret i32 %20
+  %vaarg.addr26 = phi ptr [ %__current_saved_reg_area_pointer16, %vaarg.in_reg20 ], [ %__overflow_area_pointer23, %vaarg.on_stack21 ]
+  %10 = load i32, ptr %vaarg.addr26
+  store i32 %10, ptr %d, align 4
+  %11 = load i32, ptr %d, align 4
+  %12 = load i32, ptr %ret, align 4
+  %add27 = add nsw i32 %12, %11
+  store i32 %add27, ptr %ret, align 4
+  call void @llvm.va_end(ptr %ap)
+  %13 = load i32, ptr %ret, align 4
+  ret i32 %13
 }
 
 ; Function Attrs: nounwind
-declare void @llvm.va_start(i8*) #1
+declare void @llvm.va_start(ptr) #1
 
 ; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #1
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32, i32, i1) #1
 
 ; Function Attrs: nounwind
-declare void @llvm.va_end(i8*) #1
+declare void @llvm.va_end(ptr) #1
 
 ; Function Attrs: nounwind
 define i32 @main() #0 {
@@ -166,18 +149,18 @@ entry:
   %retval = alloca i32, align 4
   %x = alloca i32, align 4
   %m = alloca i64, align 8
-  store i32 0, i32* %retval
-  store i64 1000000, i64* %m, align 8
-  %0 = load i64, i64* %m, align 8
-  %call = call i32 (i32, %struct.BBB*, ...) @foo(i32 1, %struct.BBB* byval(%struct.BBB) align 8 bitcast ({ i8, i64, i32, [4 x i8] }* @ddd to %struct.BBB*), i64 %0, %struct.AAA* byval(%struct.AAA) align 4 @aaa, i32 4)
-  store i32 %call, i32* %x, align 4
-  %1 = load i32, i32* %x, align 4
-  %call1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str, i32 0, i32 0), i32 %1)
-  %2 = load i32, i32* %x, align 4
+  store i32 0, ptr %retval
+  store i64 1000000, ptr %m, align 8
+  %0 = load i64, ptr %m, align 8
+  %call = call i32 (i32, ptr, ...) @foo(i32 1, ptr byval(%struct.BBB) align 8 @ddd, i64 %0, ptr byval(%struct.AAA) align 4 @aaa, i32 4)
+  store i32 %call, ptr %x, align 4
+  %1 = load i32, ptr %x, align 4
+  %call1 = call i32 (ptr, ...) @printf(ptr @.str, i32 %1)
+  %2 = load i32, ptr %x, align 4
   ret i32 %2
 }
 
-declare i32 @printf(i8*, ...) #2
+declare i32 @printf(ptr, ...) #2
 
 attributes #1 = { nounwind }
 

diff  --git a/llvm/test/CodeGen/Hexagon/vararg_double_onstack.ll b/llvm/test/CodeGen/Hexagon/vararg_double_onstack.ll
index 613fce8bb0ef3..73737b8077f8d 100644
--- a/llvm/test/CodeGen/Hexagon/vararg_double_onstack.ll
+++ b/llvm/test/CodeGen/Hexagon/vararg_double_onstack.ll
@@ -11,7 +11,7 @@
 ; CHECK: r29 = add(r29,#8)
 
 %struct.AAA = type { i32, i32, i32, i32 }
-%struct.__va_list_tag = type { i8*, i8*, i8* }
+%struct.__va_list_tag = type { ptr, ptr, ptr }
 
 @aaa = global %struct.AAA { i32 100, i32 200, i32 300, i32 400 }, align 4
 @.str = private unnamed_addr constant [13 x i8] c"result = %d\0A\00", align 1
@@ -28,164 +28,144 @@ entry:
   %d = alloca i32, align 4
   %ret = alloca i32, align 4
   %bbb = alloca %struct.AAA, align 4
-  store i32 %xx, i32* %xx.addr, align 4
-  store i32 %a, i32* %a.addr, align 4
-  store i32 %b, i32* %b.addr, align 4
-  store i32 %c, i32* %c.addr, align 4
-  store i32 %x, i32* %x.addr, align 4
-  store i32 0, i32* %ret, align 4
-  %arraydecay = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0
-  %arraydecay1 = bitcast %struct.__va_list_tag* %arraydecay to i8*
-  call void @llvm.va_start(i8* %arraydecay1)
-  %arraydecay2 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0
+  store i32 %xx, ptr %xx.addr, align 4
+  store i32 %a, ptr %a.addr, align 4
+  store i32 %b, ptr %b.addr, align 4
+  store i32 %c, ptr %c.addr, align 4
+  store i32 %x, ptr %x.addr, align 4
+  store i32 0, ptr %ret, align 4
+  call void @llvm.va_start(ptr %ap)
   br label %vaarg.maybe_reg
 
 vaarg.maybe_reg:                                  ; preds = %entry
-  %__current_saved_reg_area_pointer_p = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay2, i32 0, i32 0
-  %__current_saved_reg_area_pointer = load i8*, i8** %__current_saved_reg_area_pointer_p
-  %__saved_reg_area_end_pointer_p = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay2, i32 0, i32 1
-  %__saved_reg_area_end_pointer = load i8*, i8** %__saved_reg_area_end_pointer_p
-  %0 = ptrtoint i8* %__current_saved_reg_area_pointer to i32
+  %__current_saved_reg_area_pointer = load ptr, ptr %ap
+  %__saved_reg_area_end_pointer_p = getelementptr inbounds %struct.__va_list_tag, ptr %ap, i32 0, i32 1
+  %__saved_reg_area_end_pointer = load ptr, ptr %__saved_reg_area_end_pointer_p
+  %0 = ptrtoint ptr %__current_saved_reg_area_pointer to i32
   %align_current_saved_reg_area_pointer = add i32 %0, 7
   %align_current_saved_reg_area_pointer3 = and i32 %align_current_saved_reg_area_pointer, -8
-  %align_current_saved_reg_area_pointer4 = inttoptr i32 %align_current_saved_reg_area_pointer3 to i8*
-  %__new_saved_reg_area_pointer = getelementptr i8, i8* %align_current_saved_reg_area_pointer4, i32 8
-  %1 = icmp sgt i8* %__new_saved_reg_area_pointer, %__saved_reg_area_end_pointer
+  %align_current_saved_reg_area_pointer4 = inttoptr i32 %align_current_saved_reg_area_pointer3 to ptr
+  %__new_saved_reg_area_pointer = getelementptr i8, ptr %align_current_saved_reg_area_pointer4, i32 8
+  %1 = icmp sgt ptr %__new_saved_reg_area_pointer, %__saved_reg_area_end_pointer
   br i1 %1, label %vaarg.on_stack, label %vaarg.in_reg
 
 vaarg.in_reg:                                     ; preds = %vaarg.maybe_reg
-  %2 = bitcast i8* %align_current_saved_reg_area_pointer4 to i64*
-  store i8* %__new_saved_reg_area_pointer, i8** %__current_saved_reg_area_pointer_p
+  store ptr %__new_saved_reg_area_pointer, ptr %ap
   br label %vaarg.end
 
 vaarg.on_stack:                                   ; preds = %vaarg.maybe_reg
-  %__overflow_area_pointer_p = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay2, i32 0, i32 2
-  %__overflow_area_pointer = load i8*, i8** %__overflow_area_pointer_p
-  %3 = ptrtoint i8* %__overflow_area_pointer to i32
-  %align_overflow_area_pointer = add i32 %3, 7
+  %__overflow_area_pointer_p = getelementptr inbounds %struct.__va_list_tag, ptr %ap, i32 0, i32 2
+  %__overflow_area_pointer = load ptr, ptr %__overflow_area_pointer_p
+  %2 = ptrtoint ptr %__overflow_area_pointer to i32
+  %align_overflow_area_pointer = add i32 %2, 7
   %align_overflow_area_pointer5 = and i32 %align_overflow_area_pointer, -8
-  %align_overflow_area_pointer6 = inttoptr i32 %align_overflow_area_pointer5 to i8*
-  %__overflow_area_pointer.next = getelementptr i8, i8* %align_overflow_area_pointer6, i32 8
-  store i8* %__overflow_area_pointer.next, i8** %__overflow_area_pointer_p
-  store i8* %__overflow_area_pointer.next, i8** %__current_saved_reg_area_pointer_p
-  %4 = bitcast i8* %align_overflow_area_pointer6 to i64*
+  %align_overflow_area_pointer6 = inttoptr i32 %align_overflow_area_pointer5 to ptr
+  %__overflow_area_pointer.next = getelementptr i8, ptr %align_overflow_area_pointer6, i32 8
+  store ptr %__overflow_area_pointer.next, ptr %__overflow_area_pointer_p
+  store ptr %__overflow_area_pointer.next, ptr %ap
   br label %vaarg.end
 
 vaarg.end:                                        ; preds = %vaarg.on_stack, %vaarg.in_reg
-  %vaarg.addr = phi i64* [ %2, %vaarg.in_reg ], [ %4, %vaarg.on_stack ]
-  %5 = load i64, i64* %vaarg.addr
-  %conv = trunc i64 %5 to i32
-  store i32 %conv, i32* %d, align 4
-  %6 = load i32, i32* %d, align 4
-  %7 = load i32, i32* %ret, align 4
-  %add = add nsw i32 %7, %6
-  store i32 %add, i32* %ret, align 4
-  %arraydecay7 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0
-  %__overflow_area_pointer_p8 = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay7, i32 0, i32 2
-  %__overflow_area_pointer9 = load i8*, i8** %__overflow_area_pointer_p8
-  %8 = bitcast i8* %__overflow_area_pointer9 to %struct.AAA*
-  %__overflow_area_pointer.next10 = getelementptr i8, i8* %__overflow_area_pointer9, i32 16
-  store i8* %__overflow_area_pointer.next10, i8** %__overflow_area_pointer_p8
-  %9 = bitcast %struct.AAA* %bbb to i8*
-  %10 = bitcast %struct.AAA* %8 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %9, i8* %10, i32 16, i32 4, i1 false)
-  %d11 = getelementptr inbounds %struct.AAA, %struct.AAA* %bbb, i32 0, i32 3
-  %11 = load i32, i32* %d11, align 4
-  %12 = load i32, i32* %ret, align 4
-  %add12 = add nsw i32 %12, %11
-  store i32 %add12, i32* %ret, align 4
-  %arraydecay13 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0
+  %vaarg.addr = phi ptr [ %align_current_saved_reg_area_pointer4, %vaarg.in_reg ], [ %align_overflow_area_pointer6, %vaarg.on_stack ]
+  %3 = load i64, ptr %vaarg.addr
+  %conv = trunc i64 %3 to i32
+  store i32 %conv, ptr %d, align 4
+  %4 = load i32, ptr %d, align 4
+  %5 = load i32, ptr %ret, align 4
+  %add = add nsw i32 %5, %4
+  store i32 %add, ptr %ret, align 4
+  %__overflow_area_pointer_p8 = getelementptr inbounds %struct.__va_list_tag, ptr %ap, i32 0, i32 2
+  %__overflow_area_pointer9 = load ptr, ptr %__overflow_area_pointer_p8
+  %__overflow_area_pointer.next10 = getelementptr i8, ptr %__overflow_area_pointer9, i32 16
+  store ptr %__overflow_area_pointer.next10, ptr %__overflow_area_pointer_p8
+  call void @llvm.memcpy.p0.p0.i32(ptr %bbb, ptr %__overflow_area_pointer9, i32 16, i32 4, i1 false)
+  %d11 = getelementptr inbounds %struct.AAA, ptr %bbb, i32 0, i32 3
+  %6 = load i32, ptr %d11, align 4
+  %7 = load i32, ptr %ret, align 4
+  %add12 = add nsw i32 %7, %6
+  store i32 %add12, ptr %ret, align 4
   br label %vaarg.maybe_reg14
 
 vaarg.maybe_reg14:                                ; preds = %vaarg.end
-  %__current_saved_reg_area_pointer_p15 = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay13, i32 0, i32 0
-  %__current_saved_reg_area_pointer16 = load i8*, i8** %__current_saved_reg_area_pointer_p15
-  %__saved_reg_area_end_pointer_p17 = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay13, i32 0, i32 1
-  %__saved_reg_area_end_pointer18 = load i8*, i8** %__saved_reg_area_end_pointer_p17
-  %__new_saved_reg_area_pointer19 = getelementptr i8, i8* %__current_saved_reg_area_pointer16, i32 4
-  %13 = icmp sgt i8* %__new_saved_reg_area_pointer19, %__saved_reg_area_end_pointer18
-  br i1 %13, label %vaarg.on_stack21, label %vaarg.in_reg20
+  %__current_saved_reg_area_pointer16 = load ptr, ptr %ap
+  %__saved_reg_area_end_pointer_p17 = getelementptr inbounds %struct.__va_list_tag, ptr %ap, i32 0, i32 1
+  %__saved_reg_area_end_pointer18 = load ptr, ptr %__saved_reg_area_end_pointer_p17
+  %__new_saved_reg_area_pointer19 = getelementptr i8, ptr %__current_saved_reg_area_pointer16, i32 4
+  %8 = icmp sgt ptr %__new_saved_reg_area_pointer19, %__saved_reg_area_end_pointer18
+  br i1 %8, label %vaarg.on_stack21, label %vaarg.in_reg20
 
 vaarg.in_reg20:                                   ; preds = %vaarg.maybe_reg14
-  %14 = bitcast i8* %__current_saved_reg_area_pointer16 to i32*
-  store i8* %__new_saved_reg_area_pointer19, i8** %__current_saved_reg_area_pointer_p15
+  store ptr %__new_saved_reg_area_pointer19, ptr %ap
   br label %vaarg.end25
 
 vaarg.on_stack21:                                 ; preds = %vaarg.maybe_reg14
-  %__overflow_area_pointer_p22 = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay13, i32 0, i32 2
-  %__overflow_area_pointer23 = load i8*, i8** %__overflow_area_pointer_p22
-  %__overflow_area_pointer.next24 = getelementptr i8, i8* %__overflow_area_pointer23, i32 4
-  store i8* %__overflow_area_pointer.next24, i8** %__overflow_area_pointer_p22
-  store i8* %__overflow_area_pointer.next24, i8** %__current_saved_reg_area_pointer_p15
-  %15 = bitcast i8* %__overflow_area_pointer23 to i32*
+  %__overflow_area_pointer_p22 = getelementptr inbounds %struct.__va_list_tag, ptr %ap, i32 0, i32 2
+  %__overflow_area_pointer23 = load ptr, ptr %__overflow_area_pointer_p22
+  %__overflow_area_pointer.next24 = getelementptr i8, ptr %__overflow_area_pointer23, i32 4
+  store ptr %__overflow_area_pointer.next24, ptr %__overflow_area_pointer_p22
+  store ptr %__overflow_area_pointer.next24, ptr %ap
   br label %vaarg.end25
 
 vaarg.end25:                                      ; preds = %vaarg.on_stack21, %vaarg.in_reg20
-  %vaarg.addr26 = phi i32* [ %14, %vaarg.in_reg20 ], [ %15, %vaarg.on_stack21 ]
-  %16 = load i32, i32* %vaarg.addr26
-  store i32 %16, i32* %d, align 4
-  %17 = load i32, i32* %d, align 4
-  %18 = load i32, i32* %ret, align 4
-  %add27 = add nsw i32 %18, %17
-  store i32 %add27, i32* %ret, align 4
-  %arraydecay28 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0
+  %vaarg.addr26 = phi ptr [ %__current_saved_reg_area_pointer16, %vaarg.in_reg20 ], [ %__overflow_area_pointer23, %vaarg.on_stack21 ]
+  %9 = load i32, ptr %vaarg.addr26
+  store i32 %9, ptr %d, align 4
+  %10 = load i32, ptr %d, align 4
+  %11 = load i32, ptr %ret, align 4
+  %add27 = add nsw i32 %11, %10
+  store i32 %add27, ptr %ret, align 4
   br label %vaarg.maybe_reg29
 
 vaarg.maybe_reg29:                                ; preds = %vaarg.end25
-  %__current_saved_reg_area_pointer_p30 = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay28, i32 0, i32 0
-  %__current_saved_reg_area_pointer31 = load i8*, i8** %__current_saved_reg_area_pointer_p30
-  %__saved_reg_area_end_pointer_p32 = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay28, i32 0, i32 1
-  %__saved_reg_area_end_pointer33 = load i8*, i8** %__saved_reg_area_end_pointer_p32
-  %19 = ptrtoint i8* %__current_saved_reg_area_pointer31 to i32
-  %align_current_saved_reg_area_pointer34 = add i32 %19, 7
+  %__current_saved_reg_area_pointer31 = load ptr, ptr %ap
+  %__saved_reg_area_end_pointer_p32 = getelementptr inbounds %struct.__va_list_tag, ptr %ap, i32 0, i32 1
+  %__saved_reg_area_end_pointer33 = load ptr, ptr %__saved_reg_area_end_pointer_p32
+  %12 = ptrtoint ptr %__current_saved_reg_area_pointer31 to i32
+  %align_current_saved_reg_area_pointer34 = add i32 %12, 7
   %align_current_saved_reg_area_pointer35 = and i32 %align_current_saved_reg_area_pointer34, -8
-  %align_current_saved_reg_area_pointer36 = inttoptr i32 %align_current_saved_reg_area_pointer35 to i8*
-  %__new_saved_reg_area_pointer37 = getelementptr i8, i8* %align_current_saved_reg_area_pointer36, i32 8
-  %20 = icmp sgt i8* %__new_saved_reg_area_pointer37, %__saved_reg_area_end_pointer33
-  br i1 %20, label %vaarg.on_stack39, label %vaarg.in_reg38
+  %align_current_saved_reg_area_pointer36 = inttoptr i32 %align_current_saved_reg_area_pointer35 to ptr
+  %__new_saved_reg_area_pointer37 = getelementptr i8, ptr %align_current_saved_reg_area_pointer36, i32 8
+  %13 = icmp sgt ptr %__new_saved_reg_area_pointer37, %__saved_reg_area_end_pointer33
+  br i1 %13, label %vaarg.on_stack39, label %vaarg.in_reg38
 
 vaarg.in_reg38:                                   ; preds = %vaarg.maybe_reg29
-  %21 = bitcast i8* %align_current_saved_reg_area_pointer36 to i64*
-  store i8* %__new_saved_reg_area_pointer37, i8** %__current_saved_reg_area_pointer_p30
+  store ptr %__new_saved_reg_area_pointer37, ptr %ap
   br label %vaarg.end46
 
 vaarg.on_stack39:                                 ; preds = %vaarg.maybe_reg29
-  %__overflow_area_pointer_p40 = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay28, i32 0, i32 2
-  %__overflow_area_pointer41 = load i8*, i8** %__overflow_area_pointer_p40
-  %22 = ptrtoint i8* %__overflow_area_pointer41 to i32
-  %align_overflow_area_pointer42 = add i32 %22, 7
+  %__overflow_area_pointer_p40 = getelementptr inbounds %struct.__va_list_tag, ptr %ap, i32 0, i32 2
+  %__overflow_area_pointer41 = load ptr, ptr %__overflow_area_pointer_p40
+  %14 = ptrtoint ptr %__overflow_area_pointer41 to i32
+  %align_overflow_area_pointer42 = add i32 %14, 7
   %align_overflow_area_pointer43 = and i32 %align_overflow_area_pointer42, -8
-  %align_overflow_area_pointer44 = inttoptr i32 %align_overflow_area_pointer43 to i8*
-  %__overflow_area_pointer.next45 = getelementptr i8, i8* %align_overflow_area_pointer44, i32 8
-  store i8* %__overflow_area_pointer.next45, i8** %__overflow_area_pointer_p40
-  store i8* %__overflow_area_pointer.next45, i8** %__current_saved_reg_area_pointer_p30
-  %23 = bitcast i8* %align_overflow_area_pointer44 to i64*
+  %align_overflow_area_pointer44 = inttoptr i32 %align_overflow_area_pointer43 to ptr
+  %__overflow_area_pointer.next45 = getelementptr i8, ptr %align_overflow_area_pointer44, i32 8
+  store ptr %__overflow_area_pointer.next45, ptr %__overflow_area_pointer_p40
+  store ptr %__overflow_area_pointer.next45, ptr %ap
   br label %vaarg.end46
 
 vaarg.end46:                                      ; preds = %vaarg.on_stack39, %vaarg.in_reg38
-  %vaarg.addr47 = phi i64* [ %21, %vaarg.in_reg38 ], [ %23, %vaarg.on_stack39 ]
-  %24 = load i64, i64* %vaarg.addr47
-  %conv48 = trunc i64 %24 to i32
-  store i32 %conv48, i32* %d, align 4
-  %25 = load i32, i32* %d, align 4
-  %26 = load i32, i32* %ret, align 4
-  %add49 = add nsw i32 %26, %25
-  store i32 %add49, i32* %ret, align 4
-  %arraydecay50 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0
-  %arraydecay5051 = bitcast %struct.__va_list_tag* %arraydecay50 to i8*
-  call void @llvm.va_end(i8* %arraydecay5051)
-  %27 = load i32, i32* %ret, align 4
-  ret i32 %27
+  %vaarg.addr47 = phi ptr [ %align_current_saved_reg_area_pointer36, %vaarg.in_reg38 ], [ %align_overflow_area_pointer44, %vaarg.on_stack39 ]
+  %15 = load i64, ptr %vaarg.addr47
+  %conv48 = trunc i64 %15 to i32
+  store i32 %conv48, ptr %d, align 4
+  %16 = load i32, ptr %d, align 4
+  %17 = load i32, ptr %ret, align 4
+  %add49 = add nsw i32 %17, %16
+  store i32 %add49, ptr %ret, align 4
+  call void @llvm.va_end(ptr %ap)
+  %18 = load i32, ptr %ret, align 4
+  ret i32 %18
 }
 
 ; Function Attrs: nounwind
-declare void @llvm.va_start(i8*) #1
+declare void @llvm.va_start(ptr) #1
 
 ; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #1
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32, i32, i1) #1
 
 ; Function Attrs: nounwind
-declare void @llvm.va_end(i8*) #1
+declare void @llvm.va_end(ptr) #1
 
 ; Function Attrs: nounwind
 define i32 @main() #0 {
@@ -193,19 +173,19 @@ entry:
   %retval = alloca i32, align 4
   %x = alloca i32, align 4
   %y = alloca i64, align 8
-  store i32 0, i32* %retval
-  store i64 1000000, i64* %y, align 8
-  %0 = load i64, i64* %y, align 8
-  %1 = load i64, i64* %y, align 8
-  %call = call i32 (i32, i32, i32, i32, i32, ...) @foo(i32 1, i32 2, i32 3, i32 4, i32 5, i64 %0, %struct.AAA* byval(%struct.AAA) align 4 @aaa, i32 4, i64 %1)
-  store i32 %call, i32* %x, align 4
-  %2 = load i32, i32* %x, align 4
-  %call1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str, i32 0, i32 0), i32 %2)
-  %3 = load i32, i32* %x, align 4
+  store i32 0, ptr %retval
+  store i64 1000000, ptr %y, align 8
+  %0 = load i64, ptr %y, align 8
+  %1 = load i64, ptr %y, align 8
+  %call = call i32 (i32, i32, i32, i32, i32, ...) @foo(i32 1, i32 2, i32 3, i32 4, i32 5, i64 %0, ptr byval(%struct.AAA) align 4 @aaa, i32 4, i64 %1)
+  store i32 %call, ptr %x, align 4
+  %2 = load i32, ptr %x, align 4
+  %call1 = call i32 (ptr, ...) @printf(ptr @.str, i32 %2)
+  %3 = load i32, ptr %x, align 4
   ret i32 %3
 }
 
-declare i32 @printf(i8*, ...) #2
+declare i32 @printf(ptr, ...) #2
 
 attributes #0 = { nounwind }
 

diff  --git a/llvm/test/CodeGen/Hexagon/vararg_named.ll b/llvm/test/CodeGen/Hexagon/vararg_named.ll
index 413dc2e1d5da7..cccc37cc447e6 100644
--- a/llvm/test/CodeGen/Hexagon/vararg_named.ll
+++ b/llvm/test/CodeGen/Hexagon/vararg_named.ll
@@ -21,7 +21,7 @@
 ; CHECK: r29 = add(r29,#16)
 
 %struct.AAA = type { i32, i32, i32, i32 }
-%struct.__va_list_tag = type { i8*, i8*, i8* }
+%struct.__va_list_tag = type { ptr, ptr, ptr }
 
 @aaa = global %struct.AAA { i32 100, i32 200, i32 300, i32 400 }, align 4
 @xxx = global %struct.AAA { i32 100, i32 200, i32 300, i32 400 }, align 4
@@ -31,7 +31,7 @@
 @.str = private unnamed_addr constant [13 x i8] c"result = %d\0A\00", align 1
 
 ; Function Attrs: nounwind
-define i32 @foo(i32 %xx, i32 %z, i32 %m, %struct.AAA* byval(%struct.AAA) align 4 %bbb, %struct.AAA* byval(%struct.AAA) align 4 %GGG, ...) #0 {
+define i32 @foo(i32 %xx, i32 %z, i32 %m, ptr byval(%struct.AAA) align 4 %bbb, ptr byval(%struct.AAA) align 4 %GGG, ...) #0 {
 entry:
   %xx.addr = alloca i32, align 4
   %z.addr = alloca i32, align 4
@@ -42,167 +42,143 @@ entry:
   %ddd = alloca %struct.AAA, align 4
   %ggg = alloca %struct.AAA, align 4
   %nnn = alloca %struct.AAA, align 4
-  store i32 %xx, i32* %xx.addr, align 4
-  store i32 %z, i32* %z.addr, align 4
-  store i32 %m, i32* %m.addr, align 4
-  store i32 0, i32* %ret, align 4
-  %arraydecay = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0
-  %arraydecay1 = bitcast %struct.__va_list_tag* %arraydecay to i8*
-  call void @llvm.va_start(i8* %arraydecay1)
-  %d2 = getelementptr inbounds %struct.AAA, %struct.AAA* %bbb, i32 0, i32 3
-  %0 = load i32, i32* %d2, align 4
-  %1 = load i32, i32* %ret, align 4
+  store i32 %xx, ptr %xx.addr, align 4
+  store i32 %z, ptr %z.addr, align 4
+  store i32 %m, ptr %m.addr, align 4
+  store i32 0, ptr %ret, align 4
+  call void @llvm.va_start(ptr %ap)
+  %d2 = getelementptr inbounds %struct.AAA, ptr %bbb, i32 0, i32 3
+  %0 = load i32, ptr %d2, align 4
+  %1 = load i32, ptr %ret, align 4
   %add = add nsw i32 %1, %0
-  store i32 %add, i32* %ret, align 4
-  %2 = load i32, i32* %z.addr, align 4
-  %3 = load i32, i32* %ret, align 4
+  store i32 %add, ptr %ret, align 4
+  %2 = load i32, ptr %z.addr, align 4
+  %3 = load i32, ptr %ret, align 4
   %add3 = add nsw i32 %3, %2
-  store i32 %add3, i32* %ret, align 4
-  %arraydecay4 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0
+  store i32 %add3, ptr %ret, align 4
   br label %vaarg.maybe_reg
 
 vaarg.maybe_reg:                                  ; preds = %entry
-  %__current_saved_reg_area_pointer_p = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay4, i32 0, i32 0
-  %__current_saved_reg_area_pointer = load i8*, i8** %__current_saved_reg_area_pointer_p
-  %__saved_reg_area_end_pointer_p = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay4, i32 0, i32 1
-  %__saved_reg_area_end_pointer = load i8*, i8** %__saved_reg_area_end_pointer_p
-  %__new_saved_reg_area_pointer = getelementptr i8, i8* %__current_saved_reg_area_pointer, i32 4
-  %4 = icmp sgt i8* %__new_saved_reg_area_pointer, %__saved_reg_area_end_pointer
+  %__current_saved_reg_area_pointer = load ptr, ptr %ap
+  %__saved_reg_area_end_pointer_p = getelementptr inbounds %struct.__va_list_tag, ptr %ap, i32 0, i32 1
+  %__saved_reg_area_end_pointer = load ptr, ptr %__saved_reg_area_end_pointer_p
+  %__new_saved_reg_area_pointer = getelementptr i8, ptr %__current_saved_reg_area_pointer, i32 4
+  %4 = icmp sgt ptr %__new_saved_reg_area_pointer, %__saved_reg_area_end_pointer
   br i1 %4, label %vaarg.on_stack, label %vaarg.in_reg
 
 vaarg.in_reg:                                     ; preds = %vaarg.maybe_reg
-  %5 = bitcast i8* %__current_saved_reg_area_pointer to i32*
-  store i8* %__new_saved_reg_area_pointer, i8** %__current_saved_reg_area_pointer_p
+  store ptr %__new_saved_reg_area_pointer, ptr %ap
   br label %vaarg.end
 
 vaarg.on_stack:                                   ; preds = %vaarg.maybe_reg
-  %__overflow_area_pointer_p = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay4, i32 0, i32 2
-  %__overflow_area_pointer = load i8*, i8** %__overflow_area_pointer_p
-  %__overflow_area_pointer.next = getelementptr i8, i8* %__overflow_area_pointer, i32 4
-  store i8* %__overflow_area_pointer.next, i8** %__overflow_area_pointer_p
-  store i8* %__overflow_area_pointer.next, i8** %__current_saved_reg_area_pointer_p
-  %6 = bitcast i8* %__overflow_area_pointer to i32*
+  %__overflow_area_pointer_p = getelementptr inbounds %struct.__va_list_tag, ptr %ap, i32 0, i32 2
+  %__overflow_area_pointer = load ptr, ptr %__overflow_area_pointer_p
+  %__overflow_area_pointer.next = getelementptr i8, ptr %__overflow_area_pointer, i32 4
+  store ptr %__overflow_area_pointer.next, ptr %__overflow_area_pointer_p
+  store ptr %__overflow_area_pointer.next, ptr %ap
   br label %vaarg.end
 
 vaarg.end:                                        ; preds = %vaarg.on_stack, %vaarg.in_reg
-  %vaarg.addr = phi i32* [ %5, %vaarg.in_reg ], [ %6, %vaarg.on_stack ]
-  %7 = load i32, i32* %vaarg.addr
-  store i32 %7, i32* %d, align 4
-  %8 = load i32, i32* %d, align 4
-  %9 = load i32, i32* %ret, align 4
-  %add5 = add nsw i32 %9, %8
-  store i32 %add5, i32* %ret, align 4
-  %arraydecay6 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0
-  %__overflow_area_pointer_p7 = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay6, i32 0, i32 2
-  %__overflow_area_pointer8 = load i8*, i8** %__overflow_area_pointer_p7
-  %10 = bitcast i8* %__overflow_area_pointer8 to %struct.AAA*
-  %__overflow_area_pointer.next9 = getelementptr i8, i8* %__overflow_area_pointer8, i32 16
-  store i8* %__overflow_area_pointer.next9, i8** %__overflow_area_pointer_p7
-  %11 = bitcast %struct.AAA* %ddd to i8*
-  %12 = bitcast %struct.AAA* %10 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %11, i8* %12, i32 16, i32 4, i1 false)
-  %d10 = getelementptr inbounds %struct.AAA, %struct.AAA* %ddd, i32 0, i32 3
-  %13 = load i32, i32* %d10, align 4
-  %14 = load i32, i32* %ret, align 4
-  %add11 = add nsw i32 %14, %13
-  store i32 %add11, i32* %ret, align 4
-  %arraydecay12 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0
-  %__overflow_area_pointer_p13 = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay12, i32 0, i32 2
-  %__overflow_area_pointer14 = load i8*, i8** %__overflow_area_pointer_p13
-  %15 = bitcast i8* %__overflow_area_pointer14 to %struct.AAA*
-  %__overflow_area_pointer.next15 = getelementptr i8, i8* %__overflow_area_pointer14, i32 16
-  store i8* %__overflow_area_pointer.next15, i8** %__overflow_area_pointer_p13
-  %16 = bitcast %struct.AAA* %ggg to i8*
-  %17 = bitcast %struct.AAA* %15 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %16, i8* %17, i32 16, i32 4, i1 false)
-  %d16 = getelementptr inbounds %struct.AAA, %struct.AAA* %ggg, i32 0, i32 3
-  %18 = load i32, i32* %d16, align 4
-  %19 = load i32, i32* %ret, align 4
-  %add17 = add nsw i32 %19, %18
-  store i32 %add17, i32* %ret, align 4
-  %arraydecay18 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0
-  %__overflow_area_pointer_p19 = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay18, i32 0, i32 2
-  %__overflow_area_pointer20 = load i8*, i8** %__overflow_area_pointer_p19
-  %20 = bitcast i8* %__overflow_area_pointer20 to %struct.AAA*
-  %__overflow_area_pointer.next21 = getelementptr i8, i8* %__overflow_area_pointer20, i32 16
-  store i8* %__overflow_area_pointer.next21, i8** %__overflow_area_pointer_p19
-  %21 = bitcast %struct.AAA* %nnn to i8*
-  %22 = bitcast %struct.AAA* %20 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %21, i8* %22, i32 16, i32 4, i1 false)
-  %d22 = getelementptr inbounds %struct.AAA, %struct.AAA* %nnn, i32 0, i32 3
-  %23 = load i32, i32* %d22, align 4
-  %24 = load i32, i32* %ret, align 4
-  %add23 = add nsw i32 %24, %23
-  store i32 %add23, i32* %ret, align 4
-  %arraydecay24 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0
+  %vaarg.addr = phi ptr [ %__current_saved_reg_area_pointer, %vaarg.in_reg ], [ %__overflow_area_pointer, %vaarg.on_stack ]
+  %5 = load i32, ptr %vaarg.addr
+  store i32 %5, ptr %d, align 4
+  %6 = load i32, ptr %d, align 4
+  %7 = load i32, ptr %ret, align 4
+  %add5 = add nsw i32 %7, %6
+  store i32 %add5, ptr %ret, align 4
+  %__overflow_area_pointer_p7 = getelementptr inbounds %struct.__va_list_tag, ptr %ap, i32 0, i32 2
+  %__overflow_area_pointer8 = load ptr, ptr %__overflow_area_pointer_p7
+  %__overflow_area_pointer.next9 = getelementptr i8, ptr %__overflow_area_pointer8, i32 16
+  store ptr %__overflow_area_pointer.next9, ptr %__overflow_area_pointer_p7
+  call void @llvm.memcpy.p0.p0.i32(ptr %ddd, ptr %__overflow_area_pointer8, i32 16, i32 4, i1 false)
+  %d10 = getelementptr inbounds %struct.AAA, ptr %ddd, i32 0, i32 3
+  %8 = load i32, ptr %d10, align 4
+  %9 = load i32, ptr %ret, align 4
+  %add11 = add nsw i32 %9, %8
+  store i32 %add11, ptr %ret, align 4
+  %__overflow_area_pointer_p13 = getelementptr inbounds %struct.__va_list_tag, ptr %ap, i32 0, i32 2
+  %__overflow_area_pointer14 = load ptr, ptr %__overflow_area_pointer_p13
+  %__overflow_area_pointer.next15 = getelementptr i8, ptr %__overflow_area_pointer14, i32 16
+  store ptr %__overflow_area_pointer.next15, ptr %__overflow_area_pointer_p13
+  call void @llvm.memcpy.p0.p0.i32(ptr %ggg, ptr %__overflow_area_pointer14, i32 16, i32 4, i1 false)
+  %d16 = getelementptr inbounds %struct.AAA, ptr %ggg, i32 0, i32 3
+  %10 = load i32, ptr %d16, align 4
+  %11 = load i32, ptr %ret, align 4
+  %add17 = add nsw i32 %11, %10
+  store i32 %add17, ptr %ret, align 4
+  %__overflow_area_pointer_p19 = getelementptr inbounds %struct.__va_list_tag, ptr %ap, i32 0, i32 2
+  %__overflow_area_pointer20 = load ptr, ptr %__overflow_area_pointer_p19
+  %__overflow_area_pointer.next21 = getelementptr i8, ptr %__overflow_area_pointer20, i32 16
+  store ptr %__overflow_area_pointer.next21, ptr %__overflow_area_pointer_p19
+  call void @llvm.memcpy.p0.p0.i32(ptr %nnn, ptr %__overflow_area_pointer20, i32 16, i32 4, i1 false)
+  %d22 = getelementptr inbounds %struct.AAA, ptr %nnn, i32 0, i32 3
+  %12 = load i32, ptr %d22, align 4
+  %13 = load i32, ptr %ret, align 4
+  %add23 = add nsw i32 %13, %12
+  store i32 %add23, ptr %ret, align 4
   br label %vaarg.maybe_reg25
 
 vaarg.maybe_reg25:                                ; preds = %vaarg.end
-  %__current_saved_reg_area_pointer_p26 = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay24, i32 0, i32 0
-  %__current_saved_reg_area_pointer27 = load i8*, i8** %__current_saved_reg_area_pointer_p26
-  %__saved_reg_area_end_pointer_p28 = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay24, i32 0, i32 1
-  %__saved_reg_area_end_pointer29 = load i8*, i8** %__saved_reg_area_end_pointer_p28
-  %__new_saved_reg_area_pointer30 = getelementptr i8, i8* %__current_saved_reg_area_pointer27, i32 4
-  %25 = icmp sgt i8* %__new_saved_reg_area_pointer30, %__saved_reg_area_end_pointer29
-  br i1 %25, label %vaarg.on_stack32, label %vaarg.in_reg31
+  %__current_saved_reg_area_pointer27 = load ptr, ptr %ap
+  %__saved_reg_area_end_pointer_p28 = getelementptr inbounds %struct.__va_list_tag, ptr %ap, i32 0, i32 1
+  %__saved_reg_area_end_pointer29 = load ptr, ptr %__saved_reg_area_end_pointer_p28
+  %__new_saved_reg_area_pointer30 = getelementptr i8, ptr %__current_saved_reg_area_pointer27, i32 4
+  %14 = icmp sgt ptr %__new_saved_reg_area_pointer30, %__saved_reg_area_end_pointer29
+  br i1 %14, label %vaarg.on_stack32, label %vaarg.in_reg31
 
 vaarg.in_reg31:                                   ; preds = %vaarg.maybe_reg25
-  %26 = bitcast i8* %__current_saved_reg_area_pointer27 to i32*
-  store i8* %__new_saved_reg_area_pointer30, i8** %__current_saved_reg_area_pointer_p26
+  store ptr %__new_saved_reg_area_pointer30, ptr %ap
   br label %vaarg.end36
 
 vaarg.on_stack32:                                 ; preds = %vaarg.maybe_reg25
-  %__overflow_area_pointer_p33 = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay24, i32 0, i32 2
-  %__overflow_area_pointer34 = load i8*, i8** %__overflow_area_pointer_p33
-  %__overflow_area_pointer.next35 = getelementptr i8, i8* %__overflow_area_pointer34, i32 4
-  store i8* %__overflow_area_pointer.next35, i8** %__overflow_area_pointer_p33
-  store i8* %__overflow_area_pointer.next35, i8** %__current_saved_reg_area_pointer_p26
-  %27 = bitcast i8* %__overflow_area_pointer34 to i32*
+  %__overflow_area_pointer_p33 = getelementptr inbounds %struct.__va_list_tag, ptr %ap, i32 0, i32 2
+  %__overflow_area_pointer34 = load ptr, ptr %__overflow_area_pointer_p33
+  %__overflow_area_pointer.next35 = getelementptr i8, ptr %__overflow_area_pointer34, i32 4
+  store ptr %__overflow_area_pointer.next35, ptr %__overflow_area_pointer_p33
+  store ptr %__overflow_area_pointer.next35, ptr %ap
   br label %vaarg.end36
 
 vaarg.end36:                                      ; preds = %vaarg.on_stack32, %vaarg.in_reg31
-  %vaarg.addr37 = phi i32* [ %26, %vaarg.in_reg31 ], [ %27, %vaarg.on_stack32 ]
-  %28 = load i32, i32* %vaarg.addr37
-  store i32 %28, i32* %d, align 4
-  %29 = load i32, i32* %d, align 4
-  %30 = load i32, i32* %ret, align 4
-  %add38 = add nsw i32 %30, %29
-  store i32 %add38, i32* %ret, align 4
-  %31 = load i32, i32* %m.addr, align 4
-  %32 = load i32, i32* %ret, align 4
-  %add39 = add nsw i32 %32, %31
-  store i32 %add39, i32* %ret, align 4
-  %arraydecay40 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0
-  %arraydecay4041 = bitcast %struct.__va_list_tag* %arraydecay40 to i8*
-  call void @llvm.va_end(i8* %arraydecay4041)
-  %33 = load i32, i32* %ret, align 4
-  ret i32 %33
+  %vaarg.addr37 = phi ptr [ %__current_saved_reg_area_pointer27, %vaarg.in_reg31 ], [ %__overflow_area_pointer34, %vaarg.on_stack32 ]
+  %15 = load i32, ptr %vaarg.addr37
+  store i32 %15, ptr %d, align 4
+  %16 = load i32, ptr %d, align 4
+  %17 = load i32, ptr %ret, align 4
+  %add38 = add nsw i32 %17, %16
+  store i32 %add38, ptr %ret, align 4
+  %18 = load i32, ptr %m.addr, align 4
+  %19 = load i32, ptr %ret, align 4
+  %add39 = add nsw i32 %19, %18
+  store i32 %add39, ptr %ret, align 4
+  call void @llvm.va_end(ptr %ap)
+  %20 = load i32, ptr %ret, align 4
+  ret i32 %20
 }
 
 ; Function Attrs: nounwind
-declare void @llvm.va_start(i8*) #1
+declare void @llvm.va_start(ptr) #1
 
 ; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #1
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32, i32, i1) #1
 
 ; Function Attrs: nounwind
-declare void @llvm.va_end(i8*) #1
+declare void @llvm.va_end(ptr) #1
 
 ; Function Attrs: nounwind
 define i32 @main() #0 {
 entry:
   %retval = alloca i32, align 4
   %x = alloca i32, align 4
-  store i32 0, i32* %retval
-  %call = call i32 (i32, i32, i32, %struct.AAA*, %struct.AAA*, ...) @foo(i32 1, i32 3, i32 5, %struct.AAA* byval(%struct.AAA) align 4 @aaa, %struct.AAA* byval(%struct.AAA) align 4 @fff, i32 2, %struct.AAA* byval(%struct.AAA) align 4 @xxx, %struct.AAA* byval(%struct.AAA) align 4 @yyy, %struct.AAA* byval(%struct.AAA) align 4 @ccc, i32 4)
-  store i32 %call, i32* %x, align 4
-  %0 = load i32, i32* %x, align 4
-  %call1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str, i32 0, i32 0), i32 %0)
-  %1 = load i32, i32* %x, align 4
+  store i32 0, ptr %retval
+  %call = call i32 (i32, i32, i32, ptr, ptr, ...) @foo(i32 1, i32 3, i32 5, ptr byval(%struct.AAA) align 4 @aaa, ptr byval(%struct.AAA) align 4 @fff, i32 2, ptr byval(%struct.AAA) align 4 @xxx, ptr byval(%struct.AAA) align 4 @yyy, ptr byval(%struct.AAA) align 4 @ccc, i32 4)
+  store i32 %call, ptr %x, align 4
+  %0 = load i32, ptr %x, align 4
+  %call1 = call i32 (ptr, ...) @printf(ptr @.str, i32 %0)
+  %1 = load i32, ptr %x, align 4
   ret i32 %1
 }
 
-declare i32 @printf(i8*, ...) #2
+declare i32 @printf(ptr, ...) #2
 
 attributes #0 = { nounwind }
 

diff  --git a/llvm/test/CodeGen/Hexagon/varargs-memv.ll b/llvm/test/CodeGen/Hexagon/varargs-memv.ll
index 622f1a59a587f..e62864a715f67 100644
--- a/llvm/test/CodeGen/Hexagon/varargs-memv.ll
+++ b/llvm/test/CodeGen/Hexagon/varargs-memv.ll
@@ -13,20 +13,19 @@ b0:
   %v0 = alloca i32, align 4
   %v1 = alloca i32, align 4
   %v2 = alloca [0 x <4 x i32>], align 16
-  store i32 0, i32* %v0
-  store i32 0, i32* %v1, align 4
-  %v3 = bitcast [0 x <4 x i32>]* %v2 to i8*
-  call void @llvm.memset.p0i8.i32(i8* align 16 %v3, i8 0, i32 0, i1 false)
-  %v4 = load i32, i32* %v1, align 4
+  store i32 0, ptr %v0
+  store i32 0, ptr %v1, align 4
+  call void @llvm.memset.p0.i32(ptr align 16 %v2, i8 0, i32 0, i1 false)
+  %v4 = load i32, ptr %v1, align 4
   %v5 = add nsw i32 %v4, 1
-  store i32 %v5, i32* %v1, align 4
-  %v6 = load <4 x i32>, <4 x i32>* @g1, align 16
-  %v7 = call i32 bitcast (i32 (...)* @f0 to i32 (i8*, i32, <4 x i32>)*)(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @g0, i32 0, i32 0), i32 %v5, <4 x i32> %v6)
+  store i32 %v5, ptr %v1, align 4
+  %v6 = load <4 x i32>, ptr @g1, align 16
+  %v7 = call i32 @f0(ptr @g0, i32 %v5, <4 x i32> %v6)
   ret i32 0
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1) #1
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1) #1
 
 attributes #0 = { nounwind "target-cpu"="hexagonv60" }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/vassign-to-combine.ll b/llvm/test/CodeGen/Hexagon/vassign-to-combine.ll
index f33756b76e0e2..5da6250f7e671 100644
--- a/llvm/test/CodeGen/Hexagon/vassign-to-combine.ll
+++ b/llvm/test/CodeGen/Hexagon/vassign-to-combine.ll
@@ -16,10 +16,10 @@ declare <64 x i32> @llvm.hexagon.V6.vadduhsat.dv.128B(<64 x i32>, <64 x i32>) #0
 declare <64 x i32> @llvm.hexagon.V6.vaddubh.128B(<32 x i32>, <32 x i32>) #0
 declare <64 x i32> @llvm.hexagon.V6.vmpyub.128B(<32 x i32>, i32) #0
 
-define void @f0(<32 x i32>* %a0, <32 x i32>* %a1) local_unnamed_addr #1 {
+define void @f0(ptr %a0, ptr %a1) local_unnamed_addr #1 {
 b0:
-  %v0 = load <32 x i32>, <32 x i32>* %a0, align 128
-  %v1 = load <32 x i32>, <32 x i32>* %a1, align 128
+  %v0 = load <32 x i32>, ptr %a0, align 128
+  %v1 = load <32 x i32>, ptr %a1, align 128
   br i1 undef, label %b2, label %b1
 
 b1:                                               ; preds = %b0
@@ -32,7 +32,7 @@ b1:                                               ; preds = %b0
   %v8 = tail call <64 x i32> @llvm.hexagon.V6.vaddh.dv.128B(<64 x i32> undef, <64 x i32> %v7) #1
   %v9 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v8) #1
   %v10 = tail call <32 x i32> @llvm.hexagon.V6.vsathub.128B(<32 x i32> %v9, <32 x i32> undef) #1
-  store <32 x i32> %v10, <32 x i32>* %a0, align 128
+  store <32 x i32> %v10, ptr %a0, align 128
   br label %b2
 
 b2:                                               ; preds = %b1, %b0
@@ -47,7 +47,7 @@ b2:                                               ; preds = %b1, %b0
   %v19 = tail call <64 x i32> @llvm.hexagon.V6.vaddh.dv.128B(<64 x i32> undef, <64 x i32> %v18) #1
   %v20 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v19) #1
   %v21 = tail call <32 x i32> @llvm.hexagon.V6.vsathub.128B(<32 x i32> %v20, <32 x i32> undef) #1
-  store <32 x i32> %v21, <32 x i32>* %a1, align 128
+  store <32 x i32> %v21, ptr %a1, align 128
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/vcombine128_to_req_seq.ll b/llvm/test/CodeGen/Hexagon/vcombine128_to_req_seq.ll
index c87b18dc91b7e..e42ac93e58ac8 100644
--- a/llvm/test/CodeGen/Hexagon/vcombine128_to_req_seq.ll
+++ b/llvm/test/CodeGen/Hexagon/vcombine128_to_req_seq.ll
@@ -4,19 +4,16 @@
 
 ; CHECK-LABEL: f0:
 ; CHECK-NOT: vcombine
-define void @f0(i8* nocapture readonly %a0, i8* nocapture readonly %a1, i32 %a2, i8* nocapture %a3, i32 %a4, i32 %a5) #0 {
+define void @f0(ptr nocapture readonly %a0, ptr nocapture readonly %a1, i32 %a2, ptr nocapture %a3, i32 %a4, i32 %a5) #0 {
 b0:
-  %v0 = bitcast i8* %a1 to i64*
-  %v1 = load i64, i64* %v0, align 8
+  %v1 = load i64, ptr %a1, align 8
   %v2 = shl i64 %v1, 8
   %v3 = trunc i64 %v2 to i32
   %v4 = trunc i64 %v1 to i32
   %v5 = and i32 %v4, 16777215
-  %v6 = bitcast i8* %a0 to <32 x i32>*
-  %v7 = load <32 x i32>, <32 x i32>* %v6, align 128
-  %v8 = getelementptr inbounds i8, i8* %a0, i32 32
-  %v9 = bitcast i8* %v8 to <32 x i32>*
-  %v10 = load <32 x i32>, <32 x i32>* %v9, align 128
+  %v7 = load <32 x i32>, ptr %a0, align 128
+  %v8 = getelementptr inbounds i8, ptr %a0, i32 32
+  %v10 = load <32 x i32>, ptr %v8, align 128
   %v11 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> %v10, <32 x i32> %v7)
   %v12 = tail call <64 x i32> @llvm.hexagon.V6.vrmpybusi.128B(<64 x i32> %v11, i32 %v5, i32 0)
   %v13 = tail call <64 x i32> @llvm.hexagon.V6.vrmpybusi.128B(<64 x i32> %v11, i32 %v3, i32 0)
@@ -24,11 +21,9 @@ b0:
   %v15 = tail call <32 x i32> @llvm.hexagon.V6.vasrwuhsat.128B(<32 x i32> %v14, <32 x i32> %v14, i32 %a2)
   %v16 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v13)
   %v17 = tail call <32 x i32> @llvm.hexagon.V6.vasrwuhsat.128B(<32 x i32> %v16, <32 x i32> %v16, i32 %a2)
-  %v18 = getelementptr inbounds i8, i8* %a3, i32 32
-  %v19 = bitcast i8* %v18 to <32 x i32>*
-  store <32 x i32> %v15, <32 x i32>* %v19, align 128
-  %v20 = bitcast i8* %a3 to <32 x i32>*
-  store <32 x i32> %v17, <32 x i32>* %v20, align 128
+  %v18 = getelementptr inbounds i8, ptr %a3, i32 32
+  store <32 x i32> %v15, ptr %v18, align 128
+  store <32 x i32> %v17, ptr %a3, align 128
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/vcombine_subreg.ll b/llvm/test/CodeGen/Hexagon/vcombine_subreg.ll
index 38d6b35e58212..d3a603027c23b 100644
--- a/llvm/test/CodeGen/Hexagon/vcombine_subreg.ll
+++ b/llvm/test/CodeGen/Hexagon/vcombine_subreg.ll
@@ -10,11 +10,11 @@ target triple = "hexagon"
 ; Function Attrs: nounwind
 define void @f0() #0 {
 b0:
-  %v0 = load <16 x i32>, <16 x i32>* @g0, align 64
-  %v1 = load <32 x i32>, <32 x i32>* @g1, align 128
+  %v0 = load <16 x i32>, ptr @g0, align 64
+  %v1 = load <32 x i32>, ptr @g1, align 128
   %v2 = call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v1)
   %v3 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v0, <16 x i32> %v2)
-  store <32 x i32> %v3, <32 x i32>* @g2, align 128
+  store <32 x i32> %v3, ptr @g2, align 128
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/vcombine_to_req_seq.ll b/llvm/test/CodeGen/Hexagon/vcombine_to_req_seq.ll
index aea850307a63d..9b969d1935931 100644
--- a/llvm/test/CodeGen/Hexagon/vcombine_to_req_seq.ll
+++ b/llvm/test/CodeGen/Hexagon/vcombine_to_req_seq.ll
@@ -1,19 +1,16 @@
 ; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
 ; CHECK-NOT: vcombine
 
-define void @f0(i8* nocapture readonly %a0, i8* nocapture readonly %a1, i32 %a2, i8* nocapture %a3, i32 %a4, i32 %a5) #0 {
+define void @f0(ptr nocapture readonly %a0, ptr nocapture readonly %a1, i32 %a2, ptr nocapture %a3, i32 %a4, i32 %a5) #0 {
 b0:
-  %v0 = bitcast i8* %a1 to i64*
-  %v1 = load i64, i64* %v0, align 8
+  %v1 = load i64, ptr %a1, align 8
   %v2 = shl i64 %v1, 8
   %v3 = trunc i64 %v2 to i32
   %v4 = trunc i64 %v1 to i32
   %v5 = and i32 %v4, 16777215
-  %v6 = bitcast i8* %a0 to <16 x i32>*
-  %v7 = load <16 x i32>, <16 x i32>* %v6, align 64
-  %v8 = getelementptr inbounds i8, i8* %a0, i32 32
-  %v9 = bitcast i8* %v8 to <16 x i32>*
-  %v10 = load <16 x i32>, <16 x i32>* %v9, align 64
+  %v7 = load <16 x i32>, ptr %a0, align 64
+  %v8 = getelementptr inbounds i8, ptr %a0, i32 32
+  %v10 = load <16 x i32>, ptr %v8, align 64
   %v11 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v10, <16 x i32> %v7)
   %v12 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %v11, i32 %v5, i32 0)
   %v13 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %v11, i32 %v3, i32 0)
@@ -21,11 +18,9 @@ b0:
   %v15 = tail call <16 x i32> @llvm.hexagon.V6.vasrwuhsat(<16 x i32> %v14, <16 x i32> %v14, i32 %a2)
   %v16 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v13)
   %v17 = tail call <16 x i32> @llvm.hexagon.V6.vasrwuhsat(<16 x i32> %v16, <16 x i32> %v16, i32 %a2)
-  %v18 = getelementptr inbounds i8, i8* %a3, i32 32
-  %v19 = bitcast i8* %v18 to <16 x i32>*
-  store <16 x i32> %v15, <16 x i32>* %v19, align 64
-  %v20 = bitcast i8* %a3 to <16 x i32>*
-  store <16 x i32> %v17, <16 x i32>* %v20, align 64
+  %v18 = getelementptr inbounds i8, ptr %a3, i32 32
+  store <16 x i32> %v15, ptr %v18, align 64
+  store <16 x i32> %v17, ptr %a3, align 64
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/vcombine_zero_
diff _ptrs.ll b/llvm/test/CodeGen/Hexagon/vcombine_zero_
diff _ptrs.ll
index 8972adfb92cae..2e4243340c12e 100644
--- a/llvm/test/CodeGen/Hexagon/vcombine_zero_
diff _ptrs.ll
+++ b/llvm/test/CodeGen/Hexagon/vcombine_zero_
diff _ptrs.ll
@@ -11,19 +11,18 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define dllexport void @f0(i8** %a0) local_unnamed_addr #0 {
+define dllexport void @f0(ptr %a0) local_unnamed_addr #0 {
 b0:
-  %v0 = load i8*, i8** %a0, align 4
-  %v1 = getelementptr i8, i8* %v0, i32 1794
-  %v2 = bitcast i8* %v1 to <64 x i16>*
-  call void @llvm.assume(i1 true) [ "align"(i8* %v0, i32 128) ]
-  %v3 = load <64 x i16>, <64 x i16>* %v2, align 128
+  %v0 = load ptr, ptr %a0, align 4
+  %v1 = getelementptr i8, ptr %v0, i32 1794
+  call void @llvm.assume(i1 true) [ "align"(ptr %v0, i32 128) ]
+  %v3 = load <64 x i16>, ptr %v1, align 128
   %v4 = add <64 x i16> %v3, %v3
-  call void @llvm.assume(i1 true) [ "align"(i8* %v0, i32 128) ]
-  %v5 = tail call <64 x i16> @llvm.masked.load.v64i16.p0v64i16(<64 x i16>* %v2, i32 128, <64 x i1> <i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true>, <64 x i16> undef)
-  call void @llvm.assume(i1 true) [ "align"(i8* %v0, i32 128) ]
+  call void @llvm.assume(i1 true) [ "align"(ptr %v0, i32 128) ]
+  %v5 = tail call <64 x i16> @llvm.masked.load.v64i16.p0(ptr %v1, i32 128, <64 x i1> <i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true>, <64 x i16> undef)
+  call void @llvm.assume(i1 true) [ "align"(ptr %v0, i32 128) ]
   %v6 = add <64 x i16> %v4, %v5
-  store <64 x i16> %v6, <64 x i16>* %v2, align 128
+  store <64 x i16> %v6, ptr %v1, align 128
   ret void
 }
 
@@ -31,7 +30,7 @@ b0:
 declare void @llvm.assume(i1 noundef) #1
 
 ; Function Attrs: argmemonly nofree nosync nounwind readonly willreturn
-declare <64 x i16> @llvm.masked.load.v64i16.p0v64i16(<64 x i16>*, i32 immarg, <64 x i1>, <64 x i16>) #2
+declare <64 x i16> @llvm.masked.load.v64i16.p0(ptr, i32 immarg, <64 x i1>, <64 x i16>) #2
 
 attributes #0 = { "target-features"="+hvxv68,+hvx-length128b,+hvxv68,+hvx-length128b" }
 attributes #1 = { nofree nosync nounwind willreturn }

diff  --git a/llvm/test/CodeGen/Hexagon/vdotprod.ll b/llvm/test/CodeGen/Hexagon/vdotprod.ll
index b89b112477875..abdd4cbeffc65 100644
--- a/llvm/test/CodeGen/Hexagon/vdotprod.ll
+++ b/llvm/test/CodeGen/Hexagon/vdotprod.ll
@@ -13,22 +13,20 @@
 ; CHECK: }{{[ \t]*}}:endloop
 
 ; Function Attrs: nounwind readonly
-define i32 @f0(i32* nocapture readonly %a0, i32* nocapture readonly %a1) #0 {
+define i32 @f0(ptr nocapture readonly %a0, ptr nocapture readonly %a1) #0 {
 b0:
-  %v0 = bitcast i32* %a0 to i64*
-  %v1 = bitcast i32* %a1 to i64*
   br label %b1
 
 b1:                                               ; preds = %b1, %b0
-  %v2 = phi i64* [ %v0, %b0 ], [ %v21, %b1 ]
-  %v3 = phi i64* [ %v1, %b0 ], [ %v22, %b1 ]
+  %v2 = phi ptr [ %a0, %b0 ], [ %v21, %b1 ]
+  %v3 = phi ptr [ %a1, %b0 ], [ %v22, %b1 ]
   %v4 = phi i32 [ 0, %b0 ], [ %v19, %b1 ]
   %v5 = phi i32 [ 0, %b0 ], [ %v14, %b1 ]
   %v6 = phi i32 [ 0, %b0 ], [ %v18, %b1 ]
-  %v7 = load i64, i64* %v2, align 8
+  %v7 = load i64, ptr %v2, align 8
   %v8 = trunc i64 %v7 to i32
   %v9 = lshr i64 %v7, 32
-  %v10 = load i64, i64* %v3, align 8
+  %v10 = load i64, ptr %v3, align 8
   %v11 = trunc i64 %v10 to i32
   %v12 = lshr i64 %v10, 32
   %v13 = mul nsw i32 %v11, %v8
@@ -39,8 +37,8 @@ b1:                                               ; preds = %b1, %b0
   %v18 = add nsw i32 %v17, %v6
   %v19 = add nsw i32 %v4, 1
   %v20 = icmp eq i32 %v19, 199
-  %v21 = getelementptr i64, i64* %v2, i32 1
-  %v22 = getelementptr i64, i64* %v3, i32 1
+  %v21 = getelementptr i64, ptr %v2, i32 1
+  %v22 = getelementptr i64, ptr %v3, i32 1
   br i1 %v20, label %b2, label %b1
 
 b2:                                               ; preds = %b1

diff  --git a/llvm/test/CodeGen/Hexagon/vec-align.ll b/llvm/test/CodeGen/Hexagon/vec-align.ll
index 8178f53f05672..3ee75c7991719 100644
--- a/llvm/test/CodeGen/Hexagon/vec-align.ll
+++ b/llvm/test/CodeGen/Hexagon/vec-align.ll
@@ -17,17 +17,17 @@ b0:
   %v0 = alloca i32, align 4
   %v1 = alloca <16 x i32>, align 64
   %v2 = alloca <16 x i32>, align 64
-  store i32 0, i32* %v0
+  store i32 0, ptr %v0
   %v3 = call i32 @f1(i8 zeroext 0)
   %v4 = call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1)
-  store <16 x i32> %v4, <16 x i32>* %v1, align 64
+  store <16 x i32> %v4, ptr %v1, align 64
   %v5 = call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 12)
-  store <16 x i32> %v5, <16 x i32>* %v2, align 64
-  %v6 = load <16 x i32>, <16 x i32>* %v1, align 64
-  %v7 = load <16 x i32>, <16 x i32>* %v2, align 64
+  store <16 x i32> %v5, ptr %v2, align 64
+  %v6 = load <16 x i32>, ptr %v1, align 64
+  %v7 = load <16 x i32>, ptr %v2, align 64
   %v8 = call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v6, <16 x i32> %v7)
-  store <16 x i32> %v8, <16 x i32>* @g0, align 64
-  call void bitcast (void (...)* @f2 to void ()*)()
+  store <16 x i32> %v8, ptr @g0, align 64
+  call void @f2()
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/vec-pred-spill1.ll b/llvm/test/CodeGen/Hexagon/vec-pred-spill1.ll
index f8a12d33689b3..c9cd6dda91d21 100644
--- a/llvm/test/CodeGen/Hexagon/vec-pred-spill1.ll
+++ b/llvm/test/CodeGen/Hexagon/vec-pred-spill1.ll
@@ -11,8 +11,8 @@ target triple = "hexagon"
 @src = global i32 -1, align 4
 @Q6VecPredResult = common global <16 x i32> zeroinitializer, align 64
 @dst_addresses = common global [15 x i64] zeroinitializer, align 8
- at ptr_addresses = common global [15 x i8*] zeroinitializer, align 8
- at src_addresses = common global [15 x i8*] zeroinitializer, align 8
+ at ptr_addresses = common global [15 x ptr] zeroinitializer, align 8
+ at src_addresses = common global [15 x ptr] zeroinitializer, align 8
 @ptr = common global [32768 x i32] zeroinitializer, align 8
 @vecpreds = common global [15 x <16 x i32>] zeroinitializer, align 64
 @VectorResult = common global <16 x i32> zeroinitializer, align 64
@@ -26,7 +26,7 @@ target triple = "hexagon"
 ; Function Attrs: nounwind
 define i32 @main() #0 {
 entry:
-  %call = tail call i32 bitcast (i32 (...)* @init_addresses to i32 ()*)() #3
+  %call = tail call i32 @init_addresses() #3
   %call1 = tail call i32 @acquire_vector_unit(i8 zeroext 0) #3
   tail call void @init_vectors() #3
   %0 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 2)
@@ -34,19 +34,19 @@ entry:
   %2 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1)
   %3 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt.acc(<64 x i1> %1, <16 x i32> %2, i32 -2147483648)
   %4 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %3, i32 -1)
-  store <16 x i32> %4, <16 x i32>* @Q6VecPredResult, align 64, !tbaa !1
-  %puts = tail call i32 @puts(i8* getelementptr inbounds ([106 x i8], [106 x i8]* @str, i32 0, i32 0))
-  tail call void @print_vecpred(i32 512, i8* bitcast (<16 x i32>* @Q6VecPredResult to i8*)) #3
+  store <16 x i32> %4, ptr @Q6VecPredResult, align 64, !tbaa !1
+  %puts = tail call i32 @puts(ptr @str)
+  tail call void @print_vecpred(i32 512, ptr @Q6VecPredResult) #3
   %5 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt.acc(<64 x i1> %1, <16 x i32> %2, i32 -1)
   %6 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %5, i32 -1)
-  store <16 x i32> %6, <16 x i32>* @Q6VecPredResult, align 64, !tbaa !1
-  %puts5 = tail call i32 @puts(i8* getelementptr inbounds ([99 x i8], [99 x i8]* @str3, i32 0, i32 0))
-  tail call void @print_vecpred(i32 512, i8* bitcast (<16 x i32>* @Q6VecPredResult to i8*)) #3
+  store <16 x i32> %6, ptr @Q6VecPredResult, align 64, !tbaa !1
+  %puts5 = tail call i32 @puts(ptr @str3)
+  tail call void @print_vecpred(i32 512, ptr @Q6VecPredResult) #3
   %7 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt.acc(<64 x i1> %1, <16 x i32> %2, i32 0)
   %8 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %7, i32 -1)
-  store <16 x i32> %8, <16 x i32>* @Q6VecPredResult, align 64, !tbaa !1
-  %puts6 = tail call i32 @puts(i8* getelementptr inbounds ([98 x i8], [98 x i8]* @str4, i32 0, i32 0))
-  tail call void @print_vecpred(i32 512, i8* bitcast (<16 x i32>* @Q6VecPredResult to i8*)) #3
+  store <16 x i32> %8, ptr @Q6VecPredResult, align 64, !tbaa !1
+  %puts6 = tail call i32 @puts(ptr @str4)
+  tail call void @print_vecpred(i32 512, ptr @Q6VecPredResult) #3
   ret i32 0
 }
 
@@ -68,10 +68,10 @@ declare <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1>, i32) #2
 ; Function Attrs: nounwind readnone
 declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32) #2
 
-declare void @print_vecpred(i32, i8*) #1
+declare void @print_vecpred(i32, ptr) #1
 
 ; Function Attrs: nounwind
-declare i32 @puts(i8* nocapture readonly) #3
+declare i32 @puts(ptr nocapture readonly) #3
 
 attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }

diff  --git a/llvm/test/CodeGen/Hexagon/vec-vararg-align.ll b/llvm/test/CodeGen/Hexagon/vec-vararg-align.ll
index 90a458db1b717..cceb544190340 100644
--- a/llvm/test/CodeGen/Hexagon/vec-vararg-align.ll
+++ b/llvm/test/CodeGen/Hexagon/vec-vararg-align.ll
@@ -14,17 +14,17 @@ target triple = "hexagon-unknown--elf"
 
 define i32 @main() #0 {
 b0:
-  %v1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([32 x i8], [32 x i8]* @.str, i32 0, i32 0)) #0
-  %v2 = load <16 x i32>, <16 x i32>* @gVec0, align 64
-  %v3 = load <32 x i32>, <32 x i32>* @gVec10, align 128
-  %v4 = load i32, i32* @gi1, align 4
-  %v5 = load float, float* @gf1, align 4
+  %v1 = call i32 (ptr, ...) @printf(ptr @.str) #0
+  %v2 = load <16 x i32>, ptr @gVec0, align 64
+  %v3 = load <32 x i32>, ptr @gVec10, align 128
+  %v4 = load i32, ptr @gi1, align 4
+  %v5 = load float, ptr @gf1, align 4
   %v6 = fpext float %v5 to double
-  call void (i8*, i32, ...) @VarVec1(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str.1, i32 0, i32 0), i32 4, <16 x i32> %v2, <32 x i32> %v3, i32 %v4, double %v6)
+  call void (ptr, i32, ...) @VarVec1(ptr @.str.1, i32 4, <16 x i32> %v2, <32 x i32> %v3, i32 %v4, double %v6)
   ret i32 0
 }
 
-declare i32 @printf(i8*, ...) #0
-declare void @VarVec1(i8*, i32, ...) #0
+declare i32 @printf(ptr, ...) #0
+declare void @VarVec1(ptr, i32, ...) #0
 
 attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }

diff  --git a/llvm/test/CodeGen/Hexagon/vecPred2Vec.ll b/llvm/test/CodeGen/Hexagon/vecPred2Vec.ll
index ab4f7eee1a3f1..241f96f49e916 100644
--- a/llvm/test/CodeGen/Hexagon/vecPred2Vec.ll
+++ b/llvm/test/CodeGen/Hexagon/vecPred2Vec.ll
@@ -16,7 +16,7 @@ b0:
   %v3 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v2, i32 -1)
   %v4 = tail call <64 x i1> @llvm.hexagon.V6.pred.and(<64 x i1> %v1, <64 x i1> %v3)
   %v5 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v4, i32 -1)
-  store <16 x i32> %v5, <16 x i32>* @g0, align 64, !tbaa !0
+  store <16 x i32> %v5, ptr @g0, align 64, !tbaa !0
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/vect-any_extend.ll b/llvm/test/CodeGen/Hexagon/vect-any_extend.ll
index 209f2b6abbd73..b4b4feb5613ed 100644
--- a/llvm/test/CodeGen/Hexagon/vect-any_extend.ll
+++ b/llvm/test/CodeGen/Hexagon/vect-any_extend.ll
@@ -7,9 +7,9 @@ target triple = "hexagon-unknown-linux-gnu"
 ; Function Attrs: nounwind
 define void @f0() #0 {
 b0:
-  %v0 = load <4 x i8>, <4 x i8>* undef, align 8
+  %v0 = load <4 x i8>, ptr undef, align 8
   %v1 = zext <4 x i8> %v0 to <4 x i32>
-  store <4 x i32> %v1, <4 x i32>* undef, align 8
+  store <4 x i32> %v1, ptr undef, align 8
   unreachable
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/vect-dbl-post-inc.ll b/llvm/test/CodeGen/Hexagon/vect-dbl-post-inc.ll
index 5fa6b65b2e8cd..788dd3fbff8ac 100644
--- a/llvm/test/CodeGen/Hexagon/vect-dbl-post-inc.ll
+++ b/llvm/test/CodeGen/Hexagon/vect-dbl-post-inc.ll
@@ -7,26 +7,24 @@
 ; CHECK: vmem(r{{[0-9]+}}++#1)
 
 ; Function Attrs: nounwind
-define void @f0(i8* noalias nocapture readonly %a0, i8* noalias nocapture %a1, i32 %a2) #0 {
+define void @f0(ptr noalias nocapture readonly %a0, ptr noalias nocapture %a1, i32 %a2) #0 {
 b0:
   %v0 = icmp sgt i32 %a2, 0
   br i1 %v0, label %b1, label %b3
 
 b1:                                               ; preds = %b0
-  %v1 = bitcast i8* %a0 to <32 x i32>*
-  %v2 = bitcast i8* %a1 to <32 x i32>*
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
-  %v3 = phi <32 x i32>* [ %v9, %b2 ], [ %v1, %b1 ]
-  %v4 = phi <32 x i32>* [ %v10, %b2 ], [ %v2, %b1 ]
+  %v3 = phi ptr [ %v9, %b2 ], [ %a0, %b1 ]
+  %v4 = phi ptr [ %v10, %b2 ], [ %a1, %b1 ]
   %v5 = phi i32 [ %v7, %b2 ], [ 0, %b1 ]
-  %v6 = load <32 x i32>, <32 x i32>* %v3, align 128, !tbaa !0
-  store <32 x i32> %v6, <32 x i32>* %v4, align 128, !tbaa !0
+  %v6 = load <32 x i32>, ptr %v3, align 128, !tbaa !0
+  store <32 x i32> %v6, ptr %v4, align 128, !tbaa !0
   %v7 = add nsw i32 %v5, 1
   %v8 = icmp eq i32 %v7, %a2
-  %v9 = getelementptr <32 x i32>, <32 x i32>* %v3, i32 1
-  %v10 = getelementptr <32 x i32>, <32 x i32>* %v4, i32 1
+  %v9 = getelementptr <32 x i32>, ptr %v3, i32 1
+  %v10 = getelementptr <32 x i32>, ptr %v4, i32 1
   br i1 %v8, label %b3, label %b2
 
 b3:                                               ; preds = %b2, %b0

diff  --git a/llvm/test/CodeGen/Hexagon/vect-downscale.ll b/llvm/test/CodeGen/Hexagon/vect-downscale.ll
index ce10b74a72a66..65cd1ef489c1d 100644
--- a/llvm/test/CodeGen/Hexagon/vect-downscale.ll
+++ b/llvm/test/CodeGen/Hexagon/vect-downscale.ll
@@ -17,7 +17,7 @@
 ; CHECK-NOT: }
 ; CHECK: }{{[ \t]*}}:endloop0
 
-define void @f0(i8* noalias %a0, i32 %a1, i32 %a2, i32 %a3, i8* noalias nocapture %a4, i32 %a5, i32 %a6) #0 {
+define void @f0(ptr noalias %a0, i32 %a1, i32 %a2, i32 %a3, ptr noalias nocapture %a4, i32 %a5, i32 %a6) #0 {
 b0:
   %v0 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 8388736)
   %v1 = zext i32 %a3 to i64
@@ -26,7 +26,7 @@ b0:
   %v4 = shl nuw nsw i64 %v3, 16
   %v5 = or i64 %v4, %v2
   %v6 = or i64 %v5, 281474976710658
-  tail call void asm sideeffect "    l2fetch($0, $1)\0A", "r,r"(i8* %a0, i64 %v6) #2, !srcloc !0
+  tail call void asm sideeffect "    l2fetch($0, $1)\0A", "r,r"(ptr %a0, i64 %v6) #2, !srcloc !0
   %v7 = tail call i32 @llvm.hexagon.S2.ct0(i32 %a6)
   %v8 = add i32 %v7, 1
   %v9 = lshr i32 %a1, %v8
@@ -44,61 +44,55 @@ b1:                                               ; preds = %b0
   %v18 = icmp eq i32 %v11, %a1
   %v19 = icmp ugt i32 %v12, %a6
   %v20 = mul i32 %v9, 64
-  %v21 = getelementptr i8, i8* %a4, i32 %v20
+  %v21 = getelementptr i8, ptr %a4, i32 %v20
   %v22 = mul i32 %v9, 128
   %v23 = add i32 %v22, %a3
-  %v24 = getelementptr i8, i8* %a0, i32 %v23
-  %v25 = getelementptr i8, i8* %a0, i32 %v22
+  %v24 = getelementptr i8, ptr %a0, i32 %v23
+  %v25 = getelementptr i8, ptr %a0, i32 %v22
   br label %b2
 
 b2:                                               ; preds = %b10, %b1
-  %v26 = phi i8* [ %v25, %b1 ], [ %v90, %b10 ]
-  %v27 = phi i8* [ %v24, %b1 ], [ %v89, %b10 ]
-  %v28 = phi i8* [ %v21, %b1 ], [ %v88, %b10 ]
+  %v26 = phi ptr [ %v25, %b1 ], [ %v90, %b10 ]
+  %v27 = phi ptr [ %v24, %b1 ], [ %v89, %b10 ]
+  %v28 = phi ptr [ %v21, %b1 ], [ %v88, %b10 ]
   %v29 = phi <16 x i32> [ undef, %b1 ], [ %v85, %b10 ]
   %v30 = phi <16 x i32> [ undef, %b1 ], [ %v84, %b10 ]
-  %v31 = phi i8* [ %a0, %b1 ], [ %v86, %b10 ]
-  %v32 = phi i8* [ %a4, %b1 ], [ %v87, %b10 ]
+  %v31 = phi ptr [ %a0, %b1 ], [ %v86, %b10 ]
+  %v32 = phi ptr [ %a4, %b1 ], [ %v87, %b10 ]
   %v33 = phi i32 [ 0, %b1 ], [ %v37, %b10 ]
-  %v34 = bitcast i8* %v26 to <16 x i32>*
-  %v35 = bitcast i8* %v27 to <16 x i32>*
-  %v36 = bitcast i8* %v28 to <16 x i32>*
   %v37 = add nsw i32 %v33, 2
   %v38 = icmp ult i32 %v37, %a2
   br i1 %v38, label %b3, label %b4
 
 b3:                                               ; preds = %b2
-  %v39 = getelementptr inbounds i8, i8* %v31, i32 %v16
-  tail call void asm sideeffect "    l2fetch($0, $1)\0A", "r,r"(i8* %v39, i64 %v6) #2, !srcloc !1
+  %v39 = getelementptr inbounds i8, ptr %v31, i32 %v16
+  tail call void asm sideeffect "    l2fetch($0, $1)\0A", "r,r"(ptr %v39, i64 %v6) #2, !srcloc !1
   br label %b4
 
 b4:                                               ; preds = %b3, %b2
-  %v40 = bitcast i8* %v32 to <16 x i32>*
-  %v41 = bitcast i8* %v31 to <16 x i32>*
-  %v42 = getelementptr inbounds i8, i8* %v31, i32 %a3
-  %v43 = bitcast i8* %v42 to <16 x i32>*
+  %v42 = getelementptr inbounds i8, ptr %v31, i32 %a3
   br i1 %v17, label %b6, label %b5
 
 b5:                                               ; preds = %b5, %b4
-  %v44 = phi <16 x i32>* [ %v54, %b5 ], [ %v43, %b4 ]
-  %v45 = phi <16 x i32>* [ %v52, %b5 ], [ %v41, %b4 ]
-  %v46 = phi <16 x i32>* [ %v61, %b5 ], [ %v40, %b4 ]
+  %v44 = phi ptr [ %v54, %b5 ], [ %v42, %b4 ]
+  %v45 = phi ptr [ %v52, %b5 ], [ %v31, %b4 ]
+  %v46 = phi ptr [ %v61, %b5 ], [ %v32, %b4 ]
   %v47 = phi i32 [ %v62, %b5 ], [ 0, %b4 ]
-  %v48 = getelementptr inbounds <16 x i32>, <16 x i32>* %v45, i32 1
-  %v49 = load <16 x i32>, <16 x i32>* %v45, align 64, !tbaa !2
-  %v50 = getelementptr inbounds <16 x i32>, <16 x i32>* %v44, i32 1
-  %v51 = load <16 x i32>, <16 x i32>* %v44, align 64, !tbaa !2
-  %v52 = getelementptr inbounds <16 x i32>, <16 x i32>* %v45, i32 2
-  %v53 = load <16 x i32>, <16 x i32>* %v48, align 64, !tbaa !2
-  %v54 = getelementptr inbounds <16 x i32>, <16 x i32>* %v44, i32 2
-  %v55 = load <16 x i32>, <16 x i32>* %v50, align 64, !tbaa !2
+  %v48 = getelementptr inbounds <16 x i32>, ptr %v45, i32 1
+  %v49 = load <16 x i32>, ptr %v45, align 64, !tbaa !2
+  %v50 = getelementptr inbounds <16 x i32>, ptr %v44, i32 1
+  %v51 = load <16 x i32>, ptr %v44, align 64, !tbaa !2
+  %v52 = getelementptr inbounds <16 x i32>, ptr %v45, i32 2
+  %v53 = load <16 x i32>, ptr %v48, align 64, !tbaa !2
+  %v54 = getelementptr inbounds <16 x i32>, ptr %v44, i32 2
+  %v55 = load <16 x i32>, ptr %v50, align 64, !tbaa !2
   %v56 = tail call <16 x i32> @llvm.hexagon.V6.vdmpybus.acc(<16 x i32> %v0, <16 x i32> %v49, i32 1077952576)
   %v57 = tail call <16 x i32> @llvm.hexagon.V6.vdmpybus.acc(<16 x i32> %v0, <16 x i32> %v53, i32 1077952576)
   %v58 = tail call <16 x i32> @llvm.hexagon.V6.vdmpybus.acc(<16 x i32> %v56, <16 x i32> %v51, i32 1077952576)
   %v59 = tail call <16 x i32> @llvm.hexagon.V6.vdmpybus.acc(<16 x i32> %v57, <16 x i32> %v55, i32 1077952576)
   %v60 = tail call <16 x i32> @llvm.hexagon.V6.vpackob(<16 x i32> %v59, <16 x i32> %v58)
-  %v61 = getelementptr inbounds <16 x i32>, <16 x i32>* %v46, i32 1
-  store <16 x i32> %v60, <16 x i32>* %v46, align 64, !tbaa !2
+  %v61 = getelementptr inbounds <16 x i32>, ptr %v46, i32 1
+  store <16 x i32> %v60, ptr %v46, align 64, !tbaa !2
   %v62 = add nsw i32 %v47, 1
   %v63 = icmp eq i32 %v62, %v9
   br i1 %v63, label %b6, label %b5
@@ -106,21 +100,21 @@ b5:                                               ; preds = %b5, %b4
 b6:                                               ; preds = %b5, %b4
   %v64 = phi <16 x i32> [ %v29, %b4 ], [ %v55, %b5 ]
   %v65 = phi <16 x i32> [ %v30, %b4 ], [ %v53, %b5 ]
-  %v66 = phi <16 x i32>* [ %v43, %b4 ], [ %v35, %b5 ]
-  %v67 = phi <16 x i32>* [ %v41, %b4 ], [ %v34, %b5 ]
-  %v68 = phi <16 x i32>* [ %v40, %b4 ], [ %v36, %b5 ]
+  %v66 = phi ptr [ %v42, %b4 ], [ %v27, %b5 ]
+  %v67 = phi ptr [ %v31, %b4 ], [ %v26, %b5 ]
+  %v68 = phi ptr [ %v32, %b4 ], [ %v28, %b5 ]
   br i1 %v18, label %b10, label %b7
 
 b7:                                               ; preds = %b6
-  %v69 = load <16 x i32>, <16 x i32>* %v67, align 64, !tbaa !2
-  %v70 = load <16 x i32>, <16 x i32>* %v66, align 64, !tbaa !2
+  %v69 = load <16 x i32>, ptr %v67, align 64, !tbaa !2
+  %v70 = load <16 x i32>, ptr %v66, align 64, !tbaa !2
   br i1 %v19, label %b8, label %b9
 
 b8:                                               ; preds = %b7
-  %v71 = getelementptr inbounds <16 x i32>, <16 x i32>* %v66, i32 1
-  %v72 = getelementptr inbounds <16 x i32>, <16 x i32>* %v67, i32 1
-  %v73 = load <16 x i32>, <16 x i32>* %v72, align 64, !tbaa !2
-  %v74 = load <16 x i32>, <16 x i32>* %v71, align 64, !tbaa !2
+  %v71 = getelementptr inbounds <16 x i32>, ptr %v66, i32 1
+  %v72 = getelementptr inbounds <16 x i32>, ptr %v67, i32 1
+  %v73 = load <16 x i32>, ptr %v72, align 64, !tbaa !2
+  %v74 = load <16 x i32>, ptr %v71, align 64, !tbaa !2
   br label %b9
 
 b9:                                               ; preds = %b8, %b7
@@ -131,19 +125,19 @@ b9:                                               ; preds = %b8, %b7
   %v79 = tail call <16 x i32> @llvm.hexagon.V6.vdmpybus.acc(<16 x i32> %v77, <16 x i32> %v70, i32 1077952576)
   %v80 = tail call <16 x i32> @llvm.hexagon.V6.vdmpybus.acc(<16 x i32> %v78, <16 x i32> %v76, i32 1077952576)
   %v81 = tail call <16 x i32> @llvm.hexagon.V6.vpackob(<16 x i32> %v80, <16 x i32> %v79)
-  %v82 = load <16 x i32>, <16 x i32>* %v68, align 64, !tbaa !2
+  %v82 = load <16 x i32>, ptr %v68, align 64, !tbaa !2
   %v83 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<64 x i1> %v14, <16 x i32> %v81, <16 x i32> %v82)
-  store <16 x i32> %v83, <16 x i32>* %v68, align 64, !tbaa !2
+  store <16 x i32> %v83, ptr %v68, align 64, !tbaa !2
   br label %b10
 
 b10:                                              ; preds = %b9, %b6
   %v84 = phi <16 x i32> [ %v75, %b9 ], [ %v65, %b6 ]
   %v85 = phi <16 x i32> [ %v76, %b9 ], [ %v64, %b6 ]
-  %v86 = getelementptr inbounds i8, i8* %v31, i32 %v16
-  %v87 = getelementptr inbounds i8, i8* %v32, i32 %a5
-  %v88 = getelementptr i8, i8* %v28, i32 %a5
-  %v89 = getelementptr i8, i8* %v27, i32 %v16
-  %v90 = getelementptr i8, i8* %v26, i32 %v16
+  %v86 = getelementptr inbounds i8, ptr %v31, i32 %v16
+  %v87 = getelementptr inbounds i8, ptr %v32, i32 %a5
+  %v88 = getelementptr i8, ptr %v28, i32 %a5
+  %v89 = getelementptr i8, ptr %v27, i32 %v16
+  %v90 = getelementptr i8, ptr %v26, i32 %v16
   br i1 %v38, label %b2, label %b11
 
 b11:                                              ; preds = %b10, %b0

diff  --git a/llvm/test/CodeGen/Hexagon/vect-set_cc_v2i32.ll b/llvm/test/CodeGen/Hexagon/vect-set_cc_v2i32.ll
index fb6a9a8f5661f..cbb9241b2490f 100644
--- a/llvm/test/CodeGen/Hexagon/vect-set_cc_v2i32.ll
+++ b/llvm/test/CodeGen/Hexagon/vect-set_cc_v2i32.ll
@@ -3,10 +3,10 @@
 
 target triple = "hexagon"
 
-%s.0 = type { i16, i16, i16, [4 x i8*], i32, i32, i32, %s.1*, %s.3, i16, i16, i16, i16, i16, %s.4 }
-%s.1 = type { %s.1*, %s.2* }
+%s.0 = type { i16, i16, i16, [4 x ptr], i32, i32, i32, ptr, %s.3, i16, i16, i16, i16, i16, %s.4 }
+%s.1 = type { ptr, ptr }
 %s.2 = type { i16, i16 }
-%s.3 = type { i32, i16*, i16*, i32* }
+%s.3 = type { i32, ptr, ptr, ptr }
 %s.4 = type { i8 }
 
 @g0 = private unnamed_addr constant [7 x i8] c"Static\00", align 1
@@ -14,36 +14,34 @@ target triple = "hexagon"
 @g2 = private unnamed_addr constant [6 x i8] c"Stack\00", align 1
 
 ; Function Attrs: nounwind
-define i32 @f0(i32 %a0, i8** nocapture %a1) #0 {
+define i32 @f0(i32 %a0, ptr nocapture %a1) #0 {
 b0:
   %v0 = alloca [1 x %s.0], align 8
   %v1 = call i32 @f1(i32 5) #0
-  %v2 = getelementptr inbounds [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 6
+  %v2 = getelementptr inbounds [1 x %s.0], ptr %v0, i32 0, i32 0, i32 6
   %v3 = icmp eq i32 %v1, 0
   %v4 = select i1 %v3, i32 7, i32 %v1
-  store i32 %v4, i32* %v2, align 8, !tbaa !0
-  %v5 = getelementptr inbounds [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 0
-  %v6 = bitcast [1 x %s.0]* %v0 to i32*
-  %v7 = load i32, i32* %v6, align 8
+  store i32 %v4, ptr %v2, align 8, !tbaa !0
+  %v7 = load i32, ptr %v0, align 8
   %v8 = trunc i32 %v7 to i16
   %v9 = icmp eq i16 %v8, 0
   br i1 %v9, label %b1, label %b4
 
 b1:                                               ; preds = %b0
-  %v10 = getelementptr inbounds [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 1
+  %v10 = getelementptr inbounds [1 x %s.0], ptr %v0, i32 0, i32 0, i32 1
   %v11 = icmp ult i32 %v7, 65536
   br i1 %v11, label %b2, label %b4
 
 b2:                                               ; preds = %b1
-  %v12 = getelementptr inbounds [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 2
-  %v13 = load i16, i16* %v12, align 4, !tbaa !4
+  %v12 = getelementptr inbounds [1 x %s.0], ptr %v0, i32 0, i32 0, i32 2
+  %v13 = load i16, ptr %v12, align 4, !tbaa !4
   %v14 = icmp eq i16 %v13, 0
   br i1 %v14, label %b3, label %b4
 
 b3:                                               ; preds = %b2
-  store i16 0, i16* %v5, align 8, !tbaa !4
-  store i16 0, i16* %v10, align 2, !tbaa !4
-  store i16 102, i16* %v12, align 4, !tbaa !4
+  store i16 0, ptr %v0, align 8, !tbaa !4
+  store i16 0, ptr %v10, align 2, !tbaa !4
+  store i16 102, ptr %v12, align 4, !tbaa !4
   br label %b4
 
 b4:                                               ; preds = %b3, %b2, %b1, %b0
@@ -60,12 +58,12 @@ b4:                                               ; preds = %b3, %b2, %b1, %b0
   %v25 = trunc i32 %v24 to i16
   %v26 = and i16 %v25, 1
   %v27 = add i16 %v23, %v26
-  %v28 = getelementptr [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 4
-  %v29 = load i32, i32* %v28, align 8
+  %v28 = getelementptr [1 x %s.0], ptr %v0, i32 0, i32 0, i32 4
+  %v29 = load i32, ptr %v28, align 8
   %v30 = zext i16 %v27 to i32
   %v31 = udiv i32 %v29, %v30
-  store i32 %v31, i32* %v28, align 8
-  %v32 = getelementptr [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 3, i32 0
+  store i32 %v31, ptr %v28, align 8
+  %v32 = getelementptr [1 x %s.0], ptr %v0, i32 0, i32 0, i32 3, i32 0
   %v33 = and i32 %v4, 1
   %v34 = icmp eq i32 %v33, 0
   br i1 %v34, label %b5, label %b12
@@ -77,13 +75,12 @@ b5:                                               ; preds = %b12, %b4
   br i1 %v37, label %b14, label %b13
 
 b6:                                               ; preds = %b16
-  %v38 = getelementptr inbounds [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 3, i32 1
-  %v39 = load i8*, i8** %v38, align 4, !tbaa !6
-  %v40 = bitcast i8* %v39 to %s.1*
-  %v41 = call %s.1* @f2(i32 %v31, %s.1* %v40, i16 signext %v15) #0
-  %v42 = getelementptr inbounds [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 7
-  store %s.1* %v41, %s.1** %v42, align 4, !tbaa !6
-  %v43 = load i32, i32* %v2, align 8, !tbaa !0
+  %v38 = getelementptr inbounds [1 x %s.0], ptr %v0, i32 0, i32 0, i32 3, i32 1
+  %v39 = load ptr, ptr %v38, align 4, !tbaa !6
+  %v41 = call ptr @f2(i32 %v31, ptr %v39, i16 signext %v15) #0
+  %v42 = getelementptr inbounds [1 x %s.0], ptr %v0, i32 0, i32 0, i32 7
+  store ptr %v41, ptr %v42, align 4, !tbaa !6
+  %v43 = load i32, ptr %v2, align 8, !tbaa !0
   br label %b7
 
 b7:                                               ; preds = %b16, %b6
@@ -93,17 +90,17 @@ b7:                                               ; preds = %b16, %b6
   br i1 %v46, label %b9, label %b8
 
 b8:                                               ; preds = %b7
-  %v47 = load i32, i32* %v28, align 8, !tbaa !0
-  %v48 = getelementptr inbounds [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 3, i32 2
-  %v49 = load i8*, i8** %v48, align 8, !tbaa !6
-  %v50 = load i32, i32* %v6, align 8
+  %v47 = load i32, ptr %v28, align 8, !tbaa !0
+  %v48 = getelementptr inbounds [1 x %s.0], ptr %v0, i32 0, i32 0, i32 3, i32 2
+  %v49 = load ptr, ptr %v48, align 8, !tbaa !6
+  %v50 = load i32, ptr %v0, align 8
   %v51 = shl i32 %v50, 16
   %v52 = ashr exact i32 %v51, 16
   %v53 = and i32 %v50, -65536
   %v54 = or i32 %v53, %v52
-  %v55 = getelementptr inbounds [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 8
-  %v56 = call i32 @f3(i32 %v47, i8* %v49, i32 %v54, %s.3* %v55) #0
-  %v57 = load i32, i32* %v2, align 8, !tbaa !0
+  %v55 = getelementptr inbounds [1 x %s.0], ptr %v0, i32 0, i32 0, i32 8
+  %v56 = call i32 @f3(i32 %v47, ptr %v49, i32 %v54, ptr %v55) #0
+  %v57 = load i32, ptr %v2, align 8, !tbaa !0
   br label %b9
 
 b9:                                               ; preds = %b8, %b7
@@ -113,30 +110,30 @@ b9:                                               ; preds = %b8, %b7
   br i1 %v60, label %b11, label %b10
 
 b10:                                              ; preds = %b9
-  %v61 = load i32, i32* %v28, align 8, !tbaa !0
-  %v62 = load i16, i16* %v5, align 8, !tbaa !4
-  %v63 = getelementptr inbounds [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 3, i32 3
-  %v64 = load i8*, i8** %v63, align 4, !tbaa !6
-  call void @f4(i32 %v61, i16 signext %v62, i8* %v64) #0
+  %v61 = load i32, ptr %v28, align 8, !tbaa !0
+  %v62 = load i16, ptr %v0, align 8, !tbaa !4
+  %v63 = getelementptr inbounds [1 x %s.0], ptr %v0, i32 0, i32 0, i32 3, i32 3
+  %v64 = load ptr, ptr %v63, align 4, !tbaa !6
+  call void @f4(i32 %v61, i16 signext %v62, ptr %v64) #0
   br label %b11
 
 b11:                                              ; preds = %b10, %b9
   ret i32 0
 
 b12:                                              ; preds = %b4
-  %v65 = getelementptr [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 3, i32 1
-  %v66 = load i8*, i8** %v32, align 8
-  store i8* %v66, i8** %v65, align 4
+  %v65 = getelementptr [1 x %s.0], ptr %v0, i32 0, i32 0, i32 3, i32 1
+  %v66 = load ptr, ptr %v32, align 8
+  store ptr %v66, ptr %v65, align 4
   br label %b5
 
 b13:                                              ; preds = %b5
-  %v67 = getelementptr [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 3, i32 2
-  %v68 = load i8*, i8** %v32, align 8
+  %v67 = getelementptr [1 x %s.0], ptr %v0, i32 0, i32 0, i32 3, i32 2
+  %v68 = load ptr, ptr %v32, align 8
   %v69 = zext i16 %v35 to i32
   %v70 = sub i32 0, %v69
   %v71 = and i32 %v31, %v70
-  %v72 = getelementptr inbounds i8, i8* %v68, i32 %v71
-  store i8* %v72, i8** %v67, align 8
+  %v72 = getelementptr inbounds i8, ptr %v68, i32 %v71
+  store ptr %v72, ptr %v67, align 8
   %v73 = add i16 %v35, 1
   br label %b14
 
@@ -147,12 +144,12 @@ b14:                                              ; preds = %b13, %b5
   br i1 %v76, label %b16, label %b15
 
 b15:                                              ; preds = %b14
-  %v77 = getelementptr [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 3, i32 3
-  %v78 = load i8*, i8** %v32, align 8
+  %v77 = getelementptr [1 x %s.0], ptr %v0, i32 0, i32 0, i32 3, i32 3
+  %v78 = load ptr, ptr %v32, align 8
   %v79 = zext i16 %v74 to i32
   %v80 = mul i32 %v31, %v79
-  %v81 = getelementptr inbounds i8, i8* %v78, i32 %v80
-  store i8* %v81, i8** %v77, align 4
+  %v81 = getelementptr inbounds i8, ptr %v78, i32 %v80
+  store ptr %v81, ptr %v77, align 4
   br label %b16
 
 b16:                                              ; preds = %b15, %b14
@@ -161,11 +158,11 @@ b16:                                              ; preds = %b15, %b14
 
 declare i32 @f1(i32) #0
 
-declare %s.1* @f2(i32, %s.1*, i16 signext) #0
+declare ptr @f2(i32, ptr, i16 signext) #0
 
-declare i32 @f3(i32, i8*, i32, %s.3*) #0
+declare i32 @f3(i32, ptr, i32, ptr) #0
 
-declare void @f4(i32, i16 signext, i8*) #0
+declare void @f4(i32, i16 signext, ptr) #0
 
 attributes #0 = { nounwind "target-cpu"="hexagonv55" }
 

diff  --git a/llvm/test/CodeGen/Hexagon/vect-vd0.ll b/llvm/test/CodeGen/Hexagon/vect-vd0.ll
index f115d862e5621..09174b0d6e9e0 100644
--- a/llvm/test/CodeGen/Hexagon/vect-vd0.ll
+++ b/llvm/test/CodeGen/Hexagon/vect-vd0.ll
@@ -9,10 +9,10 @@
 define i32 @f0(i32 %a0) #0 {
 b0:
   %v0 = alloca i32, align 4
-  store i32 %a0, i32* %v0, align 4
+  store i32 %a0, ptr %v0, align 4
   %v1 = call <16 x i32> @llvm.hexagon.V6.vd0()
-  store <16 x i32> %v1, <16 x i32>* @g0, align 64
-  ret i32 ptrtoint (<16 x i32>* @g0 to i32)
+  store <16 x i32> %v1, ptr @g0, align 64
+  ret i32 ptrtoint (ptr @g0 to i32)
 }
 
 ; Function Attrs: nounwind readnone

diff  --git a/llvm/test/CodeGen/Hexagon/vect-zero_extend.ll b/llvm/test/CodeGen/Hexagon/vect-zero_extend.ll
index 75cde66e984e5..295cb1e4933f1 100644
--- a/llvm/test/CodeGen/Hexagon/vect-zero_extend.ll
+++ b/llvm/test/CodeGen/Hexagon/vect-zero_extend.ll
@@ -13,9 +13,9 @@ b1:                                               ; preds = %b0
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
-  %v0 = load <3 x i8>, <3 x i8>* undef, align 8
+  %v0 = load <3 x i8>, ptr undef, align 8
   %v1 = zext <3 x i8> %v0 to <3 x i16>
-  store <3 x i16> %v1, <3 x i16>* undef, align 8
+  store <3 x i16> %v1, ptr undef, align 8
   br label %b2
 
 b3:                                               ; preds = %b0

diff  --git a/llvm/test/CodeGen/Hexagon/vect/extract-v4i1.ll b/llvm/test/CodeGen/Hexagon/vect/extract-v4i1.ll
index 9dac2c321cf58..d050cf1b195ac 100644
--- a/llvm/test/CodeGen/Hexagon/vect/extract-v4i1.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/extract-v4i1.ll
@@ -11,9 +11,9 @@
 
 target triple = "hexagon"
 
-define void @f0(i16* %a0, <8 x i16>* %a1) #0 {
+define void @f0(ptr %a0, ptr %a1) #0 {
 b0:
-  %v0 = load i16, i16* %a0, align 2
+  %v0 = load i16, ptr %a0, align 2
   %v1 = sext i16 %v0 to i32
   %v2 = insertelement <8 x i32> undef, i32 %v1, i32 0
   %v3 = shufflevector <8 x i32> %v2, <8 x i32> undef, <8 x i32> zeroinitializer
@@ -21,7 +21,7 @@ b0:
   %v5 = and <8 x i32> %v4, %v3
   %v6 = icmp ne <8 x i32> %v5, zeroinitializer
   %v7 = zext <8 x i1> %v6 to <8 x i16>
-  store <8 x i16> %v7, <8 x i16>* %a1, align 8
+  store <8 x i16> %v7, ptr %a1, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/vect/setcc-v2i32.ll b/llvm/test/CodeGen/Hexagon/vect/setcc-v2i32.ll
index b03e4f08bc24d..ac8309a74f9f4 100644
--- a/llvm/test/CodeGen/Hexagon/vect/setcc-v2i32.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/setcc-v2i32.ll
@@ -7,9 +7,9 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define i32 @fred(<2 x i16>* %a0) #0 {
+define i32 @fred(ptr %a0) #0 {
 b1:
-  %v2 = load <2 x i16>, <2 x i16>* %a0, align 2
+  %v2 = load <2 x i16>, ptr %a0, align 2
   %v3 = icmp eq <2 x i16> %v2, zeroinitializer
   %v4 = zext <2 x i1> %v3 to <2 x i16>
   %v5 = extractelement <2 x i16> %v4, i32 1

diff  --git a/llvm/test/CodeGen/Hexagon/vect/vect-anyextend.ll b/llvm/test/CodeGen/Hexagon/vect/vect-anyextend.ll
index fe5fe84fc37dd..cb23e5595a908 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-anyextend.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-anyextend.ll
@@ -8,8 +8,8 @@ target triple = "hexagon-unknown-linux-gnu"
 
 define void @foo() nounwind {
 entry:
-  %_p_vec_full48 = load <4 x i8>, <4 x i8>* undef, align 8
+  %_p_vec_full48 = load <4 x i8>, ptr undef, align 8
   %0 = zext <4 x i8> %_p_vec_full48 to <4 x i32>
-  store <4 x i32> %0, <4 x i32>* undef, align 8
+  store <4 x i32> %0, ptr undef, align 8
   unreachable
 }

diff  --git a/llvm/test/CodeGen/Hexagon/vect/vect-apint-truncate.ll b/llvm/test/CodeGen/Hexagon/vect/vect-apint-truncate.ll
index eb94ddfe2961c..6df628421eb51 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-apint-truncate.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-apint-truncate.ll
@@ -18,10 +18,10 @@ polly.loop_header:                                ; preds = %polly.loop_body, %e
   br i1 %0, label %polly.loop_body, label %polly.loop_after
 
 polly.loop_body:                                  ; preds = %polly.loop_header
-  %_p_vec_full = load <4 x i8>, <4 x i8>* undef, align 8
+  %_p_vec_full = load <4 x i8>, ptr undef, align 8
   %1 = sext <4 x i8> %_p_vec_full to <4 x i32>
   %p_vec = mul <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
   %mulp_vec = add <4 x i32> %p_vec, <i32 21, i32 21, i32 21, i32 21>
-  store <4 x i32> %mulp_vec, <4 x i32>* undef, align 8
+  store <4 x i32> %mulp_vec, ptr undef, align 8
   br label %polly.loop_header
 }

diff  --git a/llvm/test/CodeGen/Hexagon/vect/vect-bad-bitcast.ll b/llvm/test/CodeGen/Hexagon/vect/vect-bad-bitcast.ll
index 460e39e1120b5..34537f674e3a9 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-bad-bitcast.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-bad-bitcast.ll
@@ -15,7 +15,7 @@ entry:
 
 for.body8:                                        ; preds = %for.body8, %polly.loop_exit.loopexit
   %i.120 = phi i32 [ 0, %polly.loop_exit.loopexit ], [ %inc11.24, %for.body8 ]
-  %call = call i32 bitcast (i32 (...)* @fxpBitAllocation to i32 (i32, i32, i32, i32, i16*, i32, i32, i32)*)(i32 0, i32 0, i32 256, i32 %conv9, i16* %WaterLeveldB_out, i32 0, i32 1920, i32 %i.120) #2
+  %call = call i32 @fxpBitAllocation(i32 0, i32 0, i32 256, i32 %conv9, ptr %WaterLeveldB_out, i32 0, i32 1920, i32 %i.120) #2
   %inc11.24 = add i32 %i.120, 25
   %exitcond.24 = icmp eq i32 %inc11.24, 500
   br i1 %exitcond.24, label %for.end12, label %for.body8
@@ -38,15 +38,14 @@ polly.loop_exit.loopexit:                         ; preds = %polly.stmt.for.body
 
 polly.stmt.for.body:                              ; preds = %entry, %polly.stmt.for.body
   %WaterLeveldB.1p_vsel35 = phi <4 x i16> [ <i16 -32768, i16 -32768, i16 -32768, i16 -32768>, %entry ], [ %WaterLeveldB.1p_vsel, %polly.stmt.for.body ]
-  %scevgep.phi = phi i16* [ getelementptr inbounds ([256 x i16], [256 x i16]* @input_buf, i32 0, i32 0), %entry ], [ %scevgep.inc, %polly.stmt.for.body ]
+  %scevgep.phi = phi ptr [ @input_buf, %entry ], [ %scevgep.inc, %polly.stmt.for.body ]
   %polly.indvar = phi i32 [ 0, %entry ], [ %polly.indvar_next, %polly.stmt.for.body ]
-  %vector_ptr = bitcast i16* %scevgep.phi to <4 x i16>*
-  %_p_vec_full = load <4 x i16>, <4 x i16>* %vector_ptr, align 8
+  %_p_vec_full = load <4 x i16>, ptr %scevgep.phi, align 8
   %cmp2p_vicmp = icmp sgt <4 x i16> %_p_vec_full, %WaterLeveldB.1p_vsel35
   %WaterLeveldB.1p_vsel = select <4 x i1> %cmp2p_vicmp, <4 x i16> %_p_vec_full, <4 x i16> %WaterLeveldB.1p_vsel35
   %polly.indvar_next = add nsw i32 %polly.indvar, 4
   %polly.loop_cond = icmp slt i32 %polly.indvar, 252
-  %scevgep.inc = getelementptr i16, i16* %scevgep.phi, i32 4
+  %scevgep.inc = getelementptr i16, ptr %scevgep.phi, i32 4
   br i1 %polly.loop_cond, label %polly.stmt.for.body, label %polly.loop_exit.loopexit
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/vect/vect-bitcast-1.ll b/llvm/test/CodeGen/Hexagon/vect/vect-bitcast-1.ll
index b834744d9b12b..64465b2c4b8ea 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-bitcast-1.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-bitcast-1.ll
@@ -10,7 +10,7 @@ entry:
   br label %while.body
 
 while.body:                                       ; preds = %if.then155, %if.then12, %entry
-  %cmp.i = icmp eq i8* undef, null
+  %cmp.i = icmp eq ptr undef, null
   br i1 %cmp.i, label %lab_ci.exit, label %if.end.i
 
 if.end.i:                                         ; preds = %while.body
@@ -58,7 +58,7 @@ if.then155:                                       ; preds = %if.else150
   %0 = sext <2 x i16> %_p_splat to <2 x i32>
   %mul198p_vec = shl <2 x i32> %0, <i32 2, i32 2>
   %1 = extractelement <2 x i32> %mul198p_vec, i32 0
-  store i32 %1, i32* null, align 4
+  store i32 %1, ptr null, align 4
   br label %while.body
 
 if.else208:                                       ; preds = %if.else150

diff  --git a/llvm/test/CodeGen/Hexagon/vect/vect-bitcast.ll b/llvm/test/CodeGen/Hexagon/vect/vect-bitcast.ll
index 2d6b0b827397f..4806ff3515778 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-bitcast.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-bitcast.ll
@@ -48,7 +48,7 @@ if.then155:                                       ; preds = %if.else150
   %0 = sext <2 x i16> %_p_splat.1 to <2 x i32>
   %mul198p_vec.1 = mul <2 x i32> %0, <i32 4, i32 4>
   %1 = extractelement <2 x i32> %mul198p_vec.1, i32 0
-  store i32 %1, i32* undef, align 4
+  store i32 %1, ptr undef, align 4
   br label %while.body
 
 if.else208:                                       ; preds = %if.else150

diff  --git a/llvm/test/CodeGen/Hexagon/vect/vect-bool-isel-crash.ll b/llvm/test/CodeGen/Hexagon/vect/vect-bool-isel-crash.ll
index 5bc65f0045713..630bada363729 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-bool-isel-crash.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-bool-isel-crash.ll
@@ -6,7 +6,7 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define void @fred(i32* %a0, i8* %a1) #0 {
+define void @fred(ptr %a0, ptr %a1) #0 {
 b0:
   %v1 = icmp sgt <8 x i32> undef, undef
   %v2 = extractelement <8 x i1> %v1, i32 4
@@ -18,13 +18,13 @@ b0:
   %v8 = add nsw i32 %v7, %v5
   %v9 = add nsw i32 0, %v8
   %v10 = add nsw i32 0, %v9
-  %v11 = load i32, i32* %a0, align 4
+  %v11 = load i32, ptr %a0, align 4
   %v12 = mul nsw i32 %v11, %v10
   %v13 = add nsw i32 %v12, 16384
   %v14 = ashr i32 %v13, 15
   %v15 = select i1 undef, i32 %v14, i32 255
   %v16 = trunc i32 %v15 to i8
-  store i8 %v16, i8* %a1, align 1
+  store i8 %v16, ptr %a1, align 1
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll b/llvm/test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll
index 41dfd4d40fe30..03b61f04d4579 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll
@@ -15,15 +15,13 @@ polly.loop_after:                                 ; preds = %polly.loop_body
 polly.loop_body:                                  ; preds = %entry, %polly.loop_body
   %polly.loopiv23 = phi i32 [ 0, %entry ], [ %polly.next_loopiv, %polly.loop_body ]
   %polly.next_loopiv = add nsw i32 %polly.loopiv23, 4
-  %p_arrayidx1 = getelementptr [400 x i32], [400 x i32]* @A, i32 0, i32 %polly.loopiv23
-  %p_arrayidx = getelementptr [400 x i32], [400 x i32]* @B, i32 0, i32 %polly.loopiv23
-  %vector_ptr = bitcast i32* %p_arrayidx to <4 x i32>*
-  %_p_vec_full = load <4 x i32>, <4 x i32>* %vector_ptr, align 8
+  %p_arrayidx1 = getelementptr [400 x i32], ptr @A, i32 0, i32 %polly.loopiv23
+  %p_arrayidx = getelementptr [400 x i32], ptr @B, i32 0, i32 %polly.loopiv23
+  %_p_vec_full = load <4 x i32>, ptr %p_arrayidx, align 8
   %mulp_vec = mul <4 x i32> %_p_vec_full, <i32 7, i32 7, i32 7, i32 7>
-  %vector_ptr12 = bitcast i32* %p_arrayidx1 to <4 x i32>*
-  %_p_vec_full13 = load <4 x i32>, <4 x i32>* %vector_ptr12, align 8
+  %_p_vec_full13 = load <4 x i32>, ptr %p_arrayidx1, align 8
   %addp_vec = add <4 x i32> %_p_vec_full13, %mulp_vec
-  store <4 x i32> %addp_vec, <4 x i32>* %vector_ptr12, align 8
+  store <4 x i32> %addp_vec, ptr %p_arrayidx1, align 8
   %0 = icmp slt i32 %polly.next_loopiv, 400
   br i1 %0, label %polly.loop_body, label %polly.loop_after
 }

diff  --git a/llvm/test/CodeGen/Hexagon/vect/vect-cst-v4i8.ll b/llvm/test/CodeGen/Hexagon/vect/vect-cst-v4i8.ll
index de3e14e2e91ca..391b5c0d8e618 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-cst-v4i8.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-cst-v4i8.ll
@@ -16,15 +16,13 @@ polly.loop_after:                                 ; preds = %polly.loop_body
 polly.loop_body:                                  ; preds = %entry, %polly.loop_body
   %polly.loopiv25 = phi i32 [ 0, %entry ], [ %polly.next_loopiv, %polly.loop_body ]
   %polly.next_loopiv = add i32 %polly.loopiv25, 4
-  %p_arrayidx1 = getelementptr [400 x i8], [400 x i8]* @A, i32 0, i32 %polly.loopiv25
-  %p_arrayidx = getelementptr [400 x i8], [400 x i8]* @B, i32 0, i32 %polly.loopiv25
-  %vector_ptr = bitcast i8* %p_arrayidx to <4 x i8>*
-  %_p_vec_full = load <4 x i8>, <4 x i8>* %vector_ptr, align 8
+  %p_arrayidx1 = getelementptr [400 x i8], ptr @A, i32 0, i32 %polly.loopiv25
+  %p_arrayidx = getelementptr [400 x i8], ptr @B, i32 0, i32 %polly.loopiv25
+  %_p_vec_full = load <4 x i8>, ptr %p_arrayidx, align 8
   %mulp_vec = mul <4 x i8> %_p_vec_full, <i8 1, i8 2, i8 3, i8 4>
-  %vector_ptr14 = bitcast i8* %p_arrayidx1 to <4 x i8>*
-  %_p_vec_full15 = load <4 x i8>, <4 x i8>* %vector_ptr14, align 8
+  %_p_vec_full15 = load <4 x i8>, ptr %p_arrayidx1, align 8
   %addp_vec = add <4 x i8> %_p_vec_full15, %mulp_vec
-  store <4 x i8> %addp_vec, <4 x i8>* %vector_ptr14, align 8
+  store <4 x i8> %addp_vec, ptr %p_arrayidx1, align 8
   %0 = icmp slt i32 %polly.next_loopiv, 400
   br i1 %0, label %polly.loop_body, label %polly.loop_after
 }

diff  --git a/llvm/test/CodeGen/Hexagon/vect/vect-cst.ll b/llvm/test/CodeGen/Hexagon/vect/vect-cst.ll
index 370fa5c7539ee..3deec435b89d6 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-cst.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-cst.ll
@@ -15,15 +15,13 @@ polly.loop_after:                                 ; preds = %polly.loop_body
 polly.loop_body:                                  ; preds = %entry, %polly.loop_body
   %polly.loopiv25 = phi i32 [ 0, %entry ], [ %polly.next_loopiv, %polly.loop_body ]
   %polly.next_loopiv = add i32 %polly.loopiv25, 4
-  %p_arrayidx1 = getelementptr [400 x i8], [400 x i8]* @A, i32 0, i32 %polly.loopiv25
-  %p_arrayidx = getelementptr [400 x i8], [400 x i8]* @B, i32 0, i32 %polly.loopiv25
-  %vector_ptr = bitcast i8* %p_arrayidx to <4 x i8>*
-  %_p_vec_full = load <4 x i8>, <4 x i8>* %vector_ptr, align 8
+  %p_arrayidx1 = getelementptr [400 x i8], ptr @A, i32 0, i32 %polly.loopiv25
+  %p_arrayidx = getelementptr [400 x i8], ptr @B, i32 0, i32 %polly.loopiv25
+  %_p_vec_full = load <4 x i8>, ptr %p_arrayidx, align 8
   %mulp_vec = mul <4 x i8> %_p_vec_full, <i8 7, i8 7, i8 7, i8 7>
-  %vector_ptr14 = bitcast i8* %p_arrayidx1 to <4 x i8>*
-  %_p_vec_full15 = load <4 x i8>, <4 x i8>* %vector_ptr14, align 8
+  %_p_vec_full15 = load <4 x i8>, ptr %p_arrayidx1, align 8
   %addp_vec = add <4 x i8> %_p_vec_full15, %mulp_vec
-  store <4 x i8> %addp_vec, <4 x i8>* %vector_ptr14, align 8
+  store <4 x i8> %addp_vec, ptr %p_arrayidx1, align 8
   %0 = icmp slt i32 %polly.next_loopiv, 400
   br i1 %0, label %polly.loop_body, label %polly.loop_after
 }

diff  --git a/llvm/test/CodeGen/Hexagon/vect/vect-extract.ll b/llvm/test/CodeGen/Hexagon/vect/vect-extract.ll
index 1a7eaf9b88bb0..8deb68dfbcdf2 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-extract.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-extract.ll
@@ -5,7 +5,7 @@
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
 target triple = "hexagon"
 
-define void @foo(i32 %N, i32* nocapture %C, i16* nocapture %A, i16 signext %val) #0 {
+define void @foo(i32 %N, ptr nocapture %C, ptr nocapture %A, i16 signext %val) #0 {
 entry:
   %cmp14 = icmp eq i32 %N, 0
   br i1 %cmp14, label %for.end11, label %for.cond1.preheader.single_entry.preheader
@@ -45,24 +45,22 @@ polly.loop_body.lr.ph:                            ; preds = %for.cond1.preheader
   %4 = extractelement <2 x i32> %3, i32 0
   %5 = call i64 @llvm.hexagon.A2.combinew(i32 %p_conv4, i32 %p_conv4)
   %6 = bitcast i64 %5 to <2 x i32>
-  %p_arrayidx8.gep = getelementptr i32, i32* %C, i32 %4
-  %p_arrayidx.gep = getelementptr i16, i16* %A, i32 %4
+  %p_arrayidx8.gep = getelementptr i32, ptr %C, i32 %4
+  %p_arrayidx.gep = getelementptr i16, ptr %A, i32 %4
   br label %polly.loop_body
 
 polly.loop_body:                                  ; preds = %polly.loop_body.lr.ph, %polly.loop_body
-  %p_arrayidx8.phi = phi i32* [ %p_arrayidx8.gep, %polly.loop_body.lr.ph ], [ %p_arrayidx8.inc, %polly.loop_body ]
-  %p_arrayidx.phi = phi i16* [ %p_arrayidx.gep, %polly.loop_body.lr.ph ], [ %p_arrayidx.inc, %polly.loop_body ]
+  %p_arrayidx8.phi = phi ptr [ %p_arrayidx8.gep, %polly.loop_body.lr.ph ], [ %p_arrayidx8.inc, %polly.loop_body ]
+  %p_arrayidx.phi = phi ptr [ %p_arrayidx.gep, %polly.loop_body.lr.ph ], [ %p_arrayidx.inc, %polly.loop_body ]
   %polly.loopiv38 = phi i32 [ 0, %polly.loop_body.lr.ph ], [ %polly.next_loopiv, %polly.loop_body ]
   %polly.next_loopiv = add nsw i32 %polly.loopiv38, 2
-  %vector_ptr = bitcast i16* %p_arrayidx.phi to <2 x i16>*
-  %_p_vec_full = load <2 x i16>, <2 x i16>* %vector_ptr, align 2
+  %_p_vec_full = load <2 x i16>, ptr %p_arrayidx.phi, align 2
   %7 = sext <2 x i16> %_p_vec_full to <2 x i32>
   %mul5p_vec = mul <2 x i32> %7, %6
-  %vector_ptr21 = bitcast i32* %p_arrayidx8.phi to <2 x i32>*
-  store <2 x i32> %mul5p_vec, <2 x i32>* %vector_ptr21, align 4
+  store <2 x i32> %mul5p_vec, ptr %p_arrayidx8.phi, align 4
   %8 = icmp slt i32 %polly.next_loopiv, %leftover_lb
-  %p_arrayidx8.inc = getelementptr i32, i32* %p_arrayidx8.phi, i32 2
-  %p_arrayidx.inc = getelementptr i16, i16* %p_arrayidx.phi, i32 2
+  %p_arrayidx8.inc = getelementptr i32, ptr %p_arrayidx8.phi, i32 2
+  %p_arrayidx.inc = getelementptr i16, ptr %p_arrayidx.phi, i32 2
   br i1 %8, label %polly.loop_body, label %polly.loop_header26.preheader.loopexit
 
 polly.loop_header26.preheader.loopexit:           ; preds = %polly.loop_body
@@ -80,12 +78,12 @@ polly.stmt.for.body331:                           ; preds = %polly.stmt.for.body
   %polly.loopiv2939 = phi i32 [ %polly.next_loopiv30, %polly.stmt.for.body331 ], [ %polly.loopiv29.ph, %polly.stmt.for.body331.preheader ]
   %polly.next_loopiv30 = add nsw i32 %polly.loopiv2939, 1
   %p_32 = add i32 %polly.loopiv2939, %1
-  %p_arrayidx833 = getelementptr i32, i32* %C, i32 %p_32
-  %p_arrayidx34 = getelementptr i16, i16* %A, i32 %p_32
-  %_p_scalar_ = load i16, i16* %p_arrayidx34, align 2
+  %p_arrayidx833 = getelementptr i32, ptr %C, i32 %p_32
+  %p_arrayidx34 = getelementptr i16, ptr %A, i32 %p_32
+  %_p_scalar_ = load i16, ptr %p_arrayidx34, align 2
   %p_conv = sext i16 %_p_scalar_ to i32
   %p_mul5 = mul nsw i32 %p_conv, %p_conv4
-  store i32 %p_mul5, i32* %p_arrayidx833, align 4
+  store i32 %p_mul5, ptr %p_arrayidx833, align 4
   %exitcond = icmp eq i32 %polly.next_loopiv30, %N
   br i1 %exitcond, label %for.inc9.loopexit, label %polly.stmt.for.body331
 }

diff  --git a/llvm/test/CodeGen/Hexagon/vect/vect-fma.ll b/llvm/test/CodeGen/Hexagon/vect/vect-fma.ll
index c35e0159df702..d5018ff774beb 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-fma.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-fma.ll
@@ -18,9 +18,9 @@ polly.loop_header:                                ; preds = %polly.loop_body, %e
   br i1 %0, label %polly.loop_body, label %polly.loop_after
 
 polly.loop_body:                                  ; preds = %polly.loop_header
-  %_p_vec_full = load <4 x double>, <4 x double>* undef, align 8
+  %_p_vec_full = load <4 x double>, ptr undef, align 8
   %mulp_vec = fmul <4 x double> %_p_vec_full, <double 7.000000e+00, double 7.000000e+00, double 7.000000e+00, double 7.000000e+00>
   %addp_vec = fadd <4 x double> undef, %mulp_vec
-  store <4 x double> %addp_vec, <4 x double>* undef, align 8
+  store <4 x double> %addp_vec, ptr undef, align 8
   br label %polly.loop_header
 }

diff  --git a/llvm/test/CodeGen/Hexagon/vect/vect-illegal-type.ll b/llvm/test/CodeGen/Hexagon/vect/vect-illegal-type.ll
index 3d3bf88b64d33..8e1c9af57bcfb 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-illegal-type.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-illegal-type.ll
@@ -42,9 +42,9 @@ sw.epilog:                                        ; preds = %for.end670, %for.en
   ret void
 
 polly.loop_header228:                             ; preds = %polly.loop_header228, %for.cond375.preheader
-  %_p_splat_one = load <1 x i16>, <1 x i16>* undef, align 8
+  %_p_splat_one = load <1 x i16>, ptr undef, align 8
   %_p_splat = shufflevector <1 x i16> %_p_splat_one, <1 x i16> %_p_splat_one, <4 x i32> zeroinitializer
   %0 = trunc <4 x i16> %_p_splat to <4 x i8>
-  store <4 x i8> %0, <4 x i8>* undef, align 8
+  store <4 x i8> %0, ptr undef, align 8
   br label %polly.loop_header228
 }

diff  --git a/llvm/test/CodeGen/Hexagon/vect/vect-infloop.ll b/llvm/test/CodeGen/Hexagon/vect/vect-infloop.ll
index 9ee0b0ab3aa60..5c3df3224bbda 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-infloop.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-infloop.ll
@@ -2,9 +2,9 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s
 ; CHECK: convert_df2w
 
-define void @a(<2 x double>* %p, <2 x i8>* %q) {
-  %t = load <2 x double>, <2 x double>* %p
+define void @a(ptr %p, ptr %q) {
+  %t = load <2 x double>, ptr %p
   %r = fptosi <2 x double> %t to <2 x i8>
-  store <2 x i8> %r, <2 x i8>* %q
+  store <2 x i8> %r, ptr %q
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/vect/vect-insert-extract-elt.ll b/llvm/test/CodeGen/Hexagon/vect/vect-insert-extract-elt.ll
index baf0cd748f7f8..003fb9f7d50da 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-insert-extract-elt.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-insert-extract-elt.ll
@@ -6,66 +6,60 @@ target triple = "hexagon-unknown-linux-gnu"
 %struct.elt = type { [2 x [4 x %struct.block]] }
 %struct.block = type { [2 x i16] }
 
-define void @foo(%struct.elt* noalias nocapture %p0, %struct.elt* noalias nocapture %p1) nounwind {
+define void @foo(ptr noalias nocapture %p0, ptr noalias nocapture %p1) nounwind {
 entry:
-  %arrayidx1 = getelementptr inbounds %struct.elt, %struct.elt* %p1, i32 0, i32 0, i32 0, i32 3
-  %arrayidx4 = getelementptr inbounds %struct.elt, %struct.elt* %p1, i32 0, i32 0, i32 0, i32 2
-  %arrayidx7 = getelementptr inbounds %struct.elt, %struct.elt* %p0, i32 0, i32 0, i32 0, i32 3
-  %0 = bitcast %struct.block* %arrayidx7 to i32*
-  %1 = bitcast %struct.block* %arrayidx4 to i32*
-  %2 = load i32, i32* %0, align 4
-  store i32 %2, i32* %1, align 4
-  %3 = bitcast %struct.block* %arrayidx1 to i32*
-  store i32 %2, i32* %3, align 4
-  %arrayidx10 = getelementptr inbounds %struct.elt, %struct.elt* %p1, i32 0, i32 0, i32 0, i32 1
-  %arrayidx16 = getelementptr inbounds %struct.elt, %struct.elt* %p0, i32 0, i32 0, i32 0, i32 2
-  %4 = bitcast %struct.block* %arrayidx16 to i32*
-  %5 = bitcast %struct.elt* %p1 to i32*
-  %6 = load i32, i32* %4, align 4
-  store i32 %6, i32* %5, align 4
-  %7 = bitcast %struct.block* %arrayidx10 to i32*
-  store i32 %6, i32* %7, align 4
-  %p_arrayidx26 = getelementptr %struct.elt, %struct.elt* %p0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1
-  %p_arrayidx2632 = getelementptr %struct.elt, %struct.elt* %p0, i32 0, i32 0, i32 0, i32 1, i32 0, i32 1
-  %p_arrayidx2633 = getelementptr %struct.elt, %struct.elt* %p0, i32 0, i32 0, i32 0, i32 2, i32 0, i32 1
-  %p_arrayidx2634 = getelementptr %struct.elt, %struct.elt* %p0, i32 0, i32 0, i32 0, i32 3, i32 0, i32 1
-  %p_arrayidx20 = getelementptr %struct.elt, %struct.elt* %p1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1
-  %p_arrayidx2035 = getelementptr %struct.elt, %struct.elt* %p1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 1
-  %p_arrayidx2036 = getelementptr %struct.elt, %struct.elt* %p1, i32 0, i32 0, i32 0, i32 2, i32 0, i32 1
-  %p_arrayidx2037 = getelementptr %struct.elt, %struct.elt* %p1, i32 0, i32 0, i32 0, i32 3, i32 0, i32 1
-  %8 = lshr i32 %6, 16
-  %9 = trunc i32 %8 to i16
-  %_p_vec_ = insertelement <4 x i16> undef, i16 %9, i32 0
-  %_p_vec_39 = insertelement <4 x i16> %_p_vec_, i16 %9, i32 1
-  %10 = lshr i32 %2, 16
-  %11 = trunc i32 %10 to i16
-  %_p_vec_41 = insertelement <4 x i16> %_p_vec_39, i16 %11, i32 2
-  %_p_vec_43 = insertelement <4 x i16> %_p_vec_41, i16 %11, i32 3
+  %arrayidx1 = getelementptr inbounds %struct.elt, ptr %p1, i32 0, i32 0, i32 0, i32 3
+  %arrayidx4 = getelementptr inbounds %struct.elt, ptr %p1, i32 0, i32 0, i32 0, i32 2
+  %arrayidx7 = getelementptr inbounds %struct.elt, ptr %p0, i32 0, i32 0, i32 0, i32 3
+  %0 = load i32, ptr %arrayidx7, align 4
+  store i32 %0, ptr %arrayidx4, align 4
+  store i32 %0, ptr %arrayidx1, align 4
+  %arrayidx10 = getelementptr inbounds %struct.elt, ptr %p1, i32 0, i32 0, i32 0, i32 1
+  %arrayidx16 = getelementptr inbounds %struct.elt, ptr %p0, i32 0, i32 0, i32 0, i32 2
+  %1 = load i32, ptr %arrayidx16, align 4
+  store i32 %1, ptr %p1, align 4
+  store i32 %1, ptr %arrayidx10, align 4
+  %p_arrayidx26 = getelementptr %struct.elt, ptr %p0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1
+  %p_arrayidx2632 = getelementptr %struct.elt, ptr %p0, i32 0, i32 0, i32 0, i32 1, i32 0, i32 1
+  %p_arrayidx2633 = getelementptr %struct.elt, ptr %p0, i32 0, i32 0, i32 0, i32 2, i32 0, i32 1
+  %p_arrayidx2634 = getelementptr %struct.elt, ptr %p0, i32 0, i32 0, i32 0, i32 3, i32 0, i32 1
+  %p_arrayidx20 = getelementptr %struct.elt, ptr %p1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1
+  %p_arrayidx2035 = getelementptr %struct.elt, ptr %p1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 1
+  %p_arrayidx2036 = getelementptr %struct.elt, ptr %p1, i32 0, i32 0, i32 0, i32 2, i32 0, i32 1
+  %p_arrayidx2037 = getelementptr %struct.elt, ptr %p1, i32 0, i32 0, i32 0, i32 3, i32 0, i32 1
+  %2 = lshr i32 %1, 16
+  %3 = trunc i32 %2 to i16
+  %_p_vec_ = insertelement <4 x i16> undef, i16 %3, i32 0
+  %_p_vec_39 = insertelement <4 x i16> %_p_vec_, i16 %3, i32 1
+  %4 = lshr i32 %0, 16
+  %5 = trunc i32 %4 to i16
+  %_p_vec_41 = insertelement <4 x i16> %_p_vec_39, i16 %5, i32 2
+  %_p_vec_43 = insertelement <4 x i16> %_p_vec_41, i16 %5, i32 3
   %shlp_vec = shl <4 x i16> %_p_vec_43, <i16 1, i16 1, i16 1, i16 1>
-  %12 = extractelement <4 x i16> %shlp_vec, i32 0
-  store i16 %12, i16* %p_arrayidx20, align 2
-  %13 = extractelement <4 x i16> %shlp_vec, i32 1
-  store i16 %13, i16* %p_arrayidx2035, align 2
-  %14 = extractelement <4 x i16> %shlp_vec, i32 2
-  store i16 %14, i16* %p_arrayidx2036, align 2
-  %15 = extractelement <4 x i16> %shlp_vec, i32 3
-  store i16 %15, i16* %p_arrayidx2037, align 2
-  %_p_scalar_44 = load i16, i16* %p_arrayidx26, align 2
+  %6 = extractelement <4 x i16> %shlp_vec, i32 0
+  store i16 %6, ptr %p_arrayidx20, align 2
+  %7 = extractelement <4 x i16> %shlp_vec, i32 1
+  store i16 %7, ptr %p_arrayidx2035, align 2
+  %8 = extractelement <4 x i16> %shlp_vec, i32 2
+  store i16 %8, ptr %p_arrayidx2036, align 2
+  %9 = extractelement <4 x i16> %shlp_vec, i32 3
+  store i16 %9, ptr %p_arrayidx2037, align 2
+  %_p_scalar_44 = load i16, ptr %p_arrayidx26, align 2
   %_p_vec_45 = insertelement <4 x i16> undef, i16 %_p_scalar_44, i32 0
-  %_p_scalar_46 = load i16, i16* %p_arrayidx2632, align 2
+  %_p_scalar_46 = load i16, ptr %p_arrayidx2632, align 2
   %_p_vec_47 = insertelement <4 x i16> %_p_vec_45, i16 %_p_scalar_46, i32 1
-  %_p_scalar_48 = load i16, i16* %p_arrayidx2633, align 2
+  %_p_scalar_48 = load i16, ptr %p_arrayidx2633, align 2
   %_p_vec_49 = insertelement <4 x i16> %_p_vec_47, i16 %_p_scalar_48, i32 2
-  %_p_scalar_50 = load i16, i16* %p_arrayidx2634, align 2
+  %_p_scalar_50 = load i16, ptr %p_arrayidx2634, align 2
   %_p_vec_51 = insertelement <4 x i16> %_p_vec_49, i16 %_p_scalar_50, i32 3
   %shl28p_vec = shl <4 x i16> %_p_vec_51, <i16 1, i16 1, i16 1, i16 1>
-  %16 = extractelement <4 x i16> %shl28p_vec, i32 0
-  store i16 %16, i16* %p_arrayidx26, align 2
-  %17 = extractelement <4 x i16> %shl28p_vec, i32 1
-  store i16 %17, i16* %p_arrayidx2632, align 2
-  %18 = extractelement <4 x i16> %shl28p_vec, i32 2
-  store i16 %18, i16* %p_arrayidx2633, align 2
-  %19 = extractelement <4 x i16> %shl28p_vec, i32 3
-  store i16 %19, i16* %p_arrayidx2634, align 2
+  %10 = extractelement <4 x i16> %shl28p_vec, i32 0
+  store i16 %10, ptr %p_arrayidx26, align 2
+  %11 = extractelement <4 x i16> %shl28p_vec, i32 1
+  store i16 %11, ptr %p_arrayidx2632, align 2
+  %12 = extractelement <4 x i16> %shl28p_vec, i32 2
+  store i16 %12, ptr %p_arrayidx2633, align 2
+  %13 = extractelement <4 x i16> %shl28p_vec, i32 3
+  store i16 %13, ptr %p_arrayidx2634, align 2
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/vect/vect-load-1.ll b/llvm/test/CodeGen/Hexagon/vect/vect-load-1.ll
index 0c3aaefa4ff59..34532180f62bb 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-load-1.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-load-1.ll
@@ -4,7 +4,7 @@
 
 target triple = "hexagon-unknown-linux-gnu"
 
-define void @foo(<2 x i8>* %p) nounwind {
+define void @foo(ptr %p) nounwind {
 entry:
   br label %polly.loop_header
 
@@ -16,10 +16,10 @@ polly.loop_header:                                ; preds = %polly.loop_body, %e
   br i1 %0, label %polly.loop_body, label %polly.loop_after
 
 polly.loop_body:                                  ; preds = %polly.loop_header
-  %_p_vec_full = load <2 x i8>, <2 x i8>* %p, align 8
+  %_p_vec_full = load <2 x i8>, ptr %p, align 8
   %1 = sext <2 x i8> %_p_vec_full to <2 x i32>
   %p_vec = mul <2 x i32> %1, <i32 3, i32 3>
   %mulp_vec = add <2 x i32> %p_vec, <i32 21, i32 21>
-  store <2 x i32> %mulp_vec, <2 x i32>* undef, align 8
+  store <2 x i32> %mulp_vec, ptr undef, align 8
   br label %polly.loop_header
 }

diff  --git a/llvm/test/CodeGen/Hexagon/vect/vect-load-v4i16.ll b/llvm/test/CodeGen/Hexagon/vect/vect-load-v4i16.ll
index c0c691fc5b0a6..161d985622a79 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-load-v4i16.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-load-v4i16.ll
@@ -8,8 +8,8 @@
 ; CHECK-DAG: [[T2:r[0-9]+]] = memuh(r1+#4)
 ; CHECK-DAG: [[T3:r[0-9]+]] = memuh(r1+#6)
 ; CHECK:     r1 |= asl([[T3]],#16)
-define <4 x i16> @danny(<4 x i16>* %p) {
-  %t0 = load <4 x i16>, <4 x i16>* %p, align 2
+define <4 x i16> @danny(ptr %p) {
+  %t0 = load <4 x i16>, ptr %p, align 2
   ret <4 x i16> %t0
 }
 
@@ -17,7 +17,7 @@ define <4 x i16> @danny(<4 x i16>* %p) {
 ; CHECK-DAG: [[T0:r[0-9]+]] = memw(r0+#0)
 ; CHECK-DAG: r1 = memw(r0+#4)
 ; CHECK:     r0 = [[T0]]
-define <4 x i16> @sammy(<4 x i16>* %p) {
-  %t0 = load <4 x i16>, <4 x i16>* %p, align 4
+define <4 x i16> @sammy(ptr %p) {
+  %t0 = load <4 x i16>, ptr %p, align 4
   ret <4 x i16> %t0
 }

diff  --git a/llvm/test/CodeGen/Hexagon/vect/vect-load.ll b/llvm/test/CodeGen/Hexagon/vect/vect-load.ll
index 6bdcc6d3de614..cbbfb79166c49 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-load.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-load.ll
@@ -8,7 +8,7 @@ target triple = "hexagon-unknown-linux-gnu"
 %struct.ext_hdrs.10.65.142.274.307.318.329.681.692.703.714.725.736.758.791.802.846.857.868.879.890.901.945.956.958 = type { i8, i8, i8, i8, i8, i8, i16, i32, [8 x %struct.hcdc_ext_vec.9.64.141.273.306.317.328.680.691.702.713.724.735.757.790.801.845.856.867.878.889.900.944.955.957] }
 %struct.hcdc_ext_vec.9.64.141.273.306.317.328.680.691.702.713.724.735.757.790.801.845.856.867.878.889.900.944.955.957 = type { i8, i8, i16 }
 
-define void @foo(%struct.ext_hdrs.10.65.142.274.307.318.329.681.692.703.714.725.736.758.791.802.846.857.868.879.890.901.945.956.958* %hc_ext_info) nounwind {
+define void @foo(ptr %hc_ext_info) nounwind {
 entry:
   br i1 undef, label %if.end, label %if.then
 
@@ -22,14 +22,14 @@ if.then3:                                         ; preds = %if.end
   br label %if.end5
 
 if.end5:                                          ; preds = %if.then3, %if.end
-  %add.ptr = getelementptr inbounds %struct.ext_hdrs.10.65.142.274.307.318.329.681.692.703.714.725.736.758.791.802.846.857.868.879.890.901.945.956.958, %struct.ext_hdrs.10.65.142.274.307.318.329.681.692.703.714.725.736.758.791.802.846.857.868.879.890.901.945.956.958* %hc_ext_info, i32 0, i32 8, i32 0
-  %add.ptr22 = getelementptr inbounds %struct.ext_hdrs.10.65.142.274.307.318.329.681.692.703.714.725.736.758.791.802.846.857.868.879.890.901.945.956.958, %struct.ext_hdrs.10.65.142.274.307.318.329.681.692.703.714.725.736.758.791.802.846.857.868.879.890.901.945.956.958* null, i32 0, i32 8, i32 undef
+  %add.ptr = getelementptr inbounds %struct.ext_hdrs.10.65.142.274.307.318.329.681.692.703.714.725.736.758.791.802.846.857.868.879.890.901.945.956.958, ptr %hc_ext_info, i32 0, i32 8, i32 0
+  %add.ptr22 = getelementptr inbounds %struct.ext_hdrs.10.65.142.274.307.318.329.681.692.703.714.725.736.758.791.802.846.857.868.879.890.901.945.956.958, ptr null, i32 0, i32 8, i32 undef
   br label %while.cond
 
 while.cond:                                       ; preds = %if.end419, %if.end5
   %gre_chksum.0 = phi <2 x i8> [ undef, %if.end5 ], [ %gre_chksum.2, %if.end419 ]
-  %cmp23 = icmp ult %struct.hcdc_ext_vec.9.64.141.273.306.317.328.680.691.702.713.724.735.757.790.801.845.856.867.878.889.900.944.955.957* null, %add.ptr
-  %cmp25 = icmp ult %struct.hcdc_ext_vec.9.64.141.273.306.317.328.680.691.702.713.724.735.757.790.801.845.856.867.878.889.900.944.955.957* null, %add.ptr22
+  %cmp23 = icmp ult ptr null, %add.ptr
+  %cmp25 = icmp ult ptr null, %add.ptr22
   %sel1 = and i1 %cmp23, %cmp25
   br i1 %sel1, label %while.body, label %while.end422
 
@@ -60,7 +60,7 @@ while.body222:                                    ; preds = %while.body222, %if.
   br i1 undef, label %if.end240, label %while.body222
 
 if.end240:                                        ; preds = %while.body222
-  %_p_vec_full100 = load <2 x i8>, <2 x i8>* undef, align 8
+  %_p_vec_full100 = load <2 x i8>, ptr undef, align 8
   br label %if.end274
 
 if.end274:                                        ; preds = %if.end240, %if.then195

diff  --git a/llvm/test/CodeGen/Hexagon/vect/vect-shuffle.ll b/llvm/test/CodeGen/Hexagon/vect/vect-shuffle.ll
index da5016fb6ed16..2d194dc4047ae 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-shuffle.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-shuffle.ll
@@ -10,7 +10,7 @@
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
 target triple = "hexagon"
 
-define i32 @foo(i16* noalias nocapture %src, i16* noalias nocapture %dstImg, i32 %width, i32 %idx, i32 %flush) #0 {
+define i32 @foo(ptr noalias nocapture %src, ptr noalias nocapture %dstImg, i32 %width, i32 %idx, i32 %flush) #0 {
 entry:
   %0 = tail call i64 @llvm.hexagon.A2.combinew(i32 %flush, i32 %flush)
   %1 = bitcast i64 %0 to <2 x i32>
@@ -20,12 +20,11 @@ polly.loop_after:                                 ; preds = %polly.loop_body
   ret i32 0
 
 polly.loop_body:                                  ; preds = %entry, %polly.loop_body
-  %p_arrayidx35.phi = phi i16* [ %dstImg, %entry ], [ %p_arrayidx35.inc, %polly.loop_body ]
-  %p_arrayidx.phi = phi i16* [ %src, %entry ], [ %p_arrayidx.inc, %polly.loop_body ]
+  %p_arrayidx35.phi = phi ptr [ %dstImg, %entry ], [ %p_arrayidx35.inc, %polly.loop_body ]
+  %p_arrayidx.phi = phi ptr [ %src, %entry ], [ %p_arrayidx.inc, %polly.loop_body ]
   %polly.loopiv56 = phi i32 [ 0, %entry ], [ %polly.next_loopiv, %polly.loop_body ]
   %polly.next_loopiv = add nsw i32 %polly.loopiv56, 4
-  %vector_ptr = bitcast i16* %p_arrayidx.phi to <4 x i16>*
-  %_p_vec_full = load <4 x i16>, <4 x i16>* %vector_ptr, align 2
+  %_p_vec_full = load <4 x i16>, ptr %p_arrayidx.phi, align 2
   %_high_half = shufflevector <4 x i16> %_p_vec_full, <4 x i16> undef, <2 x i32> <i32 2, i32 3>
   %_low_half = shufflevector <4 x i16> %_p_vec_full, <4 x i16> undef, <2 x i32> <i32 0, i32 1>
   %2 = zext <2 x i16> %_low_half to <2 x i32>
@@ -35,11 +34,10 @@ polly.loop_body:                                  ; preds = %entry, %polly.loop_
   %4 = trunc <2 x i32> %add33p_vec to <2 x i16>
   %5 = trunc <2 x i32> %add33p_vec48 to <2 x i16>
   %_combined_vec = shufflevector <2 x i16> %4, <2 x i16> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  %vector_ptr49 = bitcast i16* %p_arrayidx35.phi to <4 x i16>*
-  store <4 x i16> %_combined_vec, <4 x i16>* %vector_ptr49, align 2
+  store <4 x i16> %_combined_vec, ptr %p_arrayidx35.phi, align 2
   %6 = icmp slt i32 %polly.next_loopiv, 1024
-  %p_arrayidx35.inc = getelementptr i16, i16* %p_arrayidx35.phi, i32 4
-  %p_arrayidx.inc = getelementptr i16, i16* %p_arrayidx.phi, i32 4
+  %p_arrayidx35.inc = getelementptr i16, ptr %p_arrayidx35.phi, i32 4
+  %p_arrayidx.inc = getelementptr i16, ptr %p_arrayidx.phi, i32 4
   br i1 %6, label %polly.loop_body, label %polly.loop_after
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/vect/vect-splat.ll b/llvm/test/CodeGen/Hexagon/vect/vect-splat.ll
index 8cc226a00dab8..54533b5668b24 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-splat.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-splat.ll
@@ -4,13 +4,13 @@
 
 %i4 = type <4 x i32>
 
-define void @splat_i4(%i4* %P, %i4* %Q, i32 %X) {
+define void @splat_i4(ptr %P, ptr %Q, i32 %X) {
 	%tmp = insertelement %i4 undef, i32 %X, i32 0		; <%i4> [#uses=1]
 	%tmp2 = insertelement %i4 %tmp, i32 %X, i32 1		; <%i4> [#uses=1]
 	%tmp4 = insertelement %i4 %tmp2, i32 %X, i32 2		; <%i4> [#uses=1]
 	%tmp6 = insertelement %i4 %tmp4, i32 %X, i32 3		; <%i4> [#uses=1]
-	%q = load %i4, %i4* %Q		; <%i4> [#uses=1]
+	%q = load %i4, ptr %Q		; <%i4> [#uses=1]
 	%R = add %i4 %q, %tmp6		; <%i4> [#uses=1]
-	store %i4 %R, %i4* %P
+	store %i4 %R, ptr %P
 	ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/vect/vect-store-v2i16.ll b/llvm/test/CodeGen/Hexagon/vect/vect-store-v2i16.ll
index 1de3058e68a61..7813466a55f7e 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-store-v2i16.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-store-v2i16.ll
@@ -40,12 +40,12 @@ polly.loop_after378:                              ; preds = %polly.loop_body377
   unreachable
 
 polly.loop_body377:                               ; preds = %polly.loop_body377, %for.end
-  %_p_vec_full384 = load <2 x i16>, <2 x i16>* undef, align 4
+  %_p_vec_full384 = load <2 x i16>, ptr undef, align 4
   %0 = sext <2 x i16> %_p_vec_full384 to <2 x i32>
   %mulp_vec = mul <2 x i32> %0, %_p_splat387
   %shr100293p_vec = lshr <2 x i32> %mulp_vec, <i32 15, i32 15>
   %1 = trunc <2 x i32> %shr100293p_vec to <2 x i16>
-  store <2 x i16> %1, <2 x i16>* undef, align 4
+  store <2 x i16> %1, ptr undef, align 4
   br i1 undef, label %polly.loop_body377, label %polly.loop_after378
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/vect/vect-truncate.ll b/llvm/test/CodeGen/Hexagon/vect/vect-truncate.ll
index 01c493567372b..7466ee8171d47 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-truncate.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-truncate.ll
@@ -26,7 +26,7 @@ polly.loop_body:                                  ; preds = %polly.loop_header
   %p_25 = call i32 @llvm.hexagon.A2.asrh(i32 undef)
   %1 = insertelement <4 x i32> undef, i32 %p_25, i32 3
   %2 = trunc <4 x i32> %1 to <4 x i16>
-  store <4 x i16> %2, <4 x i16>* undef, align 8
+  store <4 x i16> %2, ptr undef, align 8
   br label %polly.loop_header
 
 polly.loop_after45:                               ; preds = %polly.loop_header43

diff  --git a/llvm/test/CodeGen/Hexagon/vect/vect-v4i16.ll b/llvm/test/CodeGen/Hexagon/vect/vect-v4i16.ll
index 7b12c8f37339f..99487c4f77517 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-v4i16.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-v4i16.ll
@@ -6,7 +6,7 @@
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
 target triple = "hexagon"
 
-define void @matrix_add_const(i32 %N, i16* nocapture %A, i16 signext %val) #0 {
+define void @matrix_add_const(i32 %N, ptr nocapture %A, i16 signext %val) #0 {
 entry:
   %cmp5 = icmp eq i32 %N, 0
   br i1 %cmp5, label %for.end, label %polly.cond
@@ -46,24 +46,23 @@ polly.stmt.for.body29.preheader:                  ; preds = %polly.loop_header24
   br label %polly.stmt.for.body29
 
 polly.loop_body:                                  ; preds = %polly.loop_body.lr.ph, %polly.loop_body
-  %p_arrayidx.phi = phi i16* [ %A, %polly.loop_body.lr.ph ], [ %p_arrayidx.inc, %polly.loop_body ]
+  %p_arrayidx.phi = phi ptr [ %A, %polly.loop_body.lr.ph ], [ %p_arrayidx.inc, %polly.loop_body ]
   %polly.loopiv34 = phi i32 [ 0, %polly.loop_body.lr.ph ], [ %polly.next_loopiv, %polly.loop_body ]
   %polly.next_loopiv = add nsw i32 %polly.loopiv34, 4
-  %vector_ptr = bitcast i16* %p_arrayidx.phi to <4 x i16>*
-  %_p_vec_full = load <4 x i16>, <4 x i16>* %vector_ptr, align 2
+  %_p_vec_full = load <4 x i16>, ptr %p_arrayidx.phi, align 2
   %addp_vec = add <4 x i16> %_p_vec_full, %6
-  store <4 x i16> %addp_vec, <4 x i16>* %vector_ptr, align 2
+  store <4 x i16> %addp_vec, ptr %p_arrayidx.phi, align 2
   %8 = icmp slt i32 %polly.next_loopiv, %leftover_lb
-  %p_arrayidx.inc = getelementptr i16, i16* %p_arrayidx.phi, i32 4
+  %p_arrayidx.inc = getelementptr i16, ptr %p_arrayidx.phi, i32 4
   br i1 %8, label %polly.loop_body, label %polly.loop_header24.preheader.loopexit
 
 polly.stmt.for.body29:                            ; preds = %polly.stmt.for.body29.preheader, %polly.stmt.for.body29
   %polly.loopiv2733 = phi i32 [ %polly.next_loopiv28, %polly.stmt.for.body29 ], [ %polly.loopiv27.ph, %polly.stmt.for.body29.preheader ]
   %polly.next_loopiv28 = add nsw i32 %polly.loopiv2733, 1
-  %p_arrayidx30 = getelementptr i16, i16* %A, i32 %polly.loopiv2733
-  %_p_scalar_ = load i16, i16* %p_arrayidx30, align 2
+  %p_arrayidx30 = getelementptr i16, ptr %A, i32 %polly.loopiv2733
+  %_p_scalar_ = load i16, ptr %p_arrayidx30, align 2
   %p_add = add i16 %_p_scalar_, %val
-  store i16 %p_add, i16* %p_arrayidx30, align 2
+  store i16 %p_add, ptr %p_arrayidx30, align 2
   %exitcond = icmp eq i32 %polly.next_loopiv28, %N
   br i1 %exitcond, label %for.end.loopexit, label %polly.stmt.for.body29
 }

diff  --git a/llvm/test/CodeGen/Hexagon/vect/vect-vaslw.ll b/llvm/test/CodeGen/Hexagon/vect/vect-vaslw.ll
index 23c167608a10f..dcd2888d74779 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-vaslw.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-vaslw.ll
@@ -4,11 +4,10 @@
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
 target triple = "hexagon-unknown-linux-gnu"
 
-define void @foo(i16* nocapture %v) nounwind {
+define void @foo(ptr nocapture %v) nounwind {
 entry:
-  %p_arrayidx = getelementptr i16, i16* %v, i32 4
-  %vector_ptr = bitcast i16* %p_arrayidx to <4 x i16>*
-  %_p_vec_full = load <4 x i16>, <4 x i16>* %vector_ptr, align 2
+  %p_arrayidx = getelementptr i16, ptr %v, i32 4
+  %_p_vec_full = load <4 x i16>, ptr %p_arrayidx, align 2
   %_high_half = shufflevector <4 x i16> %_p_vec_full, <4 x i16> undef, <2 x i32> <i32 2, i32 3>
   %_low_half = shufflevector <4 x i16> %_p_vec_full, <4 x i16> undef, <2 x i32> <i32 0, i32 1>
   %0 = sext <2 x i16> %_low_half to <2 x i32>
@@ -17,8 +16,7 @@ entry:
   %shr6p_vec19 = shl <2 x i32> %1, <i32 2, i32 2>
   %addp_vec = add <2 x i32> %shr6p_vec, <i32 34, i32 34>
   %addp_vec20 = add <2 x i32> %shr6p_vec19, <i32 34, i32 34>
-  %vector_ptr21 = bitcast i16* %v to <4 x i16>*
-  %_p_vec_full22 = load <4 x i16>, <4 x i16>* %vector_ptr21, align 2
+  %_p_vec_full22 = load <4 x i16>, ptr %v, align 2
   %_high_half23 = shufflevector <4 x i16> %_p_vec_full22, <4 x i16> undef, <2 x i32> <i32 2, i32 3>
   %_low_half24 = shufflevector <4 x i16> %_p_vec_full22, <4 x i16> undef, <2 x i32> <i32 0, i32 1>
   %2 = zext <2 x i16> %_low_half24 to <2 x i32>
@@ -28,6 +26,6 @@ entry:
   %4 = trunc <2 x i32> %add3p_vec to <2 x i16>
   %5 = trunc <2 x i32> %add3p_vec25 to <2 x i16>
   %_combined_vec = shufflevector <2 x i16> %4, <2 x i16> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  store <4 x i16> %_combined_vec, <4 x i16>* %vector_ptr21, align 2
+  store <4 x i16> %_combined_vec, ptr %v, align 2
   ret void
 }

diff  --git a/llvm/test/CodeGen/Hexagon/vect/vect-vshifts.ll b/llvm/test/CodeGen/Hexagon/vect/vect-vshifts.ll
index 2291222236be1..d3f51c5382e4a 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-vshifts.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-vshifts.ll
@@ -6,11 +6,11 @@
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
 target triple = "hexagon"
 
-define void @foo(i32* nocapture %buf, i32* nocapture %dest, i32 %offset, i32 %oddBlock, i32 %gb) #0 {
+define void @foo(ptr nocapture %buf, ptr nocapture %dest, i32 %offset, i32 %oddBlock, i32 %gb) #0 {
 entry:
-  %0 = load i32, i32* %buf, align 4, !tbaa !0
+  %0 = load i32, ptr %buf, align 4, !tbaa !0
   %shr = ashr i32 %0, %gb
-  store i32 %shr, i32* %buf, align 4, !tbaa !0
+  store i32 %shr, ptr %buf, align 4, !tbaa !0
   %not.tobool = icmp eq i32 %oddBlock, 0
   %1 = sub i32 %offset, %oddBlock
   %2 = zext i1 %not.tobool to i32
@@ -27,14 +27,14 @@ entry:
   %12 = bitcast i64 %11 to <2 x i32>
   %sub12p_vec = add <2 x i32> %10, %12
   %p_22 = add i32 %4, 64
-  %p_d.018 = getelementptr i32, i32* %dest, i32 %4
-  %p_d.01823 = getelementptr i32, i32* %dest, i32 %p_22
+  %p_d.018 = getelementptr i32, ptr %dest, i32 %4
+  %p_d.01823 = getelementptr i32, ptr %dest, i32 %p_22
   %p_25 = add i32 %4, 72
-  %p_arrayidx14 = getelementptr i32, i32* %dest, i32 %5
-  %p_arrayidx1426 = getelementptr i32, i32* %dest, i32 %p_25
-  %_p_scalar_ = load i32, i32* %p_d.018, align 4
+  %p_arrayidx14 = getelementptr i32, ptr %dest, i32 %5
+  %p_arrayidx1426 = getelementptr i32, ptr %dest, i32 %p_25
+  %_p_scalar_ = load i32, ptr %p_d.018, align 4
   %_p_vec_ = insertelement <2 x i32> undef, i32 %_p_scalar_, i32 0
-  %_p_scalar_27 = load i32, i32* %p_d.01823, align 4
+  %_p_scalar_27 = load i32, ptr %p_d.01823, align 4
   %_p_vec_28 = insertelement <2 x i32> %_p_vec_, i32 %_p_scalar_27, i32 1
   %13 = bitcast <2 x i32> %_p_vec_28 to i64
   %14 = tail call i64 @llvm.hexagon.S2.asr.i.vw(i64 %13, i32 31)
@@ -48,22 +48,22 @@ entry:
   %20 = tail call i64 @llvm.hexagon.S2.asl.r.vw(i64 %19, i32 %gb)
   %21 = bitcast i64 %20 to <2 x i32>
   %22 = extractelement <2 x i32> %21, i32 0
-  store i32 %22, i32* %p_arrayidx14, align 4
+  store i32 %22, ptr %p_arrayidx14, align 4
   %23 = extractelement <2 x i32> %21, i32 1
-  store i32 %23, i32* %p_arrayidx1426, align 4
-  store i32 %22, i32* %p_d.018, align 4
-  store i32 %23, i32* %p_d.01823, align 4
+  store i32 %23, ptr %p_arrayidx1426, align 4
+  store i32 %22, ptr %p_d.018, align 4
+  store i32 %23, ptr %p_d.01823, align 4
   %p_21.1 = add i32 %4, 128
   %p_22.1 = add i32 %4, 192
-  %p_d.018.1 = getelementptr i32, i32* %dest, i32 %p_21.1
-  %p_d.01823.1 = getelementptr i32, i32* %dest, i32 %p_22.1
+  %p_d.018.1 = getelementptr i32, ptr %dest, i32 %p_21.1
+  %p_d.01823.1 = getelementptr i32, ptr %dest, i32 %p_22.1
   %p_24.1 = add i32 %4, 136
   %p_25.1 = add i32 %4, 200
-  %p_arrayidx14.1 = getelementptr i32, i32* %dest, i32 %p_24.1
-  %p_arrayidx1426.1 = getelementptr i32, i32* %dest, i32 %p_25.1
-  %_p_scalar_.1 = load i32, i32* %p_d.018.1, align 4
+  %p_arrayidx14.1 = getelementptr i32, ptr %dest, i32 %p_24.1
+  %p_arrayidx1426.1 = getelementptr i32, ptr %dest, i32 %p_25.1
+  %_p_scalar_.1 = load i32, ptr %p_d.018.1, align 4
   %_p_vec_.1 = insertelement <2 x i32> undef, i32 %_p_scalar_.1, i32 0
-  %_p_scalar_27.1 = load i32, i32* %p_d.01823.1, align 4
+  %_p_scalar_27.1 = load i32, ptr %p_d.01823.1, align 4
   %_p_vec_28.1 = insertelement <2 x i32> %_p_vec_.1, i32 %_p_scalar_27.1, i32 1
   %24 = bitcast <2 x i32> %_p_vec_28.1 to i64
   %25 = tail call i64 @llvm.hexagon.S2.asr.i.vw(i64 %24, i32 31)
@@ -77,22 +77,22 @@ entry:
   %31 = tail call i64 @llvm.hexagon.S2.asl.r.vw(i64 %30, i32 %gb)
   %32 = bitcast i64 %31 to <2 x i32>
   %33 = extractelement <2 x i32> %32, i32 0
-  store i32 %33, i32* %p_arrayidx14.1, align 4
+  store i32 %33, ptr %p_arrayidx14.1, align 4
   %34 = extractelement <2 x i32> %32, i32 1
-  store i32 %34, i32* %p_arrayidx1426.1, align 4
-  store i32 %33, i32* %p_d.018.1, align 4
-  store i32 %34, i32* %p_d.01823.1, align 4
+  store i32 %34, ptr %p_arrayidx1426.1, align 4
+  store i32 %33, ptr %p_d.018.1, align 4
+  store i32 %34, ptr %p_d.01823.1, align 4
   %p_21.2 = add i32 %4, 256
   %p_22.2 = add i32 %4, 320
-  %p_d.018.2 = getelementptr i32, i32* %dest, i32 %p_21.2
-  %p_d.01823.2 = getelementptr i32, i32* %dest, i32 %p_22.2
+  %p_d.018.2 = getelementptr i32, ptr %dest, i32 %p_21.2
+  %p_d.01823.2 = getelementptr i32, ptr %dest, i32 %p_22.2
   %p_24.2 = add i32 %4, 264
   %p_25.2 = add i32 %4, 328
-  %p_arrayidx14.2 = getelementptr i32, i32* %dest, i32 %p_24.2
-  %p_arrayidx1426.2 = getelementptr i32, i32* %dest, i32 %p_25.2
-  %_p_scalar_.2 = load i32, i32* %p_d.018.2, align 4
+  %p_arrayidx14.2 = getelementptr i32, ptr %dest, i32 %p_24.2
+  %p_arrayidx1426.2 = getelementptr i32, ptr %dest, i32 %p_25.2
+  %_p_scalar_.2 = load i32, ptr %p_d.018.2, align 4
   %_p_vec_.2 = insertelement <2 x i32> undef, i32 %_p_scalar_.2, i32 0
-  %_p_scalar_27.2 = load i32, i32* %p_d.01823.2, align 4
+  %_p_scalar_27.2 = load i32, ptr %p_d.01823.2, align 4
   %_p_vec_28.2 = insertelement <2 x i32> %_p_vec_.2, i32 %_p_scalar_27.2, i32 1
   %35 = bitcast <2 x i32> %_p_vec_28.2 to i64
   %36 = tail call i64 @llvm.hexagon.S2.asr.i.vw(i64 %35, i32 31)
@@ -106,22 +106,22 @@ entry:
   %42 = tail call i64 @llvm.hexagon.S2.asl.r.vw(i64 %41, i32 %gb)
   %43 = bitcast i64 %42 to <2 x i32>
   %44 = extractelement <2 x i32> %43, i32 0
-  store i32 %44, i32* %p_arrayidx14.2, align 4
+  store i32 %44, ptr %p_arrayidx14.2, align 4
   %45 = extractelement <2 x i32> %43, i32 1
-  store i32 %45, i32* %p_arrayidx1426.2, align 4
-  store i32 %44, i32* %p_d.018.2, align 4
-  store i32 %45, i32* %p_d.01823.2, align 4
+  store i32 %45, ptr %p_arrayidx1426.2, align 4
+  store i32 %44, ptr %p_d.018.2, align 4
+  store i32 %45, ptr %p_d.01823.2, align 4
   %p_21.3 = add i32 %4, 384
   %p_22.3 = add i32 %4, 448
-  %p_d.018.3 = getelementptr i32, i32* %dest, i32 %p_21.3
-  %p_d.01823.3 = getelementptr i32, i32* %dest, i32 %p_22.3
+  %p_d.018.3 = getelementptr i32, ptr %dest, i32 %p_21.3
+  %p_d.01823.3 = getelementptr i32, ptr %dest, i32 %p_22.3
   %p_24.3 = add i32 %4, 392
   %p_25.3 = add i32 %4, 456
-  %p_arrayidx14.3 = getelementptr i32, i32* %dest, i32 %p_24.3
-  %p_arrayidx1426.3 = getelementptr i32, i32* %dest, i32 %p_25.3
-  %_p_scalar_.3 = load i32, i32* %p_d.018.3, align 4
+  %p_arrayidx14.3 = getelementptr i32, ptr %dest, i32 %p_24.3
+  %p_arrayidx1426.3 = getelementptr i32, ptr %dest, i32 %p_25.3
+  %_p_scalar_.3 = load i32, ptr %p_d.018.3, align 4
   %_p_vec_.3 = insertelement <2 x i32> undef, i32 %_p_scalar_.3, i32 0
-  %_p_scalar_27.3 = load i32, i32* %p_d.01823.3, align 4
+  %_p_scalar_27.3 = load i32, ptr %p_d.01823.3, align 4
   %_p_vec_28.3 = insertelement <2 x i32> %_p_vec_.3, i32 %_p_scalar_27.3, i32 1
   %46 = bitcast <2 x i32> %_p_vec_28.3 to i64
   %47 = tail call i64 @llvm.hexagon.S2.asr.i.vw(i64 %46, i32 31)
@@ -135,22 +135,22 @@ entry:
   %53 = tail call i64 @llvm.hexagon.S2.asl.r.vw(i64 %52, i32 %gb)
   %54 = bitcast i64 %53 to <2 x i32>
   %55 = extractelement <2 x i32> %54, i32 0
-  store i32 %55, i32* %p_arrayidx14.3, align 4
+  store i32 %55, ptr %p_arrayidx14.3, align 4
   %56 = extractelement <2 x i32> %54, i32 1
-  store i32 %56, i32* %p_arrayidx1426.3, align 4
-  store i32 %55, i32* %p_d.018.3, align 4
-  store i32 %56, i32* %p_d.01823.3, align 4
+  store i32 %56, ptr %p_arrayidx1426.3, align 4
+  store i32 %55, ptr %p_d.018.3, align 4
+  store i32 %56, ptr %p_d.01823.3, align 4
   %p_21.4 = add i32 %4, 512
   %p_22.4 = add i32 %4, 576
-  %p_d.018.4 = getelementptr i32, i32* %dest, i32 %p_21.4
-  %p_d.01823.4 = getelementptr i32, i32* %dest, i32 %p_22.4
+  %p_d.018.4 = getelementptr i32, ptr %dest, i32 %p_21.4
+  %p_d.01823.4 = getelementptr i32, ptr %dest, i32 %p_22.4
   %p_24.4 = add i32 %4, 520
   %p_25.4 = add i32 %4, 584
-  %p_arrayidx14.4 = getelementptr i32, i32* %dest, i32 %p_24.4
-  %p_arrayidx1426.4 = getelementptr i32, i32* %dest, i32 %p_25.4
-  %_p_scalar_.4 = load i32, i32* %p_d.018.4, align 4
+  %p_arrayidx14.4 = getelementptr i32, ptr %dest, i32 %p_24.4
+  %p_arrayidx1426.4 = getelementptr i32, ptr %dest, i32 %p_25.4
+  %_p_scalar_.4 = load i32, ptr %p_d.018.4, align 4
   %_p_vec_.4 = insertelement <2 x i32> undef, i32 %_p_scalar_.4, i32 0
-  %_p_scalar_27.4 = load i32, i32* %p_d.01823.4, align 4
+  %_p_scalar_27.4 = load i32, ptr %p_d.01823.4, align 4
   %_p_vec_28.4 = insertelement <2 x i32> %_p_vec_.4, i32 %_p_scalar_27.4, i32 1
   %57 = bitcast <2 x i32> %_p_vec_28.4 to i64
   %58 = tail call i64 @llvm.hexagon.S2.asr.i.vw(i64 %57, i32 31)
@@ -164,22 +164,22 @@ entry:
   %64 = tail call i64 @llvm.hexagon.S2.asl.r.vw(i64 %63, i32 %gb)
   %65 = bitcast i64 %64 to <2 x i32>
   %66 = extractelement <2 x i32> %65, i32 0
-  store i32 %66, i32* %p_arrayidx14.4, align 4
+  store i32 %66, ptr %p_arrayidx14.4, align 4
   %67 = extractelement <2 x i32> %65, i32 1
-  store i32 %67, i32* %p_arrayidx1426.4, align 4
-  store i32 %66, i32* %p_d.018.4, align 4
-  store i32 %67, i32* %p_d.01823.4, align 4
+  store i32 %67, ptr %p_arrayidx1426.4, align 4
+  store i32 %66, ptr %p_d.018.4, align 4
+  store i32 %67, ptr %p_d.01823.4, align 4
   %p_21.5 = add i32 %4, 640
   %p_22.5 = add i32 %4, 704
-  %p_d.018.5 = getelementptr i32, i32* %dest, i32 %p_21.5
-  %p_d.01823.5 = getelementptr i32, i32* %dest, i32 %p_22.5
+  %p_d.018.5 = getelementptr i32, ptr %dest, i32 %p_21.5
+  %p_d.01823.5 = getelementptr i32, ptr %dest, i32 %p_22.5
   %p_24.5 = add i32 %4, 648
   %p_25.5 = add i32 %4, 712
-  %p_arrayidx14.5 = getelementptr i32, i32* %dest, i32 %p_24.5
-  %p_arrayidx1426.5 = getelementptr i32, i32* %dest, i32 %p_25.5
-  %_p_scalar_.5 = load i32, i32* %p_d.018.5, align 4
+  %p_arrayidx14.5 = getelementptr i32, ptr %dest, i32 %p_24.5
+  %p_arrayidx1426.5 = getelementptr i32, ptr %dest, i32 %p_25.5
+  %_p_scalar_.5 = load i32, ptr %p_d.018.5, align 4
   %_p_vec_.5 = insertelement <2 x i32> undef, i32 %_p_scalar_.5, i32 0
-  %_p_scalar_27.5 = load i32, i32* %p_d.01823.5, align 4
+  %_p_scalar_27.5 = load i32, ptr %p_d.01823.5, align 4
   %_p_vec_28.5 = insertelement <2 x i32> %_p_vec_.5, i32 %_p_scalar_27.5, i32 1
   %68 = bitcast <2 x i32> %_p_vec_28.5 to i64
   %69 = tail call i64 @llvm.hexagon.S2.asr.i.vw(i64 %68, i32 31)
@@ -193,22 +193,22 @@ entry:
   %75 = tail call i64 @llvm.hexagon.S2.asl.r.vw(i64 %74, i32 %gb)
   %76 = bitcast i64 %75 to <2 x i32>
   %77 = extractelement <2 x i32> %76, i32 0
-  store i32 %77, i32* %p_arrayidx14.5, align 4
+  store i32 %77, ptr %p_arrayidx14.5, align 4
   %78 = extractelement <2 x i32> %76, i32 1
-  store i32 %78, i32* %p_arrayidx1426.5, align 4
-  store i32 %77, i32* %p_d.018.5, align 4
-  store i32 %78, i32* %p_d.01823.5, align 4
+  store i32 %78, ptr %p_arrayidx1426.5, align 4
+  store i32 %77, ptr %p_d.018.5, align 4
+  store i32 %78, ptr %p_d.01823.5, align 4
   %p_21.6 = add i32 %4, 768
   %p_22.6 = add i32 %4, 832
-  %p_d.018.6 = getelementptr i32, i32* %dest, i32 %p_21.6
-  %p_d.01823.6 = getelementptr i32, i32* %dest, i32 %p_22.6
+  %p_d.018.6 = getelementptr i32, ptr %dest, i32 %p_21.6
+  %p_d.01823.6 = getelementptr i32, ptr %dest, i32 %p_22.6
   %p_24.6 = add i32 %4, 776
   %p_25.6 = add i32 %4, 840
-  %p_arrayidx14.6 = getelementptr i32, i32* %dest, i32 %p_24.6
-  %p_arrayidx1426.6 = getelementptr i32, i32* %dest, i32 %p_25.6
-  %_p_scalar_.6 = load i32, i32* %p_d.018.6, align 4
+  %p_arrayidx14.6 = getelementptr i32, ptr %dest, i32 %p_24.6
+  %p_arrayidx1426.6 = getelementptr i32, ptr %dest, i32 %p_25.6
+  %_p_scalar_.6 = load i32, ptr %p_d.018.6, align 4
   %_p_vec_.6 = insertelement <2 x i32> undef, i32 %_p_scalar_.6, i32 0
-  %_p_scalar_27.6 = load i32, i32* %p_d.01823.6, align 4
+  %_p_scalar_27.6 = load i32, ptr %p_d.01823.6, align 4
   %_p_vec_28.6 = insertelement <2 x i32> %_p_vec_.6, i32 %_p_scalar_27.6, i32 1
   %79 = bitcast <2 x i32> %_p_vec_28.6 to i64
   %80 = tail call i64 @llvm.hexagon.S2.asr.i.vw(i64 %79, i32 31)
@@ -222,22 +222,22 @@ entry:
   %86 = tail call i64 @llvm.hexagon.S2.asl.r.vw(i64 %85, i32 %gb)
   %87 = bitcast i64 %86 to <2 x i32>
   %88 = extractelement <2 x i32> %87, i32 0
-  store i32 %88, i32* %p_arrayidx14.6, align 4
+  store i32 %88, ptr %p_arrayidx14.6, align 4
   %89 = extractelement <2 x i32> %87, i32 1
-  store i32 %89, i32* %p_arrayidx1426.6, align 4
-  store i32 %88, i32* %p_d.018.6, align 4
-  store i32 %89, i32* %p_d.01823.6, align 4
+  store i32 %89, ptr %p_arrayidx1426.6, align 4
+  store i32 %88, ptr %p_d.018.6, align 4
+  store i32 %89, ptr %p_d.01823.6, align 4
   %p_21.7 = add i32 %4, 896
   %p_22.7 = add i32 %4, 960
-  %p_d.018.7 = getelementptr i32, i32* %dest, i32 %p_21.7
-  %p_d.01823.7 = getelementptr i32, i32* %dest, i32 %p_22.7
+  %p_d.018.7 = getelementptr i32, ptr %dest, i32 %p_21.7
+  %p_d.01823.7 = getelementptr i32, ptr %dest, i32 %p_22.7
   %p_24.7 = add i32 %4, 904
   %p_25.7 = add i32 %4, 968
-  %p_arrayidx14.7 = getelementptr i32, i32* %dest, i32 %p_24.7
-  %p_arrayidx1426.7 = getelementptr i32, i32* %dest, i32 %p_25.7
-  %_p_scalar_.7 = load i32, i32* %p_d.018.7, align 4
+  %p_arrayidx14.7 = getelementptr i32, ptr %dest, i32 %p_24.7
+  %p_arrayidx1426.7 = getelementptr i32, ptr %dest, i32 %p_25.7
+  %_p_scalar_.7 = load i32, ptr %p_d.018.7, align 4
   %_p_vec_.7 = insertelement <2 x i32> undef, i32 %_p_scalar_.7, i32 0
-  %_p_scalar_27.7 = load i32, i32* %p_d.01823.7, align 4
+  %_p_scalar_27.7 = load i32, ptr %p_d.01823.7, align 4
   %_p_vec_28.7 = insertelement <2 x i32> %_p_vec_.7, i32 %_p_scalar_27.7, i32 1
   %90 = bitcast <2 x i32> %_p_vec_28.7 to i64
   %91 = tail call i64 @llvm.hexagon.S2.asr.i.vw(i64 %90, i32 31)
@@ -251,11 +251,11 @@ entry:
   %97 = tail call i64 @llvm.hexagon.S2.asl.r.vw(i64 %96, i32 %gb)
   %98 = bitcast i64 %97 to <2 x i32>
   %99 = extractelement <2 x i32> %98, i32 0
-  store i32 %99, i32* %p_arrayidx14.7, align 4
+  store i32 %99, ptr %p_arrayidx14.7, align 4
   %100 = extractelement <2 x i32> %98, i32 1
-  store i32 %100, i32* %p_arrayidx1426.7, align 4
-  store i32 %99, i32* %p_d.018.7, align 4
-  store i32 %100, i32* %p_d.01823.7, align 4
+  store i32 %100, ptr %p_arrayidx1426.7, align 4
+  store i32 %99, ptr %p_d.018.7, align 4
+  store i32 %100, ptr %p_d.01823.7, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/vect/vect-vsplatb.ll b/llvm/test/CodeGen/Hexagon/vect/vect-vsplatb.ll
index faea9350a9d49..c4f4149e4a51a 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-vsplatb.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-vsplatb.ll
@@ -15,19 +15,17 @@ polly.loop_after:                                 ; preds = %polly.loop_body
 polly.loop_body:                                  ; preds = %entry, %polly.loop_body
   %polly.loopiv25 = phi i32 [ 0, %entry ], [ %polly.next_loopiv, %polly.loop_body ]
   %polly.next_loopiv = add i32 %polly.loopiv25, 4
-  %p_arrayidx1 = getelementptr [400 x i8], [400 x i8]* @A, i32 0, i32 %polly.loopiv25
-  %p_arrayidx = getelementptr [400 x i8], [400 x i8]* @B, i32 0, i32 %polly.loopiv25
-  %vector_ptr = bitcast i8* %p_arrayidx to <4 x i8>*
-  %_p_vec_full = load <4 x i8>, <4 x i8>* %vector_ptr, align 8
+  %p_arrayidx1 = getelementptr [400 x i8], ptr @A, i32 0, i32 %polly.loopiv25
+  %p_arrayidx = getelementptr [400 x i8], ptr @B, i32 0, i32 %polly.loopiv25
+  %_p_vec_full = load <4 x i8>, ptr %p_arrayidx, align 8
   %vec0 = insertelement <4 x i8> undef, i8 %v, i32 0
   %vec1 = insertelement <4 x i8> %vec0, i8 %v, i32 1
   %vec2 = insertelement <4 x i8> %vec1, i8 %v, i32 2
   %vec3 = insertelement <4 x i8> %vec2, i8 %v, i32 3
   %mulp_vec = mul <4 x i8> %_p_vec_full, %vec3
-  %vector_ptr14 = bitcast i8* %p_arrayidx1 to <4 x i8>*
-  %_p_vec_full15 = load <4 x i8>, <4 x i8>* %vector_ptr14, align 8
+  %_p_vec_full15 = load <4 x i8>, ptr %p_arrayidx1, align 8
   %addp_vec = add <4 x i8> %_p_vec_full15, %mulp_vec
-  store <4 x i8> %addp_vec, <4 x i8>* %vector_ptr14, align 8
+  store <4 x i8> %addp_vec, ptr %p_arrayidx1, align 8
   %0 = icmp slt i32 %polly.next_loopiv, 400
   br i1 %0, label %polly.loop_body, label %polly.loop_after
 }

diff --git a/llvm/test/CodeGen/Hexagon/vect/vect-vsplath.ll b/llvm/test/CodeGen/Hexagon/vect/vect-vsplath.ll
index 2f6897c328099..c3214d2d689b5 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-vsplath.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-vsplath.ll
@@ -18,15 +18,13 @@ polly.loop_after:                                 ; preds = %polly.loop_body
 polly.loop_body:                                  ; preds = %entry, %polly.loop_body
   %polly.loopiv26 = phi i32 [ 0, %entry ], [ %polly.next_loopiv, %polly.loop_body ]
   %polly.next_loopiv = add nsw i32 %polly.loopiv26, 4
-  %p_arrayidx1 = getelementptr [400 x i16], [400 x i16]* @A, i32 0, i32 %polly.loopiv26
-  %p_arrayidx = getelementptr [400 x i16], [400 x i16]* @B, i32 0, i32 %polly.loopiv26
-  %vector_ptr = bitcast i16* %p_arrayidx to <4 x i16>*
-  %_p_vec_full = load <4 x i16>, <4 x i16>* %vector_ptr, align 8
+  %p_arrayidx1 = getelementptr [400 x i16], ptr @A, i32 0, i32 %polly.loopiv26
+  %p_arrayidx = getelementptr [400 x i16], ptr @B, i32 0, i32 %polly.loopiv26
+  %_p_vec_full = load <4 x i16>, ptr %p_arrayidx, align 8
   %mulp_vec = mul <4 x i16> %_p_vec_full, <i16 7, i16 7, i16 7, i16 7>
-  %vector_ptr15 = bitcast i16* %p_arrayidx1 to <4 x i16>*
-  %_p_vec_full16 = load <4 x i16>, <4 x i16>* %vector_ptr15, align 8
+  %_p_vec_full16 = load <4 x i16>, ptr %p_arrayidx1, align 8
   %addp_vec = add <4 x i16> %_p_vec_full16, %mulp_vec
-  store <4 x i16> %addp_vec, <4 x i16>* %vector_ptr15, align 8
+  store <4 x i16> %addp_vec, ptr %p_arrayidx1, align 8
   %0 = icmp slt i32 %polly.next_loopiv, 400
   br i1 %0, label %polly.loop_body, label %polly.loop_after
 }

diff --git a/llvm/test/CodeGen/Hexagon/vect/vect-xor.ll b/llvm/test/CodeGen/Hexagon/vect/vect-xor.ll
index e3c8c7783ad68..87a7f0eb252fd 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-xor.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-xor.ll
@@ -24,13 +24,12 @@ polly.loop_after:                                 ; preds = %polly.loop_body
 polly.loop_body:                                  ; preds = %entry, %polly.loop_body
   %polly.loopiv36 = phi i32 [ 0, %entry ], [ %polly.next_loopiv, %polly.loop_body ]
   %polly.next_loopiv = add nsw i32 %polly.loopiv36, 4
-  %p_arrayidx4 = getelementptr [0 x i16], [0 x i16]* @prev, i32 0, i32 %polly.loopiv36
-  %vector_ptr = bitcast i16* %p_arrayidx4 to <4 x i16>*
-  %_p_vec_full = load <4 x i16>, <4 x i16>* %vector_ptr, align 2
+  %p_arrayidx4 = getelementptr [0 x i16], ptr @prev, i32 0, i32 %polly.loopiv36
+  %_p_vec_full = load <4 x i16>, ptr %p_arrayidx4, align 2
   %cmp1p_vicmp = icmp slt <4 x i16> %_p_vec_full, zeroinitializer
   %subp_vec = xor <4 x i16> %_p_vec_full, <i16 -32768, i16 -32768, i16 -32768, i16 -32768>
   %sel1p_vsel = select <4 x i1> %cmp1p_vicmp, <4 x i16> %subp_vec, <4 x i16> zeroinitializer
-  store <4 x i16> %sel1p_vsel, <4 x i16>* %vector_ptr, align 2
+  store <4 x i16> %sel1p_vsel, ptr %p_arrayidx4, align 2
   %0 = icmp slt i32 %polly.next_loopiv, 32768
   br i1 %0, label %polly.loop_body, label %polly.loop_after
 }

diff --git a/llvm/test/CodeGen/Hexagon/vect/vect-zeroextend.ll b/llvm/test/CodeGen/Hexagon/vect/vect-zeroextend.ll
index 3d0b7946f77a4..86f242b9d8e3a 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-zeroextend.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-zeroextend.ll
@@ -13,9 +13,9 @@ for.cond30.preheader.lr.ph:                       ; preds = %entry
   br label %for.cond37.preheader
 
 for.cond37.preheader:                             ; preds = %for.cond37.preheader, %for.cond30.preheader.lr.ph
-  %_p_vec_full = load <3 x i8>, <3 x i8>* undef, align 8
+  %_p_vec_full = load <3 x i8>, ptr undef, align 8
   %0 = zext <3 x i8> %_p_vec_full to <3 x i16>
-  store <3 x i16> %0, <3 x i16>* undef, align 8
+  store <3 x i16> %0, ptr undef, align 8
   br label %for.cond37.preheader
 
 for.end425:                                       ; preds = %entry

diff --git a/llvm/test/CodeGen/Hexagon/vect/zext-v4i1.ll b/llvm/test/CodeGen/Hexagon/vect/zext-v4i1.ll
index 5f9a1522a2f68..dddc4bd953d7a 100644
--- a/llvm/test/CodeGen/Hexagon/vect/zext-v4i1.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/zext-v4i1.ll
@@ -6,7 +6,7 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define i32 @fred(<8 x i16>* %a0) #0 {
+define i32 @fred(ptr %a0) #0 {
 ; CHECK-LABEL: fred:
 ; CHECK:       // %bb.0: // %b0
 ; CHECK-NEXT:    {
@@ -49,7 +49,7 @@ b1:                                               ; preds = %b0
   br label %b14
 
 b2:                                               ; preds = %b0
-  %v2 = load <8 x i16>, <8 x i16>* %a0, align 64
+  %v2 = load <8 x i16>, ptr %a0, align 64
   %v3 = icmp eq <8 x i16> %v2, zeroinitializer
   %v4 = zext <8 x i1> %v3 to <8 x i16>
   %v5 = add <8 x i16> zeroinitializer, %v4

diff --git a/llvm/test/CodeGen/Hexagon/vect_setcc.ll b/llvm/test/CodeGen/Hexagon/vect_setcc.ll
index 03b073f376d75..b42ddf1c24f1b 100644
--- a/llvm/test/CodeGen/Hexagon/vect_setcc.ll
+++ b/llvm/test/CodeGen/Hexagon/vect_setcc.ll
@@ -5,11 +5,11 @@
 target triple = "hexagon"
 
 ; Function Attrs: nounwind readonly
-define void @f0(i16* nocapture %a0) #0 {
+define void @f0(ptr nocapture %a0) #0 {
 b0:
   %v0 = alloca [16 x i16], align 8
-  %v1 = load i16, i16* %a0, align 2, !tbaa !0
-  %v2 = getelementptr [16 x i16], [16 x i16]* %v0, i32 0, i32 5
+  %v1 = load i16, ptr %a0, align 2, !tbaa !0
+  %v2 = getelementptr [16 x i16], ptr %v0, i32 0, i32 5
   br label %b12
 
 b1:                                               ; preds = %b11
@@ -24,21 +24,21 @@ b3:                                               ; preds = %b1
 
 b4:                                               ; preds = %b4, %b3
   %v4 = phi i32 [ %v6, %b4 ], [ 0, %b3 ]
-  %v5 = getelementptr inbounds [16 x i16], [16 x i16]* %v0, i32 0, i32 %v4
-  store i16 1, i16* %v5, align 2, !tbaa !0
+  %v5 = getelementptr inbounds [16 x i16], ptr %v0, i32 0, i32 %v4
+  store i16 1, ptr %v5, align 2, !tbaa !0
   %v6 = add nsw i32 %v4, 1
   %v7 = icmp eq i32 %v6, 16
   br i1 %v7, label %b8, label %b4
 
 b5:                                               ; preds = %b7, %b2
   %v8 = phi i32 [ %v12, %b7 ], [ 0, %b2 ]
-  %v9 = getelementptr inbounds [16 x i16], [16 x i16]* %v0, i32 0, i32 %v8
-  %v10 = load i16, i16* %v9, align 2, !tbaa !0
+  %v9 = getelementptr inbounds [16 x i16], ptr %v0, i32 0, i32 %v8
+  %v10 = load i16, ptr %v9, align 2, !tbaa !0
   %v11 = icmp slt i16 %v10, 13
   br i1 %v11, label %b6, label %b7
 
 b6:                                               ; preds = %b5
-  store i16 1, i16* %v9, align 2, !tbaa !0
+  store i16 1, ptr %v9, align 2, !tbaa !0
   br label %b7
 
 b7:                                               ; preds = %b6, %b5
@@ -66,10 +66,9 @@ b11:                                              ; preds = %b12
 b12:                                              ; preds = %b12, %b0
   %v19 = phi <2 x i32> [ zeroinitializer, %b0 ], [ %v31, %b12 ]
   %v20 = phi <2 x i32> [ zeroinitializer, %b0 ], [ %v32, %b12 ]
-  %v21 = phi i16* [ %v2, %b0 ], [ %v35, %b12 ]
+  %v21 = phi ptr [ %v2, %b0 ], [ %v35, %b12 ]
   %v22 = phi i32 [ 0, %b0 ], [ %v33, %b12 ]
-  %v23 = bitcast i16* %v21 to <4 x i16>*
-  %v24 = load <4 x i16>, <4 x i16>* %v23, align 2
+  %v24 = load <4 x i16>, ptr %v21, align 2
   %v25 = icmp sgt <4 x i16> %v24, <i16 11, i16 11, i16 11, i16 11>
   %v26 = zext <4 x i1> %v25 to <4 x i16>
   %v27 = shufflevector <4 x i16> %v26, <4 x i16> undef, <2 x i32> <i32 2, i32 3>
@@ -80,7 +79,7 @@ b12:                                              ; preds = %b12, %b0
   %v32 = add <2 x i32> %v20, %v30
   %v33 = add nsw i32 %v22, 4
   %v34 = icmp slt i32 %v22, 4
-  %v35 = getelementptr i16, i16* %v21, i32 4
+  %v35 = getelementptr i16, ptr %v21, i32 4
   br i1 %v34, label %b12, label %b11
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/vector-align.ll b/llvm/test/CodeGen/Hexagon/vector-align.ll
index d2e0071700eda..dbacbababd86b 100644
--- a/llvm/test/CodeGen/Hexagon/vector-align.ll
+++ b/llvm/test/CodeGen/Hexagon/vector-align.ll
@@ -15,8 +15,8 @@ entry:
   %v0 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1)
   %v1 = tail call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v0, i32 -2147483648)
   %v2 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v1, i32 -1)
-  store <16 x i32> %v2, <16 x i32>* @Q6VecPredResult, align 64, !tbaa !1
-  tail call void @print_vecpred(i32 64, i8* bitcast (<16 x i32>* @Q6VecPredResult to i8*)) #3
+  store <16 x i32> %v2, ptr @Q6VecPredResult, align 64, !tbaa !1
+  tail call void @print_vecpred(i32 64, ptr @Q6VecPredResult) #3
   ret i32 0
 }
 
@@ -24,7 +24,7 @@ declare <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32>, i32) #1
 declare <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1>, i32) #1
 declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32) #1
 
-declare void @print_vecpred(i32, i8*) #2
+declare void @print_vecpred(i32, ptr) #2
 
 attributes #0 = { nounwind "target-cpu"="hexagonv66" "target-features"="+hvxv66,+hvx-length64b" }
 attributes #1 = { nounwind readnone }

diff --git a/llvm/test/CodeGen/Hexagon/vector-ext-load.ll b/llvm/test/CodeGen/Hexagon/vector-ext-load.ll
index 536dad165ef82..6f2edd81a6303 100644
--- a/llvm/test/CodeGen/Hexagon/vector-ext-load.ll
+++ b/llvm/test/CodeGen/Hexagon/vector-ext-load.ll
@@ -2,9 +2,9 @@
 
 ; RUN: llc -march=hexagon < %s
 
-define void @test1(<8 x i32>* %ptr) {
-  %1 = load <8 x i32>, <8 x i32>* %ptr, align 32
+define void @test1(ptr %ptr) {
+  %1 = load <8 x i32>, ptr %ptr, align 32
   %2 = and <8 x i32> %1, <i32 0, i32 0, i32 0, i32 -1, i32 0, i32 0, i32 0, i32 -1>
-  store <8 x i32> %2, <8 x i32>* %ptr, align 16
+  store <8 x i32> %2, ptr %ptr, align 16
   ret void
 }

diff --git a/llvm/test/CodeGen/Hexagon/vector-sint-to-fp.ll b/llvm/test/CodeGen/Hexagon/vector-sint-to-fp.ll
index 699d6219ebe7f..ad676f1353981 100644
--- a/llvm/test/CodeGen/Hexagon/vector-sint-to-fp.ll
+++ b/llvm/test/CodeGen/Hexagon/vector-sint-to-fp.ll
@@ -30,7 +30,7 @@ b1:                                               ; preds = %b1, %b0
   %v14 = call <64 x half> @llvm.fmuladd.v64f16(<64 x half> zeroinitializer, <64 x half> %v13, <64 x half> zeroinitializer)
   %v15 = shufflevector <64 x half> %v14, <64 x half> undef, <128 x i32> <i32 0, i32 undef, i32 2, i32 undef, i32 4, i32 undef, i32 6, i32 undef, i32 8, i32 undef, i32 10, i32 undef, i32 12, i32 undef, i32 14, i32 undef, i32 16, i32 undef, i32 18, i32 undef, i32 20, i32 undef, i32 22, i32 undef, i32 24, i32 undef, i32 26, i32 undef, i32 28, i32 undef, i32 30, i32 undef, i32 32, i32 undef, i32 34, i32 undef, i32 36, i32 undef, i32 38, i32 undef, i32 40, i32 undef, i32 42, i32 undef, i32 44, i32 undef, i32 46, i32 undef, i32 48, i32 undef, i32 50, i32 undef, i32 52, i32 undef, i32 54, i32 undef, i32 56, i32 undef, i32 58, i32 undef, i32 60, i32 undef, i32 62, i32 undef, i32 1, i32 undef, i32 3, i32 undef, i32 5, i32 undef, i32 7, i32 undef, i32 9, i32 undef, i32 11, i32 undef, i32 13, i32 undef, i32 15, i32 undef, i32 17, i32 undef, i32 19, i32 undef, i32 21, i32 undef, i32 23, i32 undef, i32 25, i32 undef, i32 27, i32 undef, i32 29, i32 undef, i32 31, i32 undef, i32 33, i32 undef, i32 35, i32 undef, i32 37, i32 undef, i32 39, i32 undef, i32 41, i32 undef, i32 43, i32 undef, i32 45, i32 undef, i32 47, i32 undef, i32 49, i32 undef, i32 51, i32 undef, i32 53, i32 undef, i32 55, i32 undef, i32 57, i32 undef, i32 59, i32 undef, i32 61, i32 undef, i32 63, i32 undef>
   %v16 = shufflevector <128 x half> %v15, <128 x half> undef, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
-  call void @llvm.masked.store.v64f16.p0v64f16(<64 x half> %v16, <64 x half>* undef, i32 64, <64 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false>)
+  call void @llvm.masked.store.v64f16.p0(<64 x half> %v16, ptr undef, i32 64, <64 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false>)
   %v17 = add nsw i32 %v0, 1
   br label %b1
 }
@@ -42,7 +42,7 @@ declare <64 x float> @llvm.floor.v64f32(<64 x float>) #1
 declare <64 x half> @llvm.fmuladd.v64f16(<64 x half>, <64 x half>, <64 x half>) #1
 
 ; Function Attrs: argmemonly nofree nosync nounwind willreturn writeonly
-declare void @llvm.masked.store.v64f16.p0v64f16(<64 x half>, <64 x half>*, i32 immarg, <64 x i1>) #2
+declare void @llvm.masked.store.v64f16.p0(<64 x half>, ptr, i32 immarg, <64 x i1>) #2
 
 attributes #0 = { "target-features"="+hvxv69,+hvx-length128b,+hvx-qfloat" }
 attributes #1 = { nofree nosync nounwind readnone speculatable willreturn }

diff --git a/llvm/test/CodeGen/Hexagon/verify-sink-code.ll b/llvm/test/CodeGen/Hexagon/verify-sink-code.ll
index 1952629fe5ae7..4ec1ab4873434 100644
--- a/llvm/test/CodeGen/Hexagon/verify-sink-code.ll
+++ b/llvm/test/CodeGen/Hexagon/verify-sink-code.ll
@@ -4,40 +4,40 @@
 
 target triple = "hexagon"
 
-%s.0 = type { %s.1, [128 x %s.0*], i32, i32, i32, %s.6, i32, i32, i32, i32, i32, i32, i32, i32, i32, [1 x %s.9], %s.9*, [1 x %s.12], %s.12*, i32, [4 x [4 x [4 x i32]]*], [2 x [8 x [8 x i32]]*], [4 x [16 x i32]*], [2 x [64 x i32]*], [4 x [16 x i16]*], [2 x [64 x i16]*], [4 x [16 x i16]*], [2 x [64 x i16]*], [2 x [64 x i32]], [2 x [64 x i32]], [2 x i32], %s.13, %s.15, %s.16, %s.17*, %s.17*, i32, [19 x %s.17*], i32, [19 x %s.17*], [2 x i32], [4 x i8], %s.18, %s.20, %s.23*, %s.24, [7 x void (i8*)*], [7 x void (i8*)*], [12 x void (i8*, i8*)*], [12 x void (i8*)*], %s.26, %s.27, %s.28, %s.29, %s.30, %s.32, %s.33, [5 x %s.34*], %s.34*, [15 x %s.34*], [3 x %s.34*], [7 x %s.34*], [8 x i8] }
-%s.1 = type { i32, i32, i32, i32, i32, i32, i32, i32, %s.2, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, [16 x i8], [16 x i8], [16 x i8], [16 x i8], [64 x i8], [64 x i8], void (i8*, i32, i8*, i8*)*, i8*, i32, i32, %s.3, %s.4, i32, i32, i32 }
+%s.0 = type { %s.1, [128 x ptr], i32, i32, i32, %s.6, i32, i32, i32, i32, i32, i32, i32, i32, i32, [1 x %s.9], ptr, [1 x %s.12], ptr, i32, [4 x ptr], [2 x ptr], [4 x ptr], [2 x ptr], [4 x ptr], [2 x ptr], [4 x ptr], [2 x ptr], [2 x [64 x i32]], [2 x [64 x i32]], [2 x i32], %s.13, %s.15, %s.16, ptr, ptr, i32, [19 x ptr], i32, [19 x ptr], [2 x i32], [4 x i8], %s.18, %s.20, ptr, %s.24, [7 x ptr], [7 x ptr], [12 x ptr], [12 x ptr], %s.26, %s.27, %s.28, %s.29, %s.30, %s.32, %s.33, [5 x ptr], ptr, [15 x ptr], [3 x ptr], [7 x ptr], [8 x i8] }
+%s.1 = type { i32, i32, i32, i32, i32, i32, i32, i32, %s.2, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, ptr, [16 x i8], [16 x i8], [16 x i8], [16 x i8], [64 x i8], [64 x i8], ptr, ptr, i32, i32, %s.3, %s.4, i32, i32, i32 }
 %s.2 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32 }
 %s.3 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [2 x i32], i32, i32 }
-%s.4 = type { i32, i32, i32, i32, i32, i32, float, float, i32, i32, float, float, float, i32, i8*, i32, i8*, i8*, float, float, float, %s.5*, i32, i8* }
+%s.4 = type { i32, i32, i32, i32, i32, i32, float, float, i32, i32, float, float, float, i32, ptr, i32, ptr, ptr, float, float, float, ptr, i32, ptr }
 %s.5 = type { i32, i32, i32, i32, float }
-%s.6 = type { i32, [8 x %s.7], i32, i8*, %s.8, i32 }
-%s.7 = type { i32, i32, i32, i8* }
-%s.8 = type { i8*, i8*, i8*, i32, i32 }
+%s.6 = type { i32, [8 x %s.7], i32, ptr, %s.8, i32 }
+%s.7 = type { i32, i32, i32, ptr }
+%s.8 = type { ptr, ptr, ptr, i32, i32 }
 %s.9 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [256 x i32], i32, i32, i32, i32, i32, i32, i32, i32, %s.10, i32, %s.11, i32 }
 %s.10 = type { i32, i32, i32, i32 }
 %s.11 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
-%s.12 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [6 x i8*] }
-%s.13 = type { %s.9*, %s.12*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [2 x i32], i32, i32, i32, i32, i32, i32, i32, [2 x [16 x %s.14]], i32, i32, i32, i32, i32, i32, i32, i32 }
+%s.12 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [6 x ptr] }
+%s.13 = type { ptr, ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [2 x i32], i32, i32, i32, i32, i32, i32, i32, [2 x [16 x %s.14]], i32, i32, i32, i32, i32, i32, i32, i32 }
 %s.14 = type { i32, i32 }
-%s.15 = type { [460 x i8], i32, i32, i32, i32, i32, i8*, i8*, i8* }
-%s.16 = type { [19 x %s.17*], [19 x %s.17*], [292 x %s.17*], %s.17*, [18 x %s.17*], i32, i32, i32, i32, i32, i32, i32 }
-%s.17 = type { i32, i32, i32, i64, i32, i32, i32, float, i32, [4 x i32], [4 x i32], i32, i32, [4 x i8*], [4 x i8*], [4 x i8*], i16*, [8 x i8*], [4 x i8*], i8*, [2 x [2 x i16]*], [2 x i8*], [2 x i32], [2 x [16 x i32]], [18 x [18 x i32]], i32, [18 x i32], [18 x [18 x i32*]], i32*, i32*, i32*, i32, i32, i32, i32 }
+%s.15 = type { [460 x i8], i32, i32, i32, i32, i32, ptr, ptr, ptr }
+%s.16 = type { [19 x ptr], [19 x ptr], [292 x ptr], ptr, [18 x ptr], i32, i32, i32, i32, i32, i32, i32 }
+%s.17 = type { i32, i32, i32, i64, i32, i32, i32, float, i32, [4 x i32], [4 x i32], i32, i32, [4 x ptr], [4 x ptr], [4 x ptr], ptr, [8 x ptr], [4 x ptr], ptr, [2 x ptr], [2 x ptr], [2 x i32], [2 x [16 x i32]], [18 x [18 x i32]], i32, [18 x i32], [18 x [18 x ptr]], ptr, ptr, ptr, i32, i32, i32, i32 }
 %s.18 = type { [16 x i32], [2 x [4 x i32]], [4 x [64 x i32]], [24 x %s.19] }
 %s.19 = type { [16 x i32] }
-%s.20 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [2 x i32], [2 x i32], [2 x i32], [2 x i32], [2 x i32], [2 x i32], i32, [4 x i32], [16 x i32], i32, i32, i32, i32, i32, i32, i8*, i8*, i16*, [7 x i8]*, [24 x i8]*, i8*, [2 x [2 x i16]*], [2 x [2 x i16]*], [2 x i8*], [2 x [32 x [2 x i16]*]], i8*, i8*, [2 x [3 x i8*]], [16 x i8]*, i32, i32, [4 x i32], i32, i32, i32, i32, i32, [8 x i8], %s.21, %s.22, i32, i32, i32, i32, i32, i32, i32, i32, [16 x [2 x i32]], [32 x [4 x i32]], [2 x i32], [16 x i32], [4 x i8] }
-%s.21 = type { [384 x i8], [864 x i8], [3 x i8*], [3 x i8*], [2 x i32], [2 x [32 x [6 x i8*]]], [2 x [16 x i16*]], [3 x i32], [4 x i8] }
+%s.20 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [2 x i32], [2 x i32], [2 x i32], [2 x i32], [2 x i32], [2 x i32], i32, [4 x i32], [16 x i32], i32, i32, i32, i32, i32, i32, ptr, ptr, ptr, ptr, ptr, ptr, [2 x ptr], [2 x ptr], [2 x ptr], [2 x [32 x ptr]], ptr, ptr, [2 x [3 x ptr]], ptr, i32, i32, [4 x i32], i32, i32, i32, i32, i32, [8 x i8], %s.21, %s.22, i32, i32, i32, i32, i32, i32, i32, i32, [16 x [2 x i32]], [32 x [4 x i32]], [2 x i32], [16 x i32], [4 x i8] }
+%s.21 = type { [384 x i8], [864 x i8], [3 x ptr], [3 x ptr], [2 x i32], [2 x [32 x [6 x ptr]]], [2 x [16 x ptr]], [3 x i32], [4 x i8] }
 %s.22 = type { [48 x i32], [48 x i32], [2 x [48 x i8]], [2 x [48 x [2 x i16]]], [2 x [48 x [2 x i16]]], [48 x i8], [2 x [48 x [2 x i16]]], [2 x [48 x i8]], [2 x i32], i32, i32, i32 }
 %s.23 = type opaque
 %s.24 = type { %s.25, [5 x i32], [5 x i64], [5 x i32], [5 x i64], [5 x float], [5 x float], [5 x float], [5 x float], [5 x float], [5 x [19 x i64]], [2 x i64], [2 x [7 x i64]], [2 x [32 x i64]], [2 x i32], [2 x i32] }
 %s.25 = type { i32, i32, i32, i32, [19 x i32], i32, i32, i32, [2 x i32], [7 x i32], [32 x i32], i32, i32, i32, [2 x i32] }
-%s.26 = type { [7 x i32 (i8*, i32, i8*, i32)*], [7 x i32 (i8*, i32, i8*, i32)*], [7 x i32 (i8*, i32, i8*, i32)*], [7 x i32 (i8*, i32, i8*, i32)*], [4 x i32 (i8*, i32, i8*, i32)*], [7 x i32 (i8*, i32, i8*, i32)*], [7 x i32 (i8*, i32, i8*, i32)*], void (i8*, i32, i8*, i32, [4 x i32]*)*, float ([4 x i32]*, [4 x i32]*, i32)*, [7 x i32 (i8*, i32, i8*, i32, i32)*], [7 x void (i8*, i8*, i8*, i8*, i32, i32*)*], [7 x void (i8*, i8*, i8*, i8*, i8*, i32, i32*)*], [7 x void (i32*, i16*, i32, i16*, i32)*], void (i8*, i8*, i32*)*, void (i8*, i8*, i32*)*, void (i8*, i8*, i32*)*, void (i8*, i8*, i32*)* }
-%s.27 = type { void (i8**, i32, i8*, i32, i32, i32, i32, i32)*, i8* (i8**, i32, i8*, i32*, i32, i32, i32, i32)*, void (i8*, i32, i8*, i32, i32, i32, i32, i32)*, [10 x void (i8*, i32, i8*, i32)*], [10 x void (i8*, i32, i8*, i32, i32)*], [7 x void (i8*, i32, i8*, i32, i32)*], void (i8*, i32, i8*, i32, i32, i32)*, void (i8*, i32, i8*, i32, i32)*, void (i8*, i32, i32)* }
-%s.28 = type { void ([4 x i16]*, i8*, i8*)*, void (i8*, [4 x i16]*)*, void ([4 x [4 x i16]]*, i8*, i8*)*, void (i8*, [4 x [4 x i16]]*)*, void ([4 x [4 x i16]]*, i8*, i8*)*, void (i8*, [4 x [4 x i16]]*)*, void ([8 x i16]*, i8*, i8*)*, void (i8*, [8 x i16]*)*, void ([8 x [8 x i16]]*, i8*, i8*)*, void (i8*, [8 x [8 x i16]]*)*, void ([4 x i16]*)*, void ([4 x i16]*)*, void ([2 x i16]*)*, void ([2 x i16]*)* }
-%s.29 = type { void (i32*, [8 x i16]*)*, void (i32*, [4 x i16]*)*, void (i32*, [4 x i16]*)*, void (i32*, i8*, i8*)*, void (i32*, i8*, i8*)* }
-%s.30 = type { [9 x void (%s.27*, %s.17*, %s.31*, i32, i32)*] }
-%s.31 = type { i32, i32, [4 x i32], [4 x i8*] }
-%s.32 = type { void ([8 x i16]*, i16*, i16*)*, void ([4 x i16]*, i16*, i16*)*, void ([4 x i16]*, i32, i32)*, void ([2 x i16]*, i32, i32)*, void ([4 x i16]*, [4 x [4 x i32]]*, i32)*, void ([8 x i16]*, [8 x [8 x i32]]*, i32)* }
-%s.33 = type { void (i8*, i32, i32, i32, i8*)*, void (i8*, i32, i32, i32, i8*)*, void (i8*, i32, i32, i32, i8*)*, void (i8*, i32, i32, i32, i8*)*, void (i8*, i32, i32, i32)*, void (i8*, i32, i32, i32)*, void (i8*, i32, i32, i32)*, void (i8*, i32, i32, i32)* }
+%s.26 = type { [7 x ptr], [7 x ptr], [7 x ptr], [7 x ptr], [4 x ptr], [7 x ptr], [7 x ptr], ptr, ptr, [7 x ptr], [7 x ptr], [7 x ptr], [7 x ptr], ptr, ptr, ptr, ptr }
+%s.27 = type { ptr, ptr, ptr, [10 x ptr], [10 x ptr], [7 x ptr], ptr, ptr, ptr }
+%s.28 = type { ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
+%s.29 = type { ptr, ptr, ptr, ptr, ptr }
+%s.30 = type { [9 x ptr] }
+%s.31 = type { i32, i32, [4 x i32], [4 x ptr] }
+%s.32 = type { ptr, ptr, ptr, ptr, ptr, ptr }
+%s.33 = type { ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
 %s.34 = type opaque
 
 @g0 = private unnamed_addr constant [148 x i8] c"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\00", align 8
@@ -45,24 +45,24 @@ target triple = "hexagon"
 @g2 = private unnamed_addr constant [148 x i8] c"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\00", align 8
 
 ; Function Attrs: nounwind
-define void @f0(%s.0* %a0, i32 %a1, i32 %a2) #0 {
+define void @f0(ptr %a0, i32 %a1, i32 %a2) #0 {
 b0:
-  %v0 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 1
-  %v1 = load i32, i32* %v0, align 4
+  %v0 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 43, i32 1
+  %v1 = load i32, ptr %v0, align 4
   %v2 = mul nsw i32 %v1, %a2
   %v3 = add nsw i32 %v2, %a1
-  %v4 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 3
-  %v5 = load i32, i32* %v4, align 4
+  %v4 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 43, i32 3
+  %v5 = load i32, ptr %v4, align 4
   %v6 = mul nsw i32 %v5, %a2
   %v7 = add nsw i32 %v6, %a1
   %v8 = mul nsw i32 %v7, 4
-  %v9 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 2
-  %v10 = load i32, i32* %v9, align 4
+  %v9 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 43, i32 2
+  %v10 = load i32, ptr %v9, align 4
   %v11 = mul nsw i32 %v10, %a2
   %v12 = add nsw i32 %v11, %a1
   %v13 = mul nsw i32 %v12, 2
-  %v14 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 14
-  %v15 = load i32, i32* %v14, align 4
+  %v14 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 43, i32 14
+  %v15 = load i32, ptr %v14, align 4
   %v16 = shl i32 1, %v15
   %v17 = sub nsw i32 %a2, %v16
   %v18 = mul nsw i32 %v17, %v1
@@ -72,9 +72,9 @@ b0:
   br i1 %v21, label %b2, label %b1
 
 b1:                                               ; preds = %b0
-  tail call void @f1(i8* getelementptr inbounds ([148 x i8], [148 x i8]* @g0, i32 0, i32 0), i8* getelementptr inbounds ([27 x i8], [27 x i8]* @g1, i32 0, i32 0)) #2
-  %v22 = load i32, i32* %v4, align 4
-  %v23 = load i32, i32* %v0, align 4
+  tail call void @f1(ptr @g0, ptr @g1) #2
+  %v22 = load i32, ptr %v4, align 4
+  %v23 = load i32, ptr %v0, align 4
   br label %b2
 
 b2:                                               ; preds = %b1, %b0
@@ -85,68 +85,67 @@ b2:                                               ; preds = %b1, %b0
   br i1 %v27, label %b4, label %b3
 
 b3:                                               ; preds = %b2
-  tail call void @f1(i8* getelementptr inbounds ([148 x i8], [148 x i8]* @g2, i32 0, i32 0), i8* getelementptr inbounds ([27 x i8], [27 x i8]* @g1, i32 0, i32 0)) #2
+  tail call void @f1(ptr @g2, ptr @g1) #2
   br label %b4
 
 b4:                                               ; preds = %b3, %b2
-  %v28 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 4
-  store i32 %a1, i32* %v28, align 4
-  %v29 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 5
-  store i32 %a2, i32* %v29, align 4
-  %v30 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 6
-  store i32 %v3, i32* %v30, align 4
-  %v31 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 7
-  store i32 %v13, i32* %v31, align 4
-  %v32 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 8
-  store i32 %v8, i32* %v32, align 4
-  %v33 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 29
-  store i32 %v19, i32* %v33, align 4
-  %v34 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 21
-  store i32 0, i32* %v34, align 4
-  %v35 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 31, i32 3
-  %v36 = load i32, i32* %v35, align 4
+  %v28 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 43, i32 4
+  store i32 %a1, ptr %v28, align 4
+  %v29 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 43, i32 5
+  store i32 %a2, ptr %v29, align 4
+  %v30 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 43, i32 6
+  store i32 %v3, ptr %v30, align 4
+  %v31 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 43, i32 7
+  store i32 %v13, ptr %v31, align 4
+  %v32 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 43, i32 8
+  store i32 %v8, ptr %v32, align 4
+  %v33 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 43, i32 29
+  store i32 %v19, ptr %v33, align 4
+  %v34 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 43, i32 21
+  store i32 0, ptr %v34, align 4
+  %v35 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 31, i32 3
+  %v36 = load i32, ptr %v35, align 4
   %v37 = icmp slt i32 %v19, %v36
   br i1 %v37, label %b6, label %b5
 
 b5:                                               ; preds = %b4
-  %v38 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 30
-  %v39 = load i8*, i8** %v38, align 4
-  %v40 = getelementptr inbounds i8, i8* %v39, i32 %v19
-  %v41 = load i8, i8* %v40, align 1
+  %v38 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 43, i32 30
+  %v39 = load ptr, ptr %v38, align 4
+  %v40 = getelementptr inbounds i8, ptr %v39, i32 %v19
+  %v41 = load i8, ptr %v40, align 1
   %v42 = sext i8 %v41 to i32
-  %v43 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 24
-  store i32 %v42, i32* %v43, align 4
-  store i32 2, i32* %v34, align 4
-  %v44 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 33
-  %v45 = load [7 x i8]*, [7 x i8]** %v44, align 4
-  %v46 = getelementptr inbounds [7 x i8], [7 x i8]* %v45, i32 %v19, i32 0
-  %v47 = load i8, i8* %v46, align 1
+  %v43 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 43, i32 24
+  store i32 %v42, ptr %v43, align 4
+  store i32 2, ptr %v34, align 4
+  %v44 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 43, i32 33
+  %v45 = load ptr, ptr %v44, align 4
+  %v46 = getelementptr inbounds [7 x i8], ptr %v45, i32 %v19, i32 0
+  %v47 = load i8, ptr %v46, align 1
   %v48 = sext i8 %v47 to i32
-  %v49 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 54, i32 0, i32 4
-  store i32 %v48, i32* %v49, align 4
-  %v50 = getelementptr inbounds [7 x i8], [7 x i8]* %v45, i32 %v19, i32 1
-  %v51 = load i8, i8* %v50, align 1
+  %v49 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 43, i32 54, i32 0, i32 4
+  store i32 %v48, ptr %v49, align 4
+  %v50 = getelementptr inbounds [7 x i8], ptr %v45, i32 %v19, i32 1
+  %v51 = load i8, ptr %v50, align 1
   %v52 = sext i8 %v51 to i32
-  %v53 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 54, i32 0, i32 5
-  store i32 %v52, i32* %v53, align 4
-  %v54 = getelementptr inbounds [7 x i8], [7 x i8]* %v45, i32 %v19, i32 2
-  %v55 = load i8, i8* %v54, align 1
+  %v53 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 43, i32 54, i32 0, i32 5
+  store i32 %v52, ptr %v53, align 4
+  %v54 = getelementptr inbounds [7 x i8], ptr %v45, i32 %v19, i32 2
+  %v55 = load i8, ptr %v54, align 1
   %v56 = sext i8 %v55 to i32
-  %v57 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 54, i32 0, i32 6
-  store i32 %v56, i32* %v57, align 4
-  %v58 = getelementptr inbounds [7 x i8], [7 x i8]* %v45, i32 %v19, i32 3
-  %v59 = load i8, i8* %v58, align 1
+  %v57 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 43, i32 54, i32 0, i32 6
+  store i32 %v56, ptr %v57, align 4
+  %v58 = getelementptr inbounds [7 x i8], ptr %v45, i32 %v19, i32 3
+  %v59 = load i8, ptr %v58, align 1
   %v60 = sext i8 %v59 to i32
-  %v61 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 54, i32 0, i32 7
-  store i32 %v60, i32* %v61, align 4
+  %v61 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 43, i32 54, i32 0, i32 7
+  store i32 %v60, ptr %v61, align 4
   br label %b7
 
 b6:                                               ; preds = %b4
-  %v62 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 24
-  store i32 -1, i32* %v62, align 4
-  %v63 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 54, i32 0, i32 4
-  %v64 = bitcast i32* %v63 to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 4 %v64, i8 -1, i64 16, i1 false)
+  %v62 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 43, i32 24
+  store i32 -1, ptr %v62, align 4
+  %v63 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 43, i32 54, i32 0, i32 4
+  call void @llvm.memset.p0.i64(ptr align 4 %v63, i8 -1, i64 16, i1 false)
   br label %b7
 
 b7:                                               ; preds = %b6, %b5
@@ -154,10 +153,10 @@ b7:                                               ; preds = %b6, %b5
 }
 
 ; Function Attrs: nounwind
-declare void @f1(i8*, i8*) #0
+declare void @f1(ptr, ptr) #0
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #1
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1) #1
 
 attributes #0 = { nounwind "target-cpu"="hexagonv55" }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/verify-undef.ll b/llvm/test/CodeGen/Hexagon/verify-undef.ll
index dc406b569c9d7..fb95214cdac12 100644
--- a/llvm/test/CodeGen/Hexagon/verify-undef.ll
+++ b/llvm/test/CodeGen/Hexagon/verify-undef.ll
@@ -19,11 +19,11 @@ b0:
   br i1 %v0, label %b2, label %b1
 
 b1:                                               ; preds = %b0
-  %v1 = tail call i32 bitcast (i32 (...)* @f1 to i32 (i32)*)(i32 %a0) #0
+  %v1 = tail call i32 @f1(i32 %a0) #0
   br label %b3
 
 b2:                                               ; preds = %b0
-  store i32 0, i32* @g0, align 4
+  store i32 0, ptr @g0, align 4
   br label %b3
 
 b3:                                               ; preds = %b2, %b1

diff  --git a/llvm/test/CodeGen/Hexagon/vgather-opt-addr.ll b/llvm/test/CodeGen/Hexagon/vgather-opt-addr.ll
index b43f79a3b4152..d2db6da0a3716 100644
--- a/llvm/test/CodeGen/Hexagon/vgather-opt-addr.ll
+++ b/llvm/test/CodeGen/Hexagon/vgather-opt-addr.ll
@@ -32,65 +32,59 @@ target triple = "hexagon"
 define dso_local void @contiguos_vgather_test(i32 %Rb, i32 %mu, i32 %nloops, <32 x i32> %Vv, <64 x i32> %Vvv, <32 x i32> %Qs) local_unnamed_addr #0 {
 entry:
   %Vout1 = alloca <32 x i32>, align 128
-  %0 = bitcast <32 x i32>* %Vout1 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 128, i8* nonnull %0) #2
+  call void @llvm.lifetime.start.p0(i64 128, ptr nonnull %Vout1) #2
   %cmp23 = icmp sgt i32 %nloops, 0
   br i1 %cmp23, label %for.body.lr.ph, label %for.cond.cleanup
 
 for.body.lr.ph:                                   ; preds = %entry
-  %add.ptr = getelementptr inbounds <32 x i32>, <32 x i32>* %Vout1, i32 1
-  %1 = bitcast <32 x i32>* %add.ptr to i8*
-  %add.ptr1 = getelementptr inbounds <32 x i32>, <32 x i32>* %Vout1, i32 2
-  %2 = bitcast <32 x i32>* %add.ptr1 to i8*
-  %add.ptr2 = getelementptr inbounds <32 x i32>, <32 x i32>* %Vout1, i32 3
-  %3 = bitcast <32 x i32>* %add.ptr2 to i8*
-  %4 = tail call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %Qs, i32 -1)
-  %add.ptr3 = getelementptr inbounds <32 x i32>, <32 x i32>* %Vout1, i32 4
-  %5 = bitcast <32 x i32>* %add.ptr3 to i8*
-  %add.ptr4 = getelementptr inbounds <32 x i32>, <32 x i32>* %Vout1, i32 5
-  %6 = bitcast <32 x i32>* %add.ptr4 to i8*
+  %add.ptr = getelementptr inbounds <32 x i32>, ptr %Vout1, i32 1
+  %add.ptr1 = getelementptr inbounds <32 x i32>, ptr %Vout1, i32 2
+  %add.ptr2 = getelementptr inbounds <32 x i32>, ptr %Vout1, i32 3
+  %0 = tail call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %Qs, i32 -1)
+  %add.ptr3 = getelementptr inbounds <32 x i32>, ptr %Vout1, i32 4
+  %add.ptr4 = getelementptr inbounds <32 x i32>, ptr %Vout1, i32 5
   br label %for.body
 
 for.cond.cleanup:                                 ; preds = %for.body, %entry
-  call void @llvm.lifetime.end.p0i8(i64 128, i8* nonnull %0) #2
+  call void @llvm.lifetime.end.p0(i64 128, ptr nonnull %Vout1) #2
   ret void
 
 for.body:                                         ; preds = %for.body, %for.body.lr.ph
   %i.024 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
-  call void @llvm.hexagon.V6.vgathermh.128B(i8* nonnull %0, i32 %Rb, i32 %mu, <32 x i32> %Vv)
-  call void @llvm.hexagon.V6.vgathermw.128B(i8* nonnull %1, i32 %Rb, i32 %mu, <32 x i32> %Vv)
-  call void @llvm.hexagon.V6.vgathermhw.128B(i8* nonnull %2, i32 %Rb, i32 %mu, <64 x i32> %Vvv)
-  call void @llvm.hexagon.V6.vgathermhq.128B(i8* nonnull %3, <128 x i1> %4, i32 %Rb, i32 %mu, <32 x i32> %Vv)
-  call void @llvm.hexagon.V6.vgathermwq.128B(i8* nonnull %5, <128 x i1> %4, i32 %Rb, i32 %mu, <32 x i32> %Vv)
-  call void @llvm.hexagon.V6.vgathermhwq.128B(i8* nonnull %6, <128 x i1> %4, i32 %Rb, i32 %mu, <64 x i32> %Vvv)
+  call void @llvm.hexagon.V6.vgathermh.128B(ptr nonnull %Vout1, i32 %Rb, i32 %mu, <32 x i32> %Vv)
+  call void @llvm.hexagon.V6.vgathermw.128B(ptr nonnull %add.ptr, i32 %Rb, i32 %mu, <32 x i32> %Vv)
+  call void @llvm.hexagon.V6.vgathermhw.128B(ptr nonnull %add.ptr1, i32 %Rb, i32 %mu, <64 x i32> %Vvv)
+  call void @llvm.hexagon.V6.vgathermhq.128B(ptr nonnull %add.ptr2, <128 x i1> %0, i32 %Rb, i32 %mu, <32 x i32> %Vv)
+  call void @llvm.hexagon.V6.vgathermwq.128B(ptr nonnull %add.ptr3, <128 x i1> %0, i32 %Rb, i32 %mu, <32 x i32> %Vv)
+  call void @llvm.hexagon.V6.vgathermhwq.128B(ptr nonnull %add.ptr4, <128 x i1> %0, i32 %Rb, i32 %mu, <64 x i32> %Vvv)
   %inc = add nuw nsw i32 %i.024, 1
   %exitcond = icmp eq i32 %inc, %nloops
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.hexagon.V6.vgathermh.128B(i8*, i32, i32, <32 x i32>) #1
+declare void @llvm.hexagon.V6.vgathermh.128B(ptr, i32, i32, <32 x i32>) #1
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.hexagon.V6.vgathermw.128B(i8*, i32, i32, <32 x i32>) #1
+declare void @llvm.hexagon.V6.vgathermw.128B(ptr, i32, i32, <32 x i32>) #1
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.hexagon.V6.vgathermhw.128B(i8*, i32, i32, <64 x i32>) #1
+declare void @llvm.hexagon.V6.vgathermhw.128B(ptr, i32, i32, <64 x i32>) #1
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.hexagon.V6.vgathermhq.128B(i8*, <128 x i1>, i32, i32, <32 x i32>) #1
+declare void @llvm.hexagon.V6.vgathermhq.128B(ptr, <128 x i1>, i32, i32, <32 x i32>) #1
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.hexagon.V6.vgathermwq.128B(i8*, <128 x i1>, i32, i32, <32 x i32>) #1
+declare void @llvm.hexagon.V6.vgathermwq.128B(ptr, <128 x i1>, i32, i32, <32 x i32>) #1
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.hexagon.V6.vgathermhwq.128B(i8*, <128 x i1>, i32, i32, <64 x i32>) #1
+declare void @llvm.hexagon.V6.vgathermhwq.128B(ptr, <128 x i1>, i32, i32, <64 x i32>) #1
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
 
 declare <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32>, i32) #1
 

diff  --git a/llvm/test/CodeGen/Hexagon/vload-postinc-sel.ll b/llvm/test/CodeGen/Hexagon/vload-postinc-sel.ll
index 35381bd94ea78..5884e20b5d717 100644
--- a/llvm/test/CodeGen/Hexagon/vload-postinc-sel.ll
+++ b/llvm/test/CodeGen/Hexagon/vload-postinc-sel.ll
@@ -11,7 +11,7 @@ declare <64 x i32> @llvm.hexagon.V6.vaddh.dv.128B(<64 x i32>, <64 x i32>) #0
 declare <64 x i32> @llvm.hexagon.V6.vadduhsat.dv.128B(<64 x i32>, <64 x i32>) #0
 declare <32 x i32> @llvm.hexagon.V6.vabs
diff uh.128B(<32 x i32>, <32 x i32>) #0
 
-define void @f0(i8* %a0, <32 x i32>* %a1) #1 {
+define void @f0(ptr %a0, ptr %a1) #1 {
 b0:
   br label %b1
 
@@ -21,9 +21,8 @@ b1:                                               ; preds = %b2, %b1
   %v2 = add nsw i32 %v1, undef
   %v3 = shl i32 %v2, 7
   %v4 = add nsw i32 %v3, 128
-  %v5 = getelementptr inbounds i8, i8* %a0, i32 %v4
-  %v6 = bitcast i8* %v5 to <128 x i8>*
-  %v7 = load <128 x i8>, <128 x i8>* %v6, align 128
+  %v5 = getelementptr inbounds i8, ptr %a0, i32 %v4
+  %v7 = load <128 x i8>, ptr %v5, align 128
   %v8 = bitcast <128 x i8> %v0 to <32 x i32>
   %v9 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> undef, <32 x i32> %v8, i32 1)
   %v10 = tail call <64 x i32> @llvm.hexagon.V6.vzb.128B(<32 x i32> %v9) #1
@@ -35,7 +34,7 @@ b1:                                               ; preds = %b2, %b1
   %v16 = tail call <64 x i32> @llvm.hexagon.V6.vaddh.dv.128B(<64 x i32> undef, <64 x i32> %v15) #1
   %v17 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v16) #1
   %v18 = tail call <32 x i32> @llvm.hexagon.V6.vsathub.128B(<32 x i32> %v17, <32 x i32> undef) #1
-  store <32 x i32> %v18, <32 x i32>* %a1, align 128
+  store <32 x i32> %v18, ptr %a1, align 128
   %v19 = add nuw nsw i32 %v1, 1
   br label %b1
 }

diff  --git a/llvm/test/CodeGen/Hexagon/vmemu-128.ll b/llvm/test/CodeGen/Hexagon/vmemu-128.ll
index c385d5327fafd..9810332db3fc6 100644
--- a/llvm/test/CodeGen/Hexagon/vmemu-128.ll
+++ b/llvm/test/CodeGen/Hexagon/vmemu-128.ll
@@ -4,13 +4,11 @@
 ; CHECK-NOT: r{{[0-9]+}} = memw
 
 ; Function Attrs: nounwind
-define void @f0(i8* noalias nocapture readonly %a0, i16* nocapture %a1) #0 {
+define void @f0(ptr noalias nocapture readonly %a0, ptr nocapture %a1) #0 {
 b0:
-  %v0 = bitcast i8* %a0 to <32 x i32>*
-  %v1 = load <32 x i32>, <32 x i32>* %v0, align 4, !tbaa !0
+  %v1 = load <32 x i32>, ptr %a0, align 4, !tbaa !0
   %v2 = tail call <32 x i32> @llvm.hexagon.V6.vrmpyub.128B(<32 x i32> %v1, i32 16843009)
-  %v3 = bitcast i16* %a1 to <32 x i32>*
-  store <32 x i32> %v2, <32 x i32>* %v3, align 128, !tbaa !0
+  store <32 x i32> %v2, ptr %a1, align 128, !tbaa !0
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/vpack_eo.ll b/llvm/test/CodeGen/Hexagon/vpack_eo.ll
index cf8619c0f0a5d..a9759f3025e6c 100644
--- a/llvm/test/CodeGen/Hexagon/vpack_eo.ll
+++ b/llvm/test/CodeGen/Hexagon/vpack_eo.ll
@@ -4,57 +4,44 @@ target triple = "hexagon-unknown--elf"
 ; CHECK-DAG: vpacke
 ; CHECK-DAG: vpacko
 
-%struct.buffer_t = type { i64, i8*, [4 x i32], [4 x i32], [4 x i32], i32, i8, i8, [6 x i8] }
+%struct.buffer_t = type { i64, ptr, [4 x i32], [4 x i32], [4 x i32], i32, i8, i8, [6 x i8] }
 
 ; Function Attrs: norecurse nounwind
-define i32 @__Strided_LoadTest(%struct.buffer_t* noalias nocapture readonly %InputOne.buffer, %struct.buffer_t* noalias nocapture readonly %InputTwo.buffer, %struct.buffer_t* noalias nocapture readonly %Strided_LoadTest.buffer) #0 {
+define i32 @__Strided_LoadTest(ptr noalias nocapture readonly %InputOne.buffer, ptr noalias nocapture readonly %InputTwo.buffer, ptr noalias nocapture readonly %Strided_LoadTest.buffer) #0 {
 entry:
-  %buf_host = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %InputOne.buffer, i32 0, i32 1
-  %0 = bitcast i8** %buf_host to i16**
-  %InputOne.host45 = load i16*, i16** %0, align 4
-  %buf_host10 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %InputTwo.buffer, i32 0, i32 1
-  %1 = bitcast i8** %buf_host10 to i16**
-  %InputTwo.host46 = load i16*, i16** %1, align 4
-  %buf_host27 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %Strided_LoadTest.buffer, i32 0, i32 1
-  %2 = bitcast i8** %buf_host27 to i16**
-  %Strided_LoadTest.host44 = load i16*, i16** %2, align 4
-  %3 = bitcast i16* %InputOne.host45 to <32 x i16>*
-  %4 = load <32 x i16>, <32 x i16>* %3, align 2, !tbaa !4
-  %5 = getelementptr inbounds i16, i16* %InputOne.host45, i32 32
-  %6 = bitcast i16* %5 to <32 x i16>*
-  %7 = load <32 x i16>, <32 x i16>* %6, align 2, !tbaa !4
-  %8 = shufflevector <32 x i16> %4, <32 x i16> %7, <32 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63>
-  %9 = bitcast i16* %InputTwo.host46 to <32 x i16>*
-  %10 = load <32 x i16>, <32 x i16>* %9, align 2, !tbaa !7
-  %11 = getelementptr inbounds i16, i16* %InputTwo.host46, i32 32
-  %12 = bitcast i16* %11 to <32 x i16>*
-  %13 = load <32 x i16>, <32 x i16>* %12, align 2, !tbaa !7
-  %14 = shufflevector <32 x i16> %10, <32 x i16> %13, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
-  %15 = bitcast <32 x i16> %8 to <16 x i32>
-  %16 = bitcast <32 x i16> %14 to <16 x i32>
-  %17 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %15, <16 x i32> %16)
-  %18 = bitcast i16* %Strided_LoadTest.host44 to <16 x i32>*
-  store <16 x i32> %17, <16 x i32>* %18, align 2, !tbaa !9
-  %.inc = getelementptr i16, i16* %InputOne.host45, i32 64
-  %.inc49 = getelementptr i16, i16* %InputTwo.host46, i32 64
-  %.inc52 = getelementptr i16, i16* %Strided_LoadTest.host44, i32 32
-  %19 = bitcast i16* %.inc to <32 x i16>*
-  %20 = load <32 x i16>, <32 x i16>* %19, align 2, !tbaa !4
-  %21 = getelementptr inbounds i16, i16* %InputOne.host45, i32 96
-  %22 = bitcast i16* %21 to <32 x i16>*
-  %23 = load <32 x i16>, <32 x i16>* %22, align 2, !tbaa !4
-  %24 = shufflevector <32 x i16> %20, <32 x i16> %23, <32 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63>
-  %25 = bitcast i16* %.inc49 to <32 x i16>*
-  %26 = load <32 x i16>, <32 x i16>* %25, align 2, !tbaa !7
-  %27 = getelementptr inbounds i16, i16* %InputTwo.host46, i32 96
-  %28 = bitcast i16* %27 to <32 x i16>*
-  %29 = load <32 x i16>, <32 x i16>* %28, align 2, !tbaa !7
-  %30 = shufflevector <32 x i16> %26, <32 x i16> %29, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
-  %31 = bitcast <32 x i16> %24 to <16 x i32>
-  %32 = bitcast <32 x i16> %30 to <16 x i32>
-  %33 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %31, <16 x i32> %32)
-  %34 = bitcast i16* %.inc52 to <16 x i32>*
-  store <16 x i32> %33, <16 x i32>* %34, align 2, !tbaa !9
+  %buf_host = getelementptr inbounds %struct.buffer_t, ptr %InputOne.buffer, i32 0, i32 1
+  %InputOne.host45 = load ptr, ptr %buf_host, align 4
+  %buf_host10 = getelementptr inbounds %struct.buffer_t, ptr %InputTwo.buffer, i32 0, i32 1
+  %InputTwo.host46 = load ptr, ptr %buf_host10, align 4
+  %buf_host27 = getelementptr inbounds %struct.buffer_t, ptr %Strided_LoadTest.buffer, i32 0, i32 1
+  %Strided_LoadTest.host44 = load ptr, ptr %buf_host27, align 4
+  %0 = load <32 x i16>, ptr %InputOne.host45, align 2, !tbaa !4
+  %1 = getelementptr inbounds i16, ptr %InputOne.host45, i32 32
+  %2 = load <32 x i16>, ptr %1, align 2, !tbaa !4
+  %3 = shufflevector <32 x i16> %0, <32 x i16> %2, <32 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63>
+  %4 = load <32 x i16>, ptr %InputTwo.host46, align 2, !tbaa !7
+  %5 = getelementptr inbounds i16, ptr %InputTwo.host46, i32 32
+  %6 = load <32 x i16>, ptr %5, align 2, !tbaa !7
+  %7 = shufflevector <32 x i16> %4, <32 x i16> %6, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
+  %8 = bitcast <32 x i16> %3 to <16 x i32>
+  %9 = bitcast <32 x i16> %7 to <16 x i32>
+  %10 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %8, <16 x i32> %9)
+  store <16 x i32> %10, ptr %Strided_LoadTest.host44, align 2, !tbaa !9
+  %.inc = getelementptr i16, ptr %InputOne.host45, i32 64
+  %.inc49 = getelementptr i16, ptr %InputTwo.host46, i32 64
+  %.inc52 = getelementptr i16, ptr %Strided_LoadTest.host44, i32 32
+  %11 = load <32 x i16>, ptr %.inc, align 2, !tbaa !4
+  %12 = getelementptr inbounds i16, ptr %InputOne.host45, i32 96
+  %13 = load <32 x i16>, ptr %12, align 2, !tbaa !4
+  %14 = shufflevector <32 x i16> %11, <32 x i16> %13, <32 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63>
+  %15 = load <32 x i16>, ptr %.inc49, align 2, !tbaa !7
+  %16 = getelementptr inbounds i16, ptr %InputTwo.host46, i32 96
+  %17 = load <32 x i16>, ptr %16, align 2, !tbaa !7
+  %18 = shufflevector <32 x i16> %15, <32 x i16> %17, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
+  %19 = bitcast <32 x i16> %14 to <16 x i32>
+  %20 = bitcast <32 x i16> %18 to <16 x i32>
+  %21 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %19, <16 x i32> %20)
+  store <16 x i32> %21, ptr %.inc52, align 2, !tbaa !9
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Hexagon/vrcmpys.ll b/llvm/test/CodeGen/Hexagon/vrcmpys.ll
index 2c9246bb76047..36fa6e6ef7eba 100644
--- a/llvm/test/CodeGen/Hexagon/vrcmpys.ll
+++ b/llvm/test/CodeGen/Hexagon/vrcmpys.ll
@@ -7,9 +7,9 @@
 ; CHECK: r{{[0-9]}}:{{[0-9]}} += vrcmpys(r{{[0-9]}}:{{[0-9]}},r{{[0-9]}}:{{[0-9]}}):<<1:sat:raw:lo
 define double @f0(i32 %a0, i32 %a1) {
 b0:
-  %v0 = load double, double* @g0, align 8, !tbaa !0
+  %v0 = load double, ptr @g0, align 8, !tbaa !0
   %v1 = fptosi double %v0 to i64
-  %v2 = load double, double* @g1, align 8, !tbaa !0
+  %v2 = load double, ptr @g1, align 8, !tbaa !0
   %v3 = fptosi double %v2 to i64
   %v4 = tail call i64 @llvm.hexagon.M2.vrcmpys.acc.s1(i64 %v1, i64 %v3, i32 %a0)
   %v5 = sitofp i64 %v4 to double
@@ -23,9 +23,9 @@ declare i64 @llvm.hexagon.M2.vrcmpys.acc.s1(i64, i64, i32) #0
 ; CHECK: r{{[0-9]}}:{{[0-9]}} += vrcmpys(r{{[0-9]}}:{{[0-9]}},r{{[0-9]}}:{{[0-9]}}):<<1:sat:raw:hi
 define double @f1(i32 %a0, i32 %a1) {
 b0:
-  %v0 = load double, double* @g0, align 8, !tbaa !0
+  %v0 = load double, ptr @g0, align 8, !tbaa !0
   %v1 = fptosi double %v0 to i64
-  %v2 = load double, double* @g1, align 8, !tbaa !0
+  %v2 = load double, ptr @g1, align 8, !tbaa !0
   %v3 = fptosi double %v2 to i64
   %v4 = tail call i64 @llvm.hexagon.M2.vrcmpys.acc.s1(i64 %v1, i64 %v3, i32 %a1)
   %v5 = sitofp i64 %v4 to double
@@ -36,7 +36,7 @@ b0:
 ; CHECK: r{{[0-9]}}:{{[0-9]}} = vrcmpys(r{{[0-9]}}:{{[0-9]}},r{{[0-9]}}:{{[0-9]}}):<<1:sat:raw:lo
 define double @f2(i32 %a0, i32 %a1) {
 b0:
-  %v0 = load double, double* @g1, align 8, !tbaa !0
+  %v0 = load double, ptr @g1, align 8, !tbaa !0
   %v1 = fptosi double %v0 to i64
   %v2 = tail call i64 @llvm.hexagon.M2.vrcmpys.s1(i64 %v1, i32 %a0)
   %v3 = sitofp i64 %v2 to double
@@ -50,7 +50,7 @@ declare i64 @llvm.hexagon.M2.vrcmpys.s1(i64, i32) #0
 ; CHECK: r{{[0-9]}}:{{[0-9]}} = vrcmpys(r{{[0-9]}}:{{[0-9]}},r{{[0-9]}}:{{[0-9]}}):<<1:sat:raw:hi
 define double @f3(i32 %a0, i32 %a1) {
 b0:
-  %v0 = load double, double* @g1, align 8, !tbaa !0
+  %v0 = load double, ptr @g1, align 8, !tbaa !0
   %v1 = fptosi double %v0 to i64
   %v2 = tail call i64 @llvm.hexagon.M2.vrcmpys.s1(i64 %v1, i32 %a1)
   %v3 = sitofp i64 %v2 to double

diff  --git a/llvm/test/CodeGen/Hexagon/vselect-pseudo.ll b/llvm/test/CodeGen/Hexagon/vselect-pseudo.ll
index 58fe4ad6675af..e301a9d218cb9 100644
--- a/llvm/test/CodeGen/Hexagon/vselect-pseudo.ll
+++ b/llvm/test/CodeGen/Hexagon/vselect-pseudo.ll
@@ -16,7 +16,7 @@ for.body9.us:
   %2 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %1)
   %3 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> undef, <16 x i32> %2, i32 62)
   %4 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %3)
-  store <16 x i32> %4, <16 x i32>* undef, align 64
+  store <16 x i32> %4, ptr undef, align 64
   br i1 undef, label %for.body9.us, label %for.body43.us.preheader
 
 for.body43.us.preheader:                          ; preds = %for.body9.us

diff  --git a/llvm/test/CodeGen/Hexagon/wcsrtomb.ll b/llvm/test/CodeGen/Hexagon/wcsrtomb.ll
index 3953cab445e5a..0fc0741d3cd2e 100644
--- a/llvm/test/CodeGen/Hexagon/wcsrtomb.ll
+++ b/llvm/test/CodeGen/Hexagon/wcsrtomb.ll
@@ -6,20 +6,20 @@ target triple = "hexagon"
 %s.0 = type { i32, i16, i16 }
 
 ; Function Attrs: nounwind
-define i32 @f0(i8* %a0, i32** nocapture %a1, i32 %a2, %s.0* %a3) #0 {
+define i32 @f0(ptr %a0, ptr nocapture %a1, i32 %a2, ptr %a3) #0 {
 b0:
   %v0 = alloca [8 x i8], align 8
-  %v1 = load i32*, i32** %a1, align 4, !tbaa !0
-  %v2 = icmp eq %s.0* %a3, null
+  %v1 = load ptr, ptr %a1, align 4, !tbaa !0
+  %v2 = icmp eq ptr %a3, null
   br i1 %v2, label %b1, label %b2
 
 b1:                                               ; preds = %b0
-  %v3 = call %s.0* bitcast (%s.0* (...)* @f1 to %s.0* ()*)() #1
+  %v3 = call ptr @f1() #1
   br label %b2
 
 b2:                                               ; preds = %b1, %b0
-  %v4 = phi %s.0* [ %v3, %b1 ], [ %a3, %b0 ]
-  %v5 = icmp eq i8* %a0, null
+  %v4 = phi ptr [ %v3, %b1 ], [ %a3, %b0 ]
+  %v5 = icmp eq ptr %a0, null
   br i1 %v5, label %b5, label %b3
 
 b3:                                               ; preds = %b2
@@ -27,17 +27,13 @@ b3:                                               ; preds = %b2
   br i1 %v6, label %b23, label %b4
 
 b4:                                               ; preds = %b3
-  %v7 = getelementptr inbounds [8 x i8], [8 x i8]* %v0, i32 0, i32 0
-  %v8 = getelementptr inbounds %s.0, %s.0* %v4, i32 0, i32 0
-  %v9 = getelementptr inbounds %s.0, %s.0* %v4, i32 0, i32 1
-  %v10 = getelementptr inbounds %s.0, %s.0* %v4, i32 0, i32 2
-  %v11 = bitcast i16* %v9 to i32*
+  %v9 = getelementptr inbounds %s.0, ptr %v4, i32 0, i32 1
+  %v10 = getelementptr inbounds %s.0, ptr %v4, i32 0, i32 2
   br label %b11
 
 b5:                                               ; preds = %b2
-  %v12 = getelementptr inbounds [8 x i8], [8 x i8]* %v0, i32 0, i32 0
-  %v13 = load i32, i32* %v1, align 4, !tbaa !4
-  %v14 = call i32 @f2(i8* %v12, i32 %v13, %s.0* %v4) #1
+  %v13 = load i32, ptr %v1, align 4, !tbaa !4
+  %v14 = call i32 @f2(ptr %v0, i32 %v13, ptr %v4) #1
   %v15 = icmp slt i32 %v14, 0
   br i1 %v15, label %b25, label %b6
 
@@ -47,14 +43,14 @@ b6:                                               ; preds = %b5
 b7:                                               ; preds = %b10, %b6
   %v16 = phi i32 [ %v29, %b10 ], [ %v14, %b6 ]
   %v17 = phi i32 [ %v26, %b10 ], [ 0, %b6 ]
-  %v18 = phi i32* [ %v27, %b10 ], [ %v1, %b6 ]
+  %v18 = phi ptr [ %v27, %b10 ], [ %v1, %b6 ]
   %v19 = icmp sgt i32 %v16, 0
   br i1 %v19, label %b8, label %b10
 
 b8:                                               ; preds = %b7
   %v20 = add nsw i32 %v16, -1
-  %v21 = getelementptr inbounds [8 x i8], [8 x i8]* %v0, i32 0, i32 %v20
-  %v22 = load i8, i8* %v21, align 1, !tbaa !6
+  %v21 = getelementptr inbounds [8 x i8], ptr %v0, i32 0, i32 %v20
+  %v22 = load i8, ptr %v21, align 1, !tbaa !6
   %v23 = icmp eq i8 %v22, 0
   br i1 %v23, label %b9, label %b10
 
@@ -65,17 +61,17 @@ b9:                                               ; preds = %b8
 
 b10:                                              ; preds = %b8, %b7
   %v26 = add i32 %v16, %v17
-  %v27 = getelementptr inbounds i32, i32* %v18, i32 1
-  %v28 = load i32, i32* %v27, align 4, !tbaa !4
-  %v29 = call i32 @f2(i8* %v12, i32 %v28, %s.0* %v4) #1
+  %v27 = getelementptr inbounds i32, ptr %v18, i32 1
+  %v28 = load i32, ptr %v27, align 4, !tbaa !4
+  %v29 = call i32 @f2(ptr %v0, i32 %v28, ptr %v4) #1
   %v30 = icmp slt i32 %v29, 0
   br i1 %v30, label %b24, label %b7
 
 b11:                                              ; preds = %b21, %b4
-  %v31 = phi i8* [ %a0, %b4 ], [ %v64, %b21 ]
+  %v31 = phi ptr [ %a0, %b4 ], [ %v64, %b21 ]
   %v32 = phi i32 [ %a2, %b4 ], [ %v65, %b21 ]
   %v33 = phi i32 [ 0, %b4 ], [ %v62, %b21 ]
-  %v34 = phi i32* [ %v1, %b4 ], [ %v63, %b21 ]
+  %v34 = phi ptr [ %v1, %b4 ], [ %v63, %b21 ]
   %v35 = phi i32 [ undef, %b4 ], [ %v47, %b21 ]
   %v36 = phi i16 [ undef, %b4 ], [ %v46, %b21 ]
   %v37 = phi i16 [ undef, %b4 ], [ %v45, %b21 ]
@@ -84,8 +80,8 @@ b11:                                              ; preds = %b21, %b4
   br i1 %v39, label %b12, label %b13
 
 b12:                                              ; preds = %b11
-  %v40 = load i32, i32* %v8, align 4
-  %v41 = load i32, i32* %v11, align 4
+  %v40 = load i32, ptr %v4, align 4
+  %v41 = load i32, ptr %v9, align 4
   %v42 = trunc i32 %v41 to i16
   %v43 = lshr i32 %v41, 16
   %v44 = trunc i32 %v43 to i16
@@ -95,14 +91,14 @@ b13:                                              ; preds = %b12, %b11
   %v45 = phi i16 [ %v44, %b12 ], [ %v37, %b11 ]
   %v46 = phi i16 [ %v42, %b12 ], [ %v36, %b11 ]
   %v47 = phi i32 [ %v40, %b12 ], [ %v35, %b11 ]
-  %v48 = phi i8* [ %v7, %b12 ], [ %v31, %b11 ]
-  %v49 = load i32, i32* %v34, align 4, !tbaa !4
-  %v50 = call i32 @f2(i8* %v48, i32 %v49, %s.0* %v4) #1
+  %v48 = phi ptr [ %v0, %b12 ], [ %v31, %b11 ]
+  %v49 = load i32, ptr %v34, align 4, !tbaa !4
+  %v50 = call i32 @f2(ptr %v48, i32 %v49, ptr %v4) #1
   %v51 = icmp slt i32 %v50, 0
   br i1 %v51, label %b22, label %b14
 
 b14:                                              ; preds = %b13
-  %v52 = icmp eq i8* %v31, %v48
+  %v52 = icmp eq ptr %v31, %v48
   br i1 %v52, label %b18, label %b15
 
 b15:                                              ; preds = %b14
@@ -110,13 +106,13 @@ b15:                                              ; preds = %b14
   br i1 %v53, label %b16, label %b17
 
 b16:                                              ; preds = %b15
-  store i32 %v47, i32* %v8, align 4
-  store i16 %v46, i16* %v9, align 4
-  store i16 %v45, i16* %v10, align 2
+  store i32 %v47, ptr %v4, align 4
+  store i16 %v46, ptr %v9, align 4
+  store i16 %v45, ptr %v10, align 2
   br label %b23
 
 b17:                                              ; preds = %b15
-  %v54 = call i8* @f4(i8* %v31, i8* %v7, i32 %v50) #1
+  %v54 = call ptr @f4(ptr %v31, ptr %v0, i32 %v50) #1
   br label %b18
 
 b18:                                              ; preds = %b17, %b14
@@ -125,34 +121,34 @@ b18:                                              ; preds = %b17, %b14
 
 b19:                                              ; preds = %b18
   %v56 = add nsw i32 %v50, -1
-  %v57 = getelementptr inbounds i8, i8* %v31, i32 %v56
-  %v58 = load i8, i8* %v57, align 1, !tbaa !6
+  %v57 = getelementptr inbounds i8, ptr %v31, i32 %v56
+  %v58 = load i8, ptr %v57, align 1, !tbaa !6
   %v59 = icmp eq i8 %v58, 0
   br i1 %v59, label %b20, label %b21
 
 b20:                                              ; preds = %b19
-  store i32* null, i32** %a1, align 4, !tbaa !0
+  store ptr null, ptr %a1, align 4, !tbaa !0
   %v60 = add i32 %v33, -1
   %v61 = add i32 %v60, %v50
   br label %b25
 
 b21:                                              ; preds = %b19, %b18
   %v62 = add i32 %v50, %v33
-  %v63 = getelementptr inbounds i32, i32* %v34, i32 1
-  %v64 = getelementptr inbounds i8, i8* %v31, i32 %v50
+  %v63 = getelementptr inbounds i32, ptr %v34, i32 1
+  %v64 = getelementptr inbounds i8, ptr %v31, i32 %v50
   %v65 = sub i32 %v32, %v50
   %v66 = icmp eq i32 %v32, %v50
   br i1 %v66, label %b22, label %b11
 
 b22:                                              ; preds = %b21, %b13
-  %v67 = phi i32* [ %v34, %b13 ], [ %v63, %b21 ]
+  %v67 = phi ptr [ %v34, %b13 ], [ %v63, %b21 ]
   %v68 = phi i32 [ -1, %b13 ], [ %v62, %b21 ]
   br label %b23
 
 b23:                                              ; preds = %b22, %b16, %b3
-  %v69 = phi i32* [ %v34, %b16 ], [ %v1, %b3 ], [ %v67, %b22 ]
+  %v69 = phi ptr [ %v34, %b16 ], [ %v1, %b3 ], [ %v67, %b22 ]
   %v70 = phi i32 [ %v33, %b16 ], [ 0, %b3 ], [ %v68, %b22 ]
-  store i32* %v69, i32** %a1, align 4, !tbaa !0
+  store ptr %v69, ptr %a1, align 4, !tbaa !0
   br label %b25
 
 b24:                                              ; preds = %b10
@@ -163,13 +159,13 @@ b25:                                              ; preds = %b24, %b23, %b20, %b
   ret i32 %v71
 }
 
-declare %s.0* @f1(...)
+declare ptr @f1(...)
 
-declare i32 @f2(i8*, i32, %s.0*)
+declare i32 @f2(ptr, i32, ptr)
 
 declare i32 @f3()
 
-declare i8* @f4(i8*, i8*, i32)
+declare ptr @f4(ptr, ptr, i32)
 
 attributes #0 = { nounwind "target-cpu"="hexagonv55" }
 attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/Hexagon/zextloadi1.ll b/llvm/test/CodeGen/Hexagon/zextloadi1.ll
index 29ebf2e09275d..2aab427782257 100644
--- a/llvm/test/CodeGen/Hexagon/zextloadi1.ll
+++ b/llvm/test/CodeGen/Hexagon/zextloadi1.ll
@@ -13,8 +13,8 @@
 ; CHECK-DAG: memd(##i129_s+8) = r[[REG1]]
 ; CHECK-DAG: memd(##i129_s) = r[[REG0]]
 define void @i129_ls() nounwind  {
-        %tmp = load i129, i129* @i129_l
-        store i129 %tmp, i129* @i129_s
+        %tmp = load i129, ptr @i129_l
+        store i129 %tmp, ptr @i129_s
         ret void
 }
 
@@ -24,7 +24,7 @@ define void @i129_ls() nounwind  {
 ; CHECK-DAG: memd(##i65_s) = r[[REG0]]
 ; CHECK-DAG: memb(##i65_s+8) = r[[REG1]]
 define void @i65_ls() nounwind  {
-        %tmp = load i65, i65* @i65_l
-        store i65 %tmp, i65* @i65_s
+        %tmp = load i65, ptr @i65_l
+        store i65 %tmp, ptr @i65_s
         ret void
 }


        


More information about the llvm-commits mailing list